Skip to content

Commit 74c9967

Browse files
Donglai Wei and claude
committed
Use waterz.face_merge.slice_overlaps in branch_merge, update large decode yaml
branch_merge.py now imports slice_overlaps from waterz.face_merge instead of duplicating the overlap computation. Both border stitching (large_decode) and z-slice resolution (branch_merge) share the same implementation. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1 parent 54b6593 commit 74c9967

2 files changed

Lines changed: 12 additions & 55 deletions

File tree

connectomics/decoding/decoders/branch_merge.py

Lines changed: 2 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -102,54 +102,10 @@ def _compute_z_extents(seg: np.ndarray) -> Tuple[Dict[int, int], Dict[int, int]]
102102

103103

104104
# ---------------------------------------------------------------------------
105-
# Slice overlap computation
105+
# Slice overlap computation — delegates to waterz.face_merge.slice_overlaps
106106
# ---------------------------------------------------------------------------
107107

108-
109-
def _slice_overlaps(
110-
s0: np.ndarray,
111-
s1: np.ndarray,
112-
z_aff: Optional[np.ndarray] = None,
113-
) -> np.ndarray:
114-
"""Overlap statistics between two consecutive 2D label maps.
115-
116-
Returns (N, K) float64 array where each row is::
117-
118-
[id0, id1, size0, size1, overlap, mean_z_affinity?]
119-
"""
120-
fg = (s0 > 0) & (s1 > 0)
121-
ncols = 6 if z_aff is not None else 5
122-
if not fg.any():
123-
return np.empty((0, ncols), dtype=np.float64)
124-
125-
a = s0[fg].astype(np.int64)
126-
b = s1[fg].astype(np.int64)
127-
128-
u0, c0 = np.unique(s0[s0 > 0], return_counts=True)
129-
u1, c1 = np.unique(s1[s1 > 0], return_counts=True)
130-
size0_map = dict(zip(u0.tolist(), c0.tolist()))
131-
size1_map = dict(zip(u1.tolist(), c1.tolist()))
132-
133-
pairs = np.stack([a, b], axis=1)
134-
unique_pairs, inverse, counts = np.unique(
135-
pairs, axis=0, return_inverse=True, return_counts=True,
136-
)
137-
138-
n = len(unique_pairs)
139-
result = np.zeros((n, ncols), dtype=np.float64)
140-
result[:, 0] = unique_pairs[:, 0]
141-
result[:, 1] = unique_pairs[:, 1]
142-
result[:, 2] = np.array([size0_map[int(i)] for i in unique_pairs[:, 0]])
143-
result[:, 3] = np.array([size1_map[int(i)] for i in unique_pairs[:, 1]])
144-
result[:, 4] = counts
145-
146-
if z_aff is not None:
147-
aff_vals = z_aff[fg].astype(np.float64)
148-
aff_sums = np.zeros(n, dtype=np.float64)
149-
np.add.at(aff_sums, inverse, aff_vals)
150-
result[:, 5] = aff_sums / counts
151-
152-
return result
108+
from waterz.face_merge import slice_overlaps as _slice_overlaps
153109

154110

155111
# ---------------------------------------------------------------------------

tutorials/waterz_decoding_large.yaml

Lines changed: 10 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -23,28 +23,29 @@ description: >
2323
# ── Chunked decoding config ──────────────────────────────────────────────────
2424
large_decode:
2525
# Input affinity file (HDF5, dataset "main", shape (C,Z,Y,X))
26-
affinity_path: "" # SET THIS
26+
affinity_path: "/projects/weilab/dataset/liconn/mansour/DL288B_251222S_cond5_40x_12tiles_round1_fused_488_crop512x1024x1024_ffn_sharp_tta_x8_prediction_uint8.h5" # SET THIS
2727

2828
# Workflow directory (shared filesystem for parallel workers)
29-
workflow_root: "" # SET THIS: e.g. /scratch/waterz_workflow/
29+
workflow_root: "/projects/weilab/weidf/lib/pytorch_connectomics/outputs/neuron_liconn_mit/DL228B/" # SET THIS: e.g. /scratch/waterz_workflow/
3030

3131
# Chunk size in voxels (Z, Y, X). Each chunk is decoded independently.
3232
# Smaller chunks = less memory per worker but more border stitching.
3333
# Rule of thumb: ~256^3 for 16 GB RAM, ~512^3 for 64 GB RAM.
34-
chunk_shape: [256, 512, 512]
34+
chunk_shape: [80, 2066, 2066]
3535

3636
# Waterz agglomeration parameters (same as decode_waterz kwargs)
3737
thresholds: [0.5]
3838
merge_function: aff85_his256
39-
aff_threshold_low: 0.1
39+
aff_threshold_low: 0.001
4040
aff_threshold_high: 0.999
4141
channel_order: xyz
4242

43-
# Border stitching parameters
44-
border_min_overlap: 1 # min overlap pixels to consider a pair
45-
border_one_sided_threshold: 0.9 # IOU for small-into-large merge at borders
46-
border_iou_threshold: 0.0 # full Jaccard IOU threshold (0=disabled)
47-
border_affinity_threshold: 0.0 # min boundary affinity (0=disabled)
43+
# Border stitching parameters (same algorithm as branch_merge)
44+
border_min_overlap: 10 # min overlap pixels to consider a pair
45+
border_iou_threshold: 0.0 # full Jaccard IOU threshold (0=disabled)
46+
border_one_sided_threshold: 0.9 # overlap/min_size for small-into-large merge
47+
border_one_sided_min_size: 0 # min segment size in face for one-sided merge
48+
border_affinity_threshold: 0.0 # min boundary affinity (0=disabled)
4849

4950
# Output
5051
write_output: true # assemble final volume

0 commit comments

Comments
 (0)