Skip to content

Commit

Permalink
Merge pull request #51 from Hekstra-Lab/modernize
Browse files Browse the repository at this point in the history
Add intensity images to tracking
  • Loading branch information
ianhi authored Feb 26, 2024
2 parents 1fba59e + e2eaca7 commit e4c772b
Show file tree
Hide file tree
Showing 5 changed files with 43 additions and 11 deletions.
37 changes: 32 additions & 5 deletions microutil/btrack/btrack.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,15 @@
]


def gogogo_btrack(labels, config_file, radius, tracks_out):
def gogogo_btrack(
labels,
config_file,
radius,
tracks_out=None,
intensity_image=None,
properties=(),
use_weighted_centroid=True,
):
"""
Run btrack on a single position timeseries. Write the track data to h5
files using the built-in export capability of btrack.
Expand All @@ -22,6 +30,13 @@ def gogogo_btrack(labels, config_file, radius, tracks_out):
Maximum search radius for btrack
tracks_out : str
Path to h5 files where track data will be saved
intensity_image: np.ndarray or None
Underlying intensity image from which to compute optional features
properties : tuple[str,...]
Other properties to compute
use_weighted_centroid : bool default True
Whether to use the weighted centroid for objects if intensity_image is provided.
Default True.
Returns
-------
Expand All @@ -30,7 +45,17 @@ def gogogo_btrack(labels, config_file, radius, tracks_out):
consistently labelled through time
"""

objects = btrack.utils.segmentation_to_objects(labels)
objects = btrack.utils.segmentation_to_objects(
labels,
intensity_image=intensity_image,
use_weighted_centroid=use_weighted_centroid,
properties=properties,
num_workers=4,
)

tracking_updates = ['motion']
if len(properties) > 0:
tracking_updates.append('visual')

with btrack.BayesianTracker(verbose=False) as tracker:
tracker.configure_from_file(config_file)
Expand All @@ -43,14 +68,16 @@ def gogogo_btrack(labels, config_file, radius, tracks_out):
tracker.volume = ((0, labels.shape[-2]), (0, labels.shape[-1]), (-1e5, 1e5))

# track them (in interactive mode)
tracker.track(step_size=100)
tracker.track(tracking_updates=tracking_updates, step_size=50)

# generate hypotheses and run the global optimizer
tracker.optimize()

# tracker.export(tracks_out, obj_type='obj_type_1')
if tracks_out is not None:
tracker.export(tracks_out, obj_type='obj_type_1')

tracks = tracker.tracks
# all_tracks = pd.concat([pd.DataFrame(t.to_dict()) for t in tracks])

tracked_labels = btrack.utils.update_segmentation(labels, tracks)
return tracked_labels

Expand Down
2 changes: 1 addition & 1 deletion microutil/loading/legacy.py
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ def dir_to_df(dir):
if i == 0:
all_files = df
else:
all_files = all_files.append(df)
all_files = pd.concat([all_files, df])

else:
all_files = dir_to_df(data_dir)
Expand Down
11 changes: 8 additions & 3 deletions microutil/preprocess.py
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ def normalize_fluo(imgs, mode_cutoff_ratio=3.0, n_bins=4096, eps=0.01, dims=list
)


def save_dataset(ds, zarr_path, position_slice, scene_char='S'):
def save_dataset(ds, zarr_path, position_slice, scene_char='S', scene_size=None):
"""
Save a dataset into a specific region of an existing xarray zarr store.
Intended to be used in preprocessing scripts where each scene is being
Expand All @@ -114,14 +114,19 @@ def save_dataset(ds, zarr_path, position_slice, scene_char='S'):
Passed to the region kwarg of xr.Dataset.to_zarr.
scene_char : str default 'S'
Name of the scene dimension.
scene_size : int or None
If the zarr store does not exist but you know the number of scenes from another
source, pass it here to create the dataset with the appropriate dimension.
Returns
-------
None: just write the dataset to disk.
"""
if scene_size is None:
existing_ds = xr.open_zarr(zarr_path)
scene_size = existing_ds.sizes[scene_char]

existing_ds = xr.open_zarr(zarr_path)
dummy = _make_dask_dummy(ds).expand_dims({scene_char: existing_ds.sizes[scene_char]})
dummy = _make_dask_dummy(ds).expand_dims({scene_char: scene_size})
_ = dummy.to_zarr(zarr_path, consolidated=True, compute=False, mode='a')
ds.expand_dims(scene_char).to_zarr(zarr_path, region={scene_char: position_slice}, mode='a')

Expand Down
2 changes: 1 addition & 1 deletion microutil/segmentation.py
Original file line number Diff line number Diff line change
Expand Up @@ -274,7 +274,7 @@ def individualize(
def _individualize(mask, topology, threshold):

if topology is None:
topology = -ndi.morphology.distance_transform_edt(mask)
topology = -ndi.distance_transform_edt(mask)

peak_idx = peak_local_max(-topology, min_distance, threshold_abs=threshold)
peak_mask = np.zeros_like(mask, dtype=bool)
Expand Down
2 changes: 1 addition & 1 deletion microutil/tests/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,4 +20,4 @@ def open_zarr(path):
Utility to open an xarray dataset from either dir that pytest might be called from.
If called from root the path will be different than in the test dir
"""
return xr.open_zarr(str(dir_.joinpath(path)))
return xr.open_zarr(str(dir_.joinpath(path)), consolidated=False)

0 comments on commit e4c772b

Please sign in to comment.