Merge pull request #1211 from flatironinstitute/cli_demos_refactor
Refactor CLI demos to be applications
pgunn authored May 1, 2024
2 parents df5b487 + b45633a commit 6581329
Showing 34 changed files with 1,029 additions and 1,156 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -62,6 +62,8 @@ The main use cases and notebooks are listed in the following table:

A comprehensive list of references, where you can find detailed discussion of the methods and their development, can be found [here](https://caiman.readthedocs.io/en/master/CaImAn_features_and_references.html#references).

+# CLI demos
+Caiman also provides command-line demos, similar to the notebooks, that demonstrate how to work with the codebase outside of Jupyter. They take their configuration primarily from JSON files, which you will want to modify for your data and its specifics, and should be reasonably easy to adapt if they don't already do what you want (in particular around saving results; a standard output format for Caiman is planned for a future release). To run them, activate your environment and find the demos in demos/general under your caiman data directory; you can run them like any other Python application, or edit them with your code editor. Each demo comes with a JSON configuration file that you can customise. The README in the demos directory covers some of this.

# How to get help
- [Online documentation](https://caiman.readthedocs.io/en/latest/) contains a lot of general information about Caiman, the parameters, how to interpret its outputs, and more.
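A concrete sketch of the configuration step described in the README addition above: the demos read their settings from JSON, so pointing one at your own data is typically just a matter of editing a file. The config filename and the "fnames" key below are illustrative assumptions, not a documented schema; check the JSON file that ships with each demo.

```python
import json

config_path = "demo_pipeline_params.json"  # hypothetical name; use the demo's own file

with open(config_path) as f:
    cfg = json.load(f)

# Point the demo at your own recording (the key name is an assumption)
cfg["fnames"] = ["/path/to/your/movie.tif"]

with open(config_path, "w") as f:
    json.dump(cfg, f, indent=2)
```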
63 changes: 0 additions & 63 deletions caiman/base/movies.py
@@ -673,69 +673,6 @@ def NonnegativeMatrixFactorization(self,

return space_components, time_components

-def online_NMF(self,
-n_components: int = 30,
-method: str = 'nnsc',
-lambda1: int = 100,
-iterations: int = -5,
-model=None,
-**kwargs) -> tuple[np.ndarray, np.ndarray]:
-""" Method performing online matrix factorization and using the spams
-(http://spams-devel.gforge.inria.fr/doc-python/html/index.html) package from Inria.
-Implements both the nmf and nnsc methods
-Args:
-n_components: int
-method: 'nnsc' or 'nmf' (see http://spams-devel.gforge.inria.fr/doc-python/html/index.html)
-lambda1: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
-iterations: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
-batchsize: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
-model: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
-**kwargs: more arguments to be passed to nmf or nnsc
-Returns:
-time_comps
-space_comps
-"""
-try:
-import spams # XXX consider moving this to the head of the file
-except:
-logging.error("You need to install the SPAMS package")
-raise
-
-T, d1, d2 = np.shape(self)
-d = d1 * d2
-X = np.asfortranarray(np.reshape(self, [T, d], order='F'))
-
-if method == 'nmf':
-(time_comps, V) = spams.nmf(X, return_lasso=True, K=n_components, numThreads=4, iter=iterations, **kwargs)
-
-elif method == 'nnsc':
-(time_comps, V) = spams.nnsc(X,
-return_lasso=True,
-K=n_components,
-lambda1=lambda1,
-iter=iterations,
-model=model,
-**kwargs)
-else:
-raise Exception('Method unknown')
-
-space_comps = []
-
-for _, mm in enumerate(V):
-space_comps.append(np.reshape(mm.todense(), (d1, d2), order='F'))
-
-return time_comps, np.array(space_comps)

def IPCA(self, components: int = 50, batch: int = 1000) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Iterative Principal Component analysis, see sklearn.decomposition.incremental_pca
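The removed online_NMF was the spams-backed factorization in this file. For readers who relied on it, a rough stand-in with the same input/output layout can be built on scikit-learn's batch NMF; this is an illustrative sketch under that substitution, not the codebase's replacement (the NonnegativeMatrixFactorization method above remains available).

```python
import numpy as np
from sklearn.decomposition import NMF

def movie_nmf(mov: np.ndarray, n_components: int = 30):
    """Factorize a non-negative (T, d1, d2) movie, mirroring the
    (time_comps, space_comps) layout of the removed online_NMF."""
    T, d1, d2 = mov.shape
    X = np.reshape(mov, (T, d1 * d2), order='F')   # frames x pixels
    model = NMF(n_components=n_components, init='nndsvd', max_iter=200)
    time_comps = model.fit_transform(X)            # (T, n_components)
    space_comps = np.stack([comp.reshape((d1, d2), order='F')
                            for comp in model.components_])
    return time_comps, space_comps
```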
11 changes: 11 additions & 0 deletions caiman/base/timeseries.py
@@ -147,6 +147,7 @@ def save(self,
Args:
file_name: str
name of file. Possible formats are tif, avi, npz, mmap and hdf5
+If the filename does not include a path, the file will be saved in a temporary directory under caiman_data
to32: Bool
whether to transform to 32 bits
@@ -165,6 +166,9 @@
if saving as .tif, specifies the compression level
if saving as .avi or .mkv, compress=0 uses the IYUV codec, otherwise the FFV1 codec is used
+Returns:
+generated_filename: The full filename, path included, where the data was saved
Raises:
Exception 'Extension Unknown'
@@ -197,6 +201,8 @@ def foo(i):
if to32 and not ('float32' in str(self.dtype)):
curfr = curfr.astype(np.float32)
tif.save(curfr, compress=compress)
+return file_name

elif extension == '.npz':
if to32 and not ('float32' in str(self.dtype)):
input_arr = self.astype(np.float32)
@@ -209,6 +215,8 @@ def foo(i):
fr=self.fr,
meta_data=self.meta_data,
file_name=self.file_name)
+return file_name

elif extension in ('.avi', '.mkv'):
codec = None
if compress == 0:
@@ -241,6 +249,7 @@ def foo(i):
for d in data:
vw.write(cv2.cvtColor(d, cv2.COLOR_GRAY2BGR))
vw.release()
+return file_name

elif extension == '.mat':
if self.file_name[0] is not None:
@@ -271,6 +280,7 @@ def foo(i):
'meta_data': self.meta_data,
'file_name': f_name
})
+return file_name

elif extension in ('.hdf5', '.h5'):
with h5py.File(file_name, "w") as f:
@@ -289,6 +299,7 @@ def foo(i):
if self.meta_data[0] is not None:
logging.debug("Metadata for saved file: " + str(self.meta_data))
dset.attrs["meta_data"] = cpk.dumps(self.meta_data)
+return file_name
elif extension == '.mmap':
base_name = name

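With save() now returning the generated path, callers can capture where the data actually landed rather than guessing, which matters now that bare filenames are relocated into the temporary directory. A sketch of the new contract (the input path is illustrative):

```python
import caiman

mov = caiman.load('/path/to/movie.tif')    # any supported movie file
saved_path = mov.save('movie_copy.hdf5')   # bare name, so it is relocated
print(saved_path)                          # full path under caiman_data's temp dir
```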
27 changes: 9 additions & 18 deletions caiman/behavior/behavior.py
@@ -37,6 +37,10 @@ def select_roi(img: np.ndarray, n_rois: int = 1) -> list:
each element is a mask considered an ROI
"""

+# FIXME This function depends on particular builds of OpenCV
+# and may be difficult to support moving forward; would be good to
+# move this kind of code out of the core and find more portable ways
+# to do it
masks = []
for _ in range(n_rois):
fig = plt.figure()
@@ -130,8 +134,8 @@ def extract_magnitude_and_angle_from_OF(spatial_filter_,
x, y = scipy.signal.medfilt(time_trace, kernel_size=[1, 1]).T
x = scipy.signal.savgol_filter(x.squeeze(), sav_filter_size, 1)
y = scipy.signal.savgol_filter(y.squeeze(), sav_filter_size, 1)
-mag, dirct = to_polar(x - caiman.components_evaluation.mode_robust(x),
-y - caiman.components_evaluation.mode_robust(y))
+mag, dirct = to_polar(x - caiman.utils.stats.mode_robust(x),
+y - caiman.utils.stats.mode_robust(y))
dirct = scipy.signal.medfilt(dirct.squeeze(), kernel_size=1).T

# normalize to pixel units
@@ -325,25 +329,12 @@ def extract_components(mov_tot,

if method_factorization == 'nmf':
nmf = NMF(n_components=n_components, **kwargs)

time_trace = nmf.fit_transform(newm)
spatial_filter = nmf.components_
spatial_filter = np.concatenate([np.reshape(sp, (d1, d2))[np.newaxis, :, :] for sp in spatial_filter], axis=0)

-elif method_factorization == 'dict_learn':
-import spams
-newm = np.asfortranarray(newm, dtype=np.float32)
-time_trace = spams.trainDL(newm, K=n_components, mode=0, lambda1=1, posAlpha=True, iter=max_iter_DL)
-
-spatial_filter = spams.lasso(newm,
-D=time_trace,
-return_reg_path=False,
-lambda1=0.01,
-mode=spams.spams_wrap.PENALTY,
-pos=True)
-
-spatial_filter = np.concatenate([np.reshape(sp, (d1, d2))[np.newaxis, :, :] for sp in spatial_filter.toarray()],
-axis=0)
+else:
+# Caiman used to support a method_factorization called dict_learn, implemented using spams.lasso
+raise Exception(f"Unknown or unsupported method_factorization: {method_factorization}")

time_trace = [np.reshape(ttr, (c, T)).T for ttr in time_trace.T]

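Two things changed here: mode_robust is now imported from its home in caiman.utils.stats rather than caiman.components_evaluation, and extract_components no longer offers the spams-backed dict_learn factorization, leaving 'nmf' as the supported method. A small sketch of the baseline-subtraction idiom used above (the trace is synthetic):

```python
import numpy as np
from caiman.utils.stats import mode_robust

# Robust mode as a baseline estimate, as applied to the optical-flow traces
trace = np.random.randn(1000).astype(np.float32) + 3.0
baseline = mode_robust(trace)
centered = trace - baseline
```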
4 changes: 2 additions & 2 deletions caiman/mmapping.py
@@ -405,7 +405,7 @@ def save_memmap(filenames:list[str],
recompute_each_memmap = True


-if recompute_each_memmap or (remove_init>0) or (idx_xy is not None)\
+if recompute_each_memmap or (remove_init > 0) or (idx_xy is not None)\
or (xy_shifts is not None) or (add_to_movie != 0) or (border_to_0>0)\
or slices is not None:

@@ -527,7 +527,7 @@ def save_memmap(filenames:list[str],
sys.stdout.flush()
Ttot = Ttot + T

-fname_new = caiman.paths.fn_relocated(fname_tot + f'_frames_{Ttot}.mmap')
+fname_new = os.path.join(caiman.paths.get_tempdir(), caiman.paths.fn_relocated(f'{fname_tot}_frames_{Ttot}.mmap'))
try:
# need to explicitly remove destination on windows
os.unlink(fname_new)
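Beyond the whitespace cleanup, the substantive change is that the final memmap filename is now anchored under Caiman's temporary directory. Assuming the usual top-level re-export of save_memmap, the effect looks like this (a sketch; the exact filename encodes dimensions and frame count):

```python
import caiman

# The returned path now lives under caiman.paths.get_tempdir()
# rather than the current working directory.
fname_new = caiman.save_memmap(['/path/to/movie.tif'], base_name='memmap_', order='C')
print(fname_new)  # e.g. <caiman_data>/temp/memmap_..._frames_<T>.mmap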
14 changes: 6 additions & 8 deletions caiman/motion_correction.py
@@ -167,9 +167,12 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig
"""
if 'ndarray' in str(type(fname)) or isinstance(fname, caiman.base.movies.movie):
-logging.info('Creating file for motion correction "tmp_mov_mot_corr.hdf5"')
-caiman.movie(fname).save('tmp_mov_mot_corr.hdf5')
-fname = ['tmp_mov_mot_corr.hdf5']
+mc_tempfile = caiman.paths.fn_relocated('tmp_mov_mot_corr.hdf5')
+if os.path.isfile(mc_tempfile):
+os.remove(mc_tempfile)
+logging.info(f"Creating file for motion correction: {mc_tempfile}")
+caiman.movie(fname).save(mc_tempfile)
+fname = [mc_tempfile]

if not isinstance(fname, list):
fname = [fname]
@@ -3124,12 +3127,7 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0
if base_name is None:
base_name = os.path.splitext(os.path.split(fname)[1])[0]
base_name = caiman.paths.fn_relocated(base_name)

fname_tot:Optional[str] = caiman.paths.memmap_frames_filename(base_name, dims, T, order)
-if isinstance(fname, tuple):
-fname_tot = os.path.join(os.path.split(fname[0])[0], fname_tot)
-else:
-fname_tot = os.path.join(os.path.split(fname)[0], fname_tot)

np.memmap(fname_tot, mode='w+', dtype=np.float32,
shape=caiman.mmapping.prepare_shape(shape_mov), order=order)
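The constructor change means an in-memory movie passed to MotionCorrect is spooled to a temp file under caiman_data (with any stale copy removed first) instead of leaving tmp_mov_mot_corr.hdf5 in the working directory. A sketch of that entry point, using the signature shown above and a synthetic stand-in movie:

```python
import numpy as np
from caiman.motion_correction import MotionCorrect

mov = np.random.rand(100, 64, 64).astype(np.float32)  # stand-in movie
mc = MotionCorrect(mov, max_shifts=(6, 6))  # array input triggers the temp-file path
mc.motion_correct(save_movie=True)
```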
11 changes: 6 additions & 5 deletions caiman/paths.py
@@ -45,18 +45,18 @@ def get_tempdir() -> str:
os.makedirs(temp_under_data)
return temp_under_data

-def fn_relocated(fn:str) -> str:
+def fn_relocated(fn:str, force_temp:bool=False) -> str:
""" If the provided filename does not contain any path elements, this returns what would be its absolute pathname
as located in get_tempdir(). Otherwise it just returns what it is passed.
The intent behind this is to ease having functions that explicitly mention pathnames have them go where they want,
but if all they think about is filenames, they go under CaImAn's notion of its temporary dir. This is under the
principle of "sensible defaults, but users can override them".
"""
-if not 'CAIMAN_NEW_TEMPFILE' in os.environ: # XXX We will ungate this in a future version of caiman
-return fn
-if str(os.path.basename(fn)) == str(fn): # No path stuff
+if os.path.split(fn)[0] == '': # No path stuff
return os.path.join(get_tempdir(), fn)
+elif force_temp:
+return os.path.join(get_tempdir(), os.path.split(fn)[1])
else:
return fn

@@ -124,4 +124,5 @@ def generate_fname_tot(base_name:str, dims:list[int], order:str) -> str:
d1, d2, d3 = dims[0], dims[1], dims[2]
ret = '_'.join([base_name, 'd1', str(d1), 'd2', str(d2), 'd3', str(d3), 'order', order])
ret = re.sub(r'(_)+', '_', ret) # Turn repeated underscores into just one
-return ret
+return fn_relocated(ret, force_temp=True)

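The reworked fn_relocated drops the CAIMAN_NEW_TEMPFILE gate and gains a force_temp switch, which generate_fname_tot now uses so generated memmap names always resolve into the temp directory. Its behavior, as the code above implies (paths are illustrative):

```python
from caiman.paths import fn_relocated

fn_relocated('results.mmap')                   # bare name -> <tempdir>/results.mmap
fn_relocated('/data/run1/results.mmap')        # explicit path is respected
fn_relocated('/data/run1/results.mmap',
             force_temp=True)                  # -> <tempdir>/results.mmap
```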
20 changes: 7 additions & 13 deletions caiman/source_extraction/cnmf/cnmf.py
@@ -71,7 +71,7 @@ def __init__(self, n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p
ssub=2, tsub=2, p_ssub=1, p_tsub=1, method_init='greedy_roi', alpha_snmf=0.5,
rf=None, stride=None, memory_fact=1, gnb=1, nb_patch=1, only_init_patch=False,
method_deconvolution='oasis', n_pixels_per_process=4000, block_size_temp=5000, num_blocks_per_run_temp=20,
-block_size_spat=5000, num_blocks_per_run_spat=20,
+num_blocks_per_run_spat=20,
check_nan=True, skip_refinement=False, normalize_init=True, options_local_NMF=None,
minibatch_shape=100, minibatch_suff_stat=3,
update_num_comps=True, rval_thr=0.9, thresh_fitness_delta=-20,
@@ -86,7 +86,7 @@ def __init__(self, n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p
max_num_added=3, min_num_trial=2, thresh_CNN_noisy=0.5,
fr=30, decay_time=0.4, min_SNR=2.5, ssub_B=2, init_iter=2,
sniper_mode=False, use_peak_max=False, test_both=False,
-expected_comps=500, max_merge_area=None, params=None):
+expected_comps=500, params=None):
"""
Constructor of the CNMF method
@@ -247,8 +247,6 @@ def __init__(self, n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p
init_iter: int, optional
number of iterations for 1-photon imaging initialization
-max_merge_area: int, optional
-maximum area (in pixels) of merged components, used to determine whether to merge components during fitting process
"""

self.dview = dview
@@ -272,7 +270,7 @@ def __init__(self, n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p
gnb=gnb, normalize_init=normalize_init, options_local_NMF=options_local_NMF,
ring_size_factor=ring_size_factor, rolling_length=rolling_length, rolling_sum=rolling_sum,
ssub=ssub, ssub_B=ssub_B, tsub=tsub,
-block_size_spat=block_size_spat, num_blocks_per_run_spat=num_blocks_per_run_spat,
+num_blocks_per_run_spat=num_blocks_per_run_spat,
block_size_temp=block_size_temp, num_blocks_per_run_temp=num_blocks_per_run_temp,
update_background_components=update_background_components,
method_deconvolution=method_deconvolution, p=p, s_min=s_min,
@@ -284,9 +282,7 @@ def __init__(self, n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p
n_refit=n_refit, num_times_comp_updated=num_times_comp_updated, simultaneously=simultaneously,
sniper_mode=sniper_mode, test_both=test_both, thresh_CNN_noisy=thresh_CNN_noisy,
thresh_fitness_delta=thresh_fitness_delta, thresh_fitness_raw=thresh_fitness_raw, thresh_overlap=thresh_overlap,
-update_num_comps=update_num_comps, use_dense=use_dense, use_peak_max=use_peak_max, alpha_snmf=alpha_snmf,
-max_merge_area=max_merge_area
-)
+update_num_comps=update_num_comps, use_dense=use_dense, use_peak_max=use_peak_max, alpha_snmf=alpha_snmf)
else:
self.params = params
params.set('patch', {'n_processes': n_processes})
@@ -479,7 +475,6 @@ def fit(self, images, indices=(slice(None), slice(None))):
self.params.set('spatial', {'n_pixels_per_process': self.params.get('preprocess', 'n_pixels_per_process')})

logging.info('using ' + str(self.params.get('preprocess', 'n_pixels_per_process')) + ' pixels per process')
-logging.info('using ' + str(self.params.get('spatial', 'block_size_spat')) + ' block_size_spat')
logging.info('using ' + str(self.params.get('temporal', 'block_size_temp')) + ' block_size_temp')

if self.params.get('patch', 'rf') is None: # no patches
@@ -540,7 +535,7 @@ def fit(self, images, indices=(slice(None), slice(None))):
logging.info('refinement...')
if self.params.get('merging', 'do_merge'):
logging.info('merging components ...')
-self.merge_comps(Yr, mx=50, fast_merge=True, max_merge_area=self.params.get('merging', 'max_merge_area'))
+self.merge_comps(Yr, mx=50, fast_merge=True)

logging.info('Updating spatial ...')

@@ -922,7 +917,7 @@ def update_spatial(self, Y, use_init=True, **kwargs):

return self

-def merge_comps(self, Y, mx=50, fast_merge=True, max_merge_area=None):
+def merge_comps(self, Y, mx=50, fast_merge=True):
"""merges components
"""
self.estimates.A, self.estimates.C, self.estimates.nr, self.estimates.merged_ROIs, self.estimates.S, \
@@ -933,8 +928,7 @@ def merge_comps(self, Y, mx=50, fast_merge=True):
self.params.get_group('spatial'), dview=self.dview,
bl=self.estimates.bl, c1=self.estimates.c1, sn=self.estimates.neurons_sn,
g=self.estimates.g, thr=self.params.get('merging', 'merge_thr'), mx=mx,
-fast_merge=fast_merge, merge_parallel=self.params.get('merging', 'merge_parallel'),
-max_merge_area=max_merge_area)
+fast_merge=fast_merge, merge_parallel=self.params.get('merging', 'merge_parallel'))

return self

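With block_size_spat and max_merge_area removed from the constructor and from merge_comps, the remaining knobs travel through the params object, which is the pattern the demos already use. A sketch of that call pattern (parameter values are illustrative):

```python
from caiman.source_extraction.cnmf import cnmf, params

opts = params.CNMFParams(params_dict={
    'fr': 30,           # imaging rate
    'p': 1,             # AR order for deconvolution
    'merge_thr': 0.85,  # correlation threshold for merging
})
cnm = cnmf.CNMF(n_processes=1, params=opts)
# cnm = cnm.fit(images)  # images: memory-mapped movie as a 3D array
```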
5 changes: 2 additions & 3 deletions caiman/source_extraction/cnmf/estimates.py
@@ -1235,7 +1235,7 @@ def deconvolve(self, params, dview=None, dff_flag=False):
self.S_dff = np.stack([results[1][i] for i in order])

def merge_components(self, Y, params, mx=50, fast_merge=True,
-dview=None, max_merge_area=None):
+dview=None):
"""merges components
"""
# FIXME This method shares its name with a function elsewhere in the codebase (which it wraps)
@@ -1247,8 +1247,7 @@ def merge_components(self, Y, params, mx=50, fast_merge=True,
params.get_group('spatial'), dview=dview,
bl=self.bl, c1=self.c1, sn=self.neurons_sn,
g=self.g, thr=params.get('merging', 'merge_thr'), mx=mx,
-fast_merge=fast_merge, merge_parallel=params.get('merging', 'merge_parallel'),
-max_merge_area=max_merge_area)
+fast_merge=fast_merge, merge_parallel=params.get('merging', 'merge_parallel'))

def manual_merge(self, components, params):
''' merge a given list of components. The indices