Skip to content

Commit

Permalink
Merge pull request #586 from MouseLand/dev
Browse files Browse the repository at this point in the history
Dev
  • Loading branch information
carsen-stringer authored Nov 30, 2020
2 parents 17501ef + d46868d commit 6c84f25
Show file tree
Hide file tree
Showing 42 changed files with 1,391 additions and 422 deletions.
2 changes: 1 addition & 1 deletion .dvc/config
Original file line number Diff line number Diff line change
Expand Up @@ -11,4 +11,4 @@
url = gdrive://0ACw_QYaWTX7mUk9PVA
gdrive_use_service_account = true
gdrive_service_account_email = [email protected]
gdrive_service_account_p12_file_path = .dvc/creds/suite2p-testdata-dvc-b0d23791539c.p12
gdrive_service_account_p12_file_path = creds/suite2p-testdata-dvc-b0d23791539c.p12
2 changes: 1 addition & 1 deletion data/test_data.dvc
Original file line number Diff line number Diff line change
@@ -1,3 +1,3 @@
outs:
- md5: 273c132e8b2d31901a45c70188e68535.dir
- md5: eaa434ed20e421fccab8089842be58b5.dir
path: test_data
15 changes: 15 additions & 0 deletions docs/gui.rst
Original file line number Diff line number Diff line change
Expand Up @@ -236,6 +236,21 @@ added to the *.npy files as the first N ROIs (where N is the number that you dre
.. image:: _static/manual_roi.png
:width: 600

Merging ROIs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can merge selected ROIs (multi-select with CTRL) by pressing ALT+ENTER,
or get suggested merges in the "Merge ROI" menu. The merged ROIs then must
be saved before you close the GUI to write the new ROIs to the *.npy files.
Each merged ROI is appended to the end of the list of ROIs (in stat), and the
ROIs that were merged to create it are listed in the key 'imerge'. Note that in the stat file
and the other output files the original ROIs (those that were combined to create the
merged ROI) are NOT removed, so that you retain the original signals and the original
suite2p output. In the GUI ROI view the merged ROIs are shown.
The merged fluorescence trace is computed by taking the mean of the selected cells'
fluorescence traces. The list of merges is available in the stat output, so you can
choose alternative strategies for combining the signals if desired.

View registered binary
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Expand Down
12 changes: 6 additions & 6 deletions docs/outputs.rst
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,13 @@ All can be loaded in python with numpy
import numpy as np
F = np.load('F.npy')
Fneu = np.load('Fneu.npy')
spks = np.load('spks.npy')
stat = np.load('stat.npy')
ops = np.load('ops.npy')
F = np.load('F.npy', allow_pickle=True)
Fneu = np.load('Fneu.npy', allow_pickle=True)
spks = np.load('spks.npy', allow_pickle=True)
stat = np.load('stat.npy', allow_pickle=True)
ops = np.load('ops.npy', allow_pickle=True)
ops = ops.item()
iscell = np.load('iscell.npy')
iscell = np.load('iscell.npy', allow_pickle=True)
MATLAB output
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Expand Down
10 changes: 5 additions & 5 deletions jupyter/make_ops.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,9 @@
"import numpy as np\n",
"import sys\n",
"sys.path.insert(0, 'C:/Users/carse/github/suite2p/')\n",
"from suite2p import run_s2p\n",
"from suite2p import default_ops\n",
"\n",
"ops = run_s2p.default_ops()\n",
"ops = default_ops()\n",
"\n",
"np.save('../suite2p/ops/ops.npy', ops)"
]
Expand All @@ -24,7 +24,7 @@
"source": [
"import numpy as np\n",
"\n",
"ops = run_s2p.default_ops()\n",
"ops = default_ops()\n",
"\n",
"ops['1Preg'] = True\n",
"ops['smooth_sigma'] = 6\n",
Expand All @@ -43,7 +43,7 @@
"source": [
"import numpy as np\n",
"\n",
"ops = run_s2p.default_ops()\n",
"ops = default_ops()\n",
"\n",
"ops['connected'] = True\n",
"ops['allow_overlap'] = True\n",
Expand Down Expand Up @@ -74,4 +74,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
}
}
2 changes: 1 addition & 1 deletion jupyter/run_pipeline_sbx.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -62,7 +62,7 @@
"ops['um_per_pixel_y'] = um_per_pix_y\n",
"\n",
"# run one experiment\n",
"opsEnd=run_s2p.run_s2p(ops=ops,db={})"
"opsEnd=run_s2p(ops=ops,db={})"
]
}
],
Expand Down
22 changes: 11 additions & 11 deletions jupyter/run_pipeline_tiffs_or_batch.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -10,10 +10,10 @@
"import sys\n",
"# option to import from github folder\n",
"sys.path.insert(0, 'C:/Users/carse/github/suite2p/')\n",
"from suite2p import run_s2p\n",
"from suite2p import run_s2p, default_ops\n",
"\n",
"# set your options for running\n",
"ops = run_s2p.default_ops() # populates ops with the default options"
"ops = default_ops() # populates ops with the default options"
]
},
{
Expand All @@ -38,7 +38,7 @@
" }\n",
"\n",
"# run one experiment\n",
"opsEnd=run_s2p.run_s2p(ops=ops,db=db)"
"opsEnd = run_s2p(ops=ops, db=db)"
]
},
{
Expand All @@ -53,7 +53,7 @@
"db.append({'data_path': ['C:/Users/carse/github/tiffs2']})\n",
"\n",
"for dbi in db:\n",
" opsEnd=run_s2p.run_s2p(ops=ops,db=dbi)"
" opsEnd = run_s2p(ops=ops, db=dbi)"
]
},
{
Expand All @@ -77,7 +77,7 @@
"\n",
"\n",
"# run one experiment\n",
"opsEnd=run_s2p.run_s2p(ops=ops,db=db)\n"
"opsEnd = run_s2p(ops=ops,db=db)\n"
]
},
{
Expand Down Expand Up @@ -109,7 +109,7 @@
"\n",
"\n",
"# run one experiment\n",
"opsEnd=run_s2p.run_s2p(ops=ops,db=db)\n"
"opsEnd=run_s2p(ops=ops, db=db)\n"
]
},
{
Expand All @@ -121,7 +121,7 @@
"## change the save directory from 'suite2p' to a chosen name\n",
"# note the fast_disk will always be in 'suite2p', just the save_path will change\n",
"\n",
"ops = run_s2p.default_ops() # populates ops with the default options\n",
"ops = default_ops() # populates ops with the default options\n",
"ops['sparse_mode'] = 1\n",
"ops['threshold_scaling'] = 3.0\n",
"db = {\n",
Expand All @@ -134,7 +134,7 @@
" }\n",
"\n",
"# run one experiment\n",
"opsEnd=run_s2p.run_s2p(ops=ops,db=db)"
"opsEnd = run_s2p(ops=ops, db=db)"
]
},
{
Expand All @@ -145,7 +145,7 @@
"source": [
"# h5py file with multiple data fields (untested)\n",
"\n",
"ops = run_s2p.default_ops() # populates ops with the default options\n",
"ops = default_ops() # populates ops with the default options\n",
"ops['nplanes'] = 12\n",
"ops['nchannels'] = 2\n",
"ops['fs'] = 5.0\n",
Expand All @@ -158,7 +158,7 @@
" }\n",
"\n",
"# run one experiment\n",
"opsEnd=run_s2p.run_s2p(ops=ops,db=db)"
"opsEnd = run_s2p(ops=ops, db=db)"
]
}
],
Expand All @@ -183,4 +183,4 @@
},
"nbformat": 4,
"nbformat_minor": 2
}
}
28 changes: 13 additions & 15 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,32 +17,30 @@
'setuptools_scm',
],
use_scm_version=True,
install_requires=[], # see environment.yml for this info.
tests_require=[
'pytest',
'pytest-qt',
],
extras_require={
"docs": [
'sphinx>=3.0',
'sphinxcontrib-apidoc',
'sphinx_rtd_theme',
'sphinx-prompt',
'sphinx-autodoc-typehints',
'importlib-metadata',
install_requires=['importlib-metadata',
'natsort',
'rastermap>0.1.0',
'tifffile',
'scanimage-tiff-reader>=1.4.1',
'pyqtgraph',
'importlib-metadata',
'paramiko',
'numpy>=1.16',
'numba>=0.43.1',
'matplotlib',
'scipy',
'h5py',
'scikit-learn',
'scikit-learn',], # see environment.yml for this info.
tests_require=[
'pytest',
'pytest-qt',
],
extras_require={
"docs": [
'sphinx>=3.0',
'sphinxcontrib-apidoc',
'sphinx_rtd_theme',
'sphinx-prompt',
'sphinx-autodoc-typehints',
],
# Note: Available in pypi, but cleaner to install as pyqt from conda.
"gui": [
Expand Down
1 change: 1 addition & 0 deletions suite2p/classification/classifier.py
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,7 @@ def _get_logp(self, stats):
x = stats[:,n]
x[x<self.grid[0,n]] = self.grid[0,n]
x[x>self.grid[-1,n]] = self.grid[-1,n]
x[np.isnan(x)] = self.grid[0,n]
ibin = np.digitize(x, self.grid[:,n], right=True) - 1
logp[:,n] = np.log(self.p[ibin,n] + 1e-6) - np.log(1-self.p[ibin,n] + 1e-6)
return logp
Expand Down
4 changes: 3 additions & 1 deletion suite2p/classification/classify.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,6 @@ def classify(stat: np.ndarray,
):
"""Returns array of classifier output from classification process."""
keys = list(set(keys).intersection(set(stat[0])))
return Classifier(classfile, keys=keys).run(stat)
print(keys)
iscell = Classifier(classfile, keys=keys).run(stat)
return iscell
Loading

0 comments on commit 6c84f25

Please sign in to comment.