Showing 246 changed files with 53,571 additions and 0 deletions.
@@ -0,0 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 8c982f8d80782740aefa675a008ebc5e
tags: 645f666f9bcd5a90fca523b33c5a78b7
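
The comment above describes how Sphinx decides between incremental and full rebuilds: the stored config digest is compared against a hash of the current configuration, and a missing or changed digest forces a full rebuild. A minimal sketch of that idea, with hypothetical helper names and not Sphinx's actual implementation:

import hashlib


def config_fingerprint(config):
    """Hash a configuration dict into a stable hex digest (illustrative only)."""
    # Sort items so the same settings always yield the same digest.
    serialized = repr(sorted(config.items())).encode("utf-8")
    return hashlib.md5(serialized).hexdigest()


def needs_full_rebuild(stored_digest, current_config):
    # A missing or changed fingerprint means cached doctrees cannot be reused.
    return stored_digest is None or stored_digest != config_fingerprint(current_config)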
Binary file added: pr-preview/pr-389/.doctrees/auto_examples/norun_hcpfc_pearson.doctree (+12.6 KB)
Binary file added: pr-preview/pr-389/.doctrees/auto_examples/run_compute_parcel_mean.doctree (+29.3 KB)
Binary file added: pr-preview/pr-389/.doctrees/auto_examples/run_datagrabber_bids_datalad.doctree (+22.7 KB)
77 changes: 77 additions & 0 deletions
pr-preview/pr-389/_downloads/0129f8507efff39850cf153c51edb3ab/run_ets_rss_marker.py
@@ -0,0 +1,77 @@
""" | ||
Extracting root sum of squares from edge-wise timeseries. | ||
========================================================= | ||
This example uses a ``RSSETSMarker`` to compute root sum of squares | ||
of the edge-wise timeseries using the Schaefer parcellation | ||
(100 rois and 200 rois, 17 Yeo networks) for a 4D nifti BOLD file. | ||
Authors: Leonard Sasse, Sami Hamdan, Nicolas Nieto, Synchon Mandal | ||
License: BSD 3 clause | ||
""" | ||
|
||
import tempfile | ||
|
||
import junifer.testing.registry # noqa: F401 | ||
from junifer.api import collect, run | ||
from junifer.storage import SQLiteFeatureStorage | ||
from junifer.utils import configure_logging | ||
|
||
|
||
############################################################################### | ||
# Set the logging level to info to see extra information: | ||
configure_logging(level="INFO") | ||
|
||
############################################################################## | ||
# Define the DataGrabber interface | ||
datagrabber = { | ||
"kind": "SPMAuditoryTestingDataGrabber", | ||
} | ||
|
||
############################################################################### | ||
# Define the markers interface | ||
markers = [ | ||
{ | ||
"name": "Schaefer100x17_RSSETS", | ||
"kind": "RSSETSMarker", | ||
"parcellation": "Schaefer100x17", | ||
}, | ||
{ | ||
"name": "Schaefer200x17_RSSETS", | ||
"kind": "RSSETSMarker", | ||
"parcellation": "Schaefer200x17", | ||
}, | ||
] | ||
|
||
############################################################################### | ||
# Create a temporary directory for junifer feature extraction: | ||
# At the end you can read the extracted data into a ``pandas.DataFrame``. | ||
with tempfile.TemporaryDirectory() as tmpdir: | ||
# Define the storage interface | ||
storage = { | ||
"kind": "SQLiteFeatureStorage", | ||
"uri": f"{tmpdir}/test.sqlite", | ||
} | ||
# Run the defined junifer feature extraction pipeline | ||
run( | ||
workdir=tmpdir, | ||
datagrabber=datagrabber, | ||
markers=markers, | ||
storage=storage, | ||
elements=["sub001"], # we calculate for one subject only | ||
) | ||
# Collect extracted features data | ||
collect(storage=storage) | ||
# Create storage object to read in extracted features | ||
db = SQLiteFeatureStorage(uri=storage["uri"]) | ||
|
||
# List all the features | ||
print(db.list_features()) | ||
# Read extracted features | ||
df_rssets = db.read_df(feature_name="BOLD_Schaefer200x17_RSSETS_rss_ets") | ||
|
||
############################################################################### | ||
# Now we take a look at the dataframe | ||
df_rssets.head() |
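
For context, the root sum of squares of edge-wise timeseries used above comes from z-scoring each parcel's BOLD signal, taking the element-wise product for every pair of parcels (the edge timeseries), and then taking the root sum of squares across edges at each timepoint. A minimal NumPy sketch of that computation, with a hypothetical helper name and not junifer's internal ``RSSETSMarker`` implementation:

import numpy as np


def rss_of_edge_timeseries(bold):
    """Compute RSS of edge-wise timeseries from a (timepoints, parcels) array."""
    # z-score each parcel's timeseries
    z = (bold - bold.mean(axis=0)) / bold.std(axis=0)
    n_timepoints, n_parcels = z.shape
    # edge timeseries: element-wise product for every pair of parcels
    i, j = np.triu_indices(n_parcels, k=1)
    ets = z[:, i] * z[:, j]  # shape: (timepoints, n_edges)
    # root sum of squares across edges at each timepoint
    return np.sqrt((ets**2).sum(axis=1))


# Toy usage: 100 timepoints, 10 parcels
rss = rss_of_edge_timeseries(np.random.default_rng(0).standard_normal((100, 10)))
print(rss.shape)  # (100,)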
53 changes: 53 additions & 0 deletions
pr-preview/pr-389/_downloads/042dd4fe6bffbb0ea320b529af464ace/run_run_gmd_mean.py
@@ -0,0 +1,53 @@
""" | ||
UKB VBM GMD Extraction | ||
====================== | ||
Authors: Federico Raimondo | ||
License: BSD 3 clause | ||
""" | ||
import tempfile | ||
|
||
import junifer.testing.registry # noqa: F401 | ||
from junifer.api import run | ||
|
||
|
||
datagrabber = { | ||
"kind": "OasisVBMTestingDataGrabber", | ||
} | ||
|
||
markers = [ | ||
{ | ||
"name": "Schaefer1000x7_TrimMean80", | ||
"kind": "ParcelAggregation", | ||
"parcellation": "Schaefer1000x7", | ||
"method": "trim_mean", | ||
"method_params": {"proportiontocut": 0.2}, | ||
}, | ||
{ | ||
"name": "Schaefer1000x7_Mean", | ||
"kind": "ParcelAggregation", | ||
"parcellation": "Schaefer1000x7", | ||
"method": "mean", | ||
}, | ||
{ | ||
"name": "Schaefer1000x7_Std", | ||
"kind": "ParcelAggregation", | ||
"parcellation": "Schaefer1000x7", | ||
"method": "std", | ||
}, | ||
] | ||
|
||
storage = { | ||
"kind": "SQLiteFeatureStorage", | ||
} | ||
|
||
with tempfile.TemporaryDirectory() as tmpdir: | ||
uri = f"{tmpdir}/test.sqlite" | ||
storage["uri"] = uri | ||
run( | ||
workdir="/tmp", | ||
datagrabber=datagrabber, | ||
markers=markers, | ||
storage=storage, | ||
) |
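
The ``ParcelAggregation`` markers above reduce the voxel-wise grey matter density values inside each parcel to one number per parcel, using a trimmed mean, plain mean, or standard deviation. A rough sketch of that aggregation on a label image, with a hypothetical helper name and not junifer's actual implementation:

import numpy as np
from scipy.stats import trim_mean


def aggregate_parcels(values, labels, method="mean"):
    """Aggregate voxel values per parcel label (labels > 0; 0 = background)."""
    out = {}
    for parcel in np.unique(labels[labels > 0]):
        voxels = values[labels == parcel]
        if method == "trim_mean":
            # mirrors "method_params": {"proportiontocut": 0.2} above
            out[parcel] = trim_mean(voxels, proportiontocut=0.2)
        elif method == "std":
            out[parcel] = voxels.std()
        else:
            out[parcel] = voxels.mean()
    return out


# Toy usage: 1000 voxels assigned to 5 parcels plus background
rng = np.random.default_rng(0)
vals, labs = rng.random(1000), rng.integers(0, 6, 1000)
print(aggregate_parcels(vals, labs, method="trim_mean"))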
43 changes: 43 additions & 0 deletions
pr-preview/pr-389/_downloads/04a7e637e66cd7d7b36569a7cb7e70b5/norun_ukbvm_gmd.py
@@ -0,0 +1,43 @@
""" | ||
UKB VBM GMD Extraction | ||
====================== | ||
Authors: Federico Raimondo | ||
License: BSD 3 clause | ||
""" | ||
|
||
|
||
from junifer.api import run | ||
|
||
|
||
markers = [ | ||
{ | ||
"name": "Schaefer1000x7_TrimMean80", | ||
"kind": "ParcelAggregation", | ||
"parcellation": "Schaefer1000x7", | ||
"method": "trim_mean", | ||
"method_params": {"proportiontocut": 0.2}, | ||
}, | ||
{ | ||
"name": "Schaefer1000x7_Mean", | ||
"kind": "ParcelAggregation", | ||
"parcellation": "Schaefer1000x7", | ||
"method": "mean", | ||
}, | ||
{ | ||
"name": "Schaefer1000x7_Std", | ||
"kind": "ParcelAggregation", | ||
"parcellation": "Schaefer1000x7", | ||
"method": "std", | ||
}, | ||
] | ||
|
||
run( | ||
workdir="/tmp", | ||
datagrabber="JuselessUKBVBM", | ||
elements=("sub-1627474", "ses-2"), | ||
markers=markers, | ||
storage="SQLDataFrameStorage", | ||
storage_params={"outpath": "/data/project/juniferexample"}, | ||
) |
Binary file added: pr-preview/pr-389/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip (+14.9 KB)
158 changes: 158 additions & 0 deletions
pr-preview/pr-389/_downloads/35c7ed013e0aac743386dbe5f567501f/run_junifer_julearn.ipynb
@@ -0,0 +1,158 @@
{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "\n# Run junifer and julearn.\n\nThis example uses a ParcelAggregation marker to compute the mean of each parcel\nusing the Schaefer parcellation (100 rois, 7 Yeo networks) for a 3D nifti to\nextract some features for machine learning using julearn to predict some other\ndata.\n\nAuthors: Leonard Sasse, Sami Hamdan, Nicolas Nieto, Synchon Mandal\n\nLicense: BSD 3 clause\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "import tempfile\n\nimport nilearn\nimport pandas as pd\nfrom julearn import run_cross_validation, PipelineCreator\n\nimport junifer.testing.registry  # noqa: F401\nfrom junifer.api import collect, run\nfrom junifer.storage.sqlite import SQLiteFeatureStorage\nfrom junifer.utils import configure_logging"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Set the logging level to info to see extra information:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "configure_logging(level=\"INFO\")"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Define the markers you want:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "marker_dicts = [\n    {\n        \"name\": \"Schaefer100x17_TrimMean80\",\n        \"kind\": \"ParcelAggregation\",\n        \"parcellation\": \"Schaefer100x17\",\n        \"method\": \"trim_mean\",\n        \"method_params\": {\"proportiontocut\": 0.2},\n    },\n    {\n        \"name\": \"Schaefer200x17_Mean\",\n        \"kind\": \"ParcelAggregation\",\n        \"parcellation\": \"Schaefer200x17\",\n        \"method\": \"mean\",\n    },\n]"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Define target and confounds for julearn machine learning:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "y = \"age\"\nconfound = \"sex\""
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Load the VBM phenotype data for machine learning data:\n- Fetch the Oasis dataset\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "oasis_dataset = nilearn.datasets.fetch_oasis_vbm()\nage = oasis_dataset.ext_vars[y][:10]\nsex = (\n    pd.Series(oasis_dataset.ext_vars[\"mf\"][:10])\n    .map(lambda x: 1 if x == \"F\" else 0)\n    .values\n)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Create a temporary directory for junifer feature extraction:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "with tempfile.TemporaryDirectory() as tmpdir:\n    storage = {\"kind\": \"SQLiteFeatureStorage\", \"uri\": f\"{tmpdir}/test.sqlite\"}\n    # run the defined junifer feature extraction pipeline\n    run(\n        workdir=\"/tmp\",\n        datagrabber={\"kind\": \"OasisVBMTestingDataGrabber\"},\n        markers=marker_dicts,\n        storage=storage,\n    )\n\n    # read in extracted features and add confounds and targets\n    # for julearn run cross validation\n    collect(storage)\n    db = SQLiteFeatureStorage(uri=storage[\"uri\"])\n\n    df_vbm = db.read_df(feature_name=\"VBM_GM_Schaefer200x17_Mean_aggregation\")\n    oasis_subjects = [x[0] for x in df_vbm.index]\n    df_vbm.index = oasis_subjects"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Using julearn for machine learning:\nWe predict the age given our vbm features and sex as a confound.\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "X = list(df_vbm.columns)\ndf_vbm[y] = age\ndf_vbm[confound] = sex\n\nX_types = {\n    \"features\": X,\n    \"confound\": confound,\n}\n\ncreator = PipelineCreator(problem_type=\"regression\", apply_to=\"features\")\ncreator.add(\"zscore\", apply_to=[\"features\", \"confound\"])\ncreator.add(\"confound_removal\", apply_to=\"features\", confounds=\"confound\")\ncreator.add(\"ridge\")\n\nscores = run_cross_validation(\n    X=X + [confound],\n    y=y,\n    X_types=X_types,\n    data=df_vbm,\n    model=creator,\n    cv=3,\n)\nprint(scores)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Interpretation of results:\nDoing machine learning with only 10 datapoints is not meaningful.\nThis explains the big variation in scores\nfor different cross-validation folds.\n\n"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.12.7"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
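
The last code cell of the notebook builds a julearn ``PipelineCreator`` that z-scores the features, removes the sex confound, and fits a ridge regressor under 3-fold cross-validation. For readers more familiar with plain scikit-learn, roughly the same idea, minus the confound-removal step that julearn adds on top, can be sketched as follows with made-up toy data (not the notebook's exact pipeline):

import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Toy stand-ins: (n_samples, n_parcels) aggregated VBM values and target ages
rng = np.random.default_rng(0)
X_features = rng.standard_normal((10, 200))
y_age = rng.integers(40, 80, size=10)

# z-score features, then fit a ridge regressor, scored across 3 CV folds
pipeline = make_pipeline(StandardScaler(), Ridge())
scores = cross_val_score(pipeline, X_features, y_age, cv=3)
print(scores)  # with only 10 samples, fold-to-fold variation is large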