Skip to content

Commit

Permalink
Deploy preview for PR 389 🛫
Browse files Browse the repository at this point in the history
  • Loading branch information
fraimondo committed Nov 6, 2024
1 parent 5512ac5 commit 6181bc8
Show file tree
Hide file tree
Showing 246 changed files with 53,571 additions and 0 deletions.
4 changes: 4 additions & 0 deletions pr-preview/pr-389/.buildinfo
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
config: 8c982f8d80782740aefa675a008ebc5e
tags: 645f666f9bcd5a90fca523b33c5a78b7
Binary file added pr-preview/pr-389/.doctrees/api/api.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/cli.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/configs.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/data.doctree
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/index.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/markers.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/nilearn.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/onthefly.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/pipeline.doctree
Binary file not shown.
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/stats.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/storage.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/testing.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/api/utils.doctree
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/builtin.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/contribution.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/environment.pickle
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/faq.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/help.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/index.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/installation.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/maintaining.doctree
Binary file not shown.
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/starting.doctree
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/using/index.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/using/masks.doctree
Binary file not shown.
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/using/running.doctree
Binary file not shown.
Binary file added pr-preview/pr-389/.doctrees/whats_new.doctree
Binary file not shown.
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
"""
Extracting root sum of squares from edge-wise timeseries.
=========================================================
This example uses a ``RSSETSMarker`` to compute root sum of squares
of the edge-wise timeseries using the Schaefer parcellation
(100 rois and 200 rois, 17 Yeo networks) for a 4D nifti BOLD file.
Authors: Leonard Sasse, Sami Hamdan, Nicolas Nieto, Synchon Mandal
License: BSD 3 clause
"""

import tempfile

import junifer.testing.registry # noqa: F401
from junifer.api import collect, run
from junifer.storage import SQLiteFeatureStorage
from junifer.utils import configure_logging


###############################################################################
# Set the logging level to info to see extra information:
configure_logging(level="INFO")

##############################################################################
# Define the DataGrabber interface
datagrabber = {
"kind": "SPMAuditoryTestingDataGrabber",
}

###############################################################################
# Define the markers interface
markers = [
{
"name": "Schaefer100x17_RSSETS",
"kind": "RSSETSMarker",
"parcellation": "Schaefer100x17",
},
{
"name": "Schaefer200x17_RSSETS",
"kind": "RSSETSMarker",
"parcellation": "Schaefer200x17",
},
]

###############################################################################
# Create a temporary directory for junifer feature extraction:
# At the end you can read the extracted data into a ``pandas.DataFrame``.
with tempfile.TemporaryDirectory() as tmpdir:
# Define the storage interface
storage = {
"kind": "SQLiteFeatureStorage",
"uri": f"{tmpdir}/test.sqlite",
}
# Run the defined junifer feature extraction pipeline
run(
workdir=tmpdir,
datagrabber=datagrabber,
markers=markers,
storage=storage,
elements=["sub001"], # we calculate for one subject only
)
# Collect extracted features data
collect(storage=storage)
# Create storage object to read in extracted features
db = SQLiteFeatureStorage(uri=storage["uri"])

# List all the features
print(db.list_features())
# Read extracted features
df_rssets = db.read_df(feature_name="BOLD_Schaefer200x17_RSSETS_rss_ets")

###############################################################################
# Now we take a look at the dataframe
df_rssets.head()
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
"""
UKB VBM GMD Extraction
======================
Authors: Federico Raimondo
License: BSD 3 clause
"""
import tempfile

import junifer.testing.registry # noqa: F401
from junifer.api import run


datagrabber = {
"kind": "OasisVBMTestingDataGrabber",
}

markers = [
{
"name": "Schaefer1000x7_TrimMean80",
"kind": "ParcelAggregation",
"parcellation": "Schaefer1000x7",
"method": "trim_mean",
"method_params": {"proportiontocut": 0.2},
},
{
"name": "Schaefer1000x7_Mean",
"kind": "ParcelAggregation",
"parcellation": "Schaefer1000x7",
"method": "mean",
},
{
"name": "Schaefer1000x7_Std",
"kind": "ParcelAggregation",
"parcellation": "Schaefer1000x7",
"method": "std",
},
]

storage = {
"kind": "SQLiteFeatureStorage",
}

with tempfile.TemporaryDirectory() as tmpdir:
uri = f"{tmpdir}/test.sqlite"
storage["uri"] = uri
run(
workdir="/tmp",
datagrabber=datagrabber,
markers=markers,
storage=storage,
)
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
"""
UKB VBM GMD Extraction
======================
Authors: Federico Raimondo
License: BSD 3 clause
"""


from junifer.api import run


markers = [
{
"name": "Schaefer1000x7_TrimMean80",
"kind": "ParcelAggregation",
"parcellation": "Schaefer1000x7",
"method": "trim_mean",
"method_params": {"proportiontocut": 0.2},
},
{
"name": "Schaefer1000x7_Mean",
"kind": "ParcelAggregation",
"parcellation": "Schaefer1000x7",
"method": "mean",
},
{
"name": "Schaefer1000x7_Std",
"kind": "ParcelAggregation",
"parcellation": "Schaefer1000x7",
"method": "std",
},
]

run(
workdir="/tmp",
datagrabber="JuselessUKBVBM",
elements=("sub-1627474", "ses-2"),
markers=markers,
storage="SQLDataFrameStorage",
storage_params={"outpath": "/data/project/juniferexample"},
)
Binary file not shown.
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
{
  "cells": [
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "\n# Run junifer and julearn.\n\nThis example uses a ParcelAggregation marker to compute the mean of each parcel\nusing the Schaefer parcellation (100 rois, 7 Yeo networks) for a 3D nifti to\nextract some features for machine learning using julearn to predict some other\ndata.\n\nAuthors: Leonard Sasse, Sami Hamdan, Nicolas Nieto, Synchon Mandal\n\nLicense: BSD 3 clause\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "import tempfile\n\nimport nilearn\nimport pandas as pd\nfrom julearn import run_cross_validation, PipelineCreator\n\nimport junifer.testing.registry  # noqa: F401\nfrom junifer.api import collect, run\nfrom junifer.storage.sqlite import SQLiteFeatureStorage\nfrom junifer.utils import configure_logging"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Set the logging level to info to see extra information:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "configure_logging(level=\"INFO\")"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Define the markers you want:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "marker_dicts = [\n    {\n        \"name\": \"Schaefer100x17_TrimMean80\",\n        \"kind\": \"ParcelAggregation\",\n        \"parcellation\": \"Schaefer100x17\",\n        \"method\": \"trim_mean\",\n        \"method_params\": {\"proportiontocut\": 0.2},\n    },\n    {\n        \"name\": \"Schaefer200x17_Mean\",\n        \"kind\": \"ParcelAggregation\",\n        \"parcellation\": \"Schaefer200x17\",\n        \"method\": \"mean\",\n    },\n]"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Define target and confounds for julearn machine learning:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "y = \"age\"\nconfound = \"sex\""
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Load the VBM phenotype data for machine learning data:\n- Fetch the Oasis dataset\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "oasis_dataset = nilearn.datasets.fetch_oasis_vbm()\nage = oasis_dataset.ext_vars[y][:10]\nsex = (\n    pd.Series(oasis_dataset.ext_vars[\"mf\"][:10])\n    .map(lambda x: 1 if x == \"F\" else 0)\n    .values\n)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Create a temporary directory for junifer feature extraction:\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "with tempfile.TemporaryDirectory() as tmpdir:\n    storage = {\"kind\": \"SQLiteFeatureStorage\", \"uri\": f\"{tmpdir}/test.sqlite\"}\n    # run the defined junifer feature extraction pipeline\n    # (workdir is the managed temporary directory; it was previously\n    # hardcoded to \"/tmp\", leaving work files behind after the run)\n    run(\n        workdir=tmpdir,\n        datagrabber={\"kind\": \"OasisVBMTestingDataGrabber\"},\n        markers=marker_dicts,\n        storage=storage,\n    )\n\n    # read in extracted features and add confounds and targets\n    # for julearn run cross validation\n    collect(storage)\n    db = SQLiteFeatureStorage(uri=storage[\"uri\"])\n\n    df_vbm = db.read_df(feature_name=\"VBM_GM_Schaefer200x17_Mean_aggregation\")\n    oasis_subjects = [x[0] for x in df_vbm.index]\n    df_vbm.index = oasis_subjects"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Using julearn for machine learning:\nWe predict the age given our vbm features and sex as a confound.\n\n"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "collapsed": false
      },
      "outputs": [],
      "source": [
        "X = list(df_vbm.columns)\ndf_vbm[y] = age\ndf_vbm[confound] = sex\n\nX_types = {\n    \"features\": X,\n    \"confound\": confound,\n}\n\ncreator = PipelineCreator(problem_type=\"regression\", apply_to=\"features\")\ncreator.add(\"zscore\", apply_to=[\"features\", \"confound\"])\ncreator.add(\"confound_removal\", apply_to=\"features\", confounds=\"confound\")\ncreator.add(\"ridge\")\n\nscores = run_cross_validation(\n    X=X + [confound],\n    y=y,\n    X_types=X_types,\n    data=df_vbm,\n    model=creator,\n    cv=3,\n)\nprint(scores)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {},
      "source": [
        "Interpretation of results:\nDoing machine learning with only 10 datapoints is not meaningful.\nThis explains the big variation in scores\nfor different cross-validation folds.\n\n"
      ]
    }
  ],
  "metadata": {
    "kernelspec": {
      "display_name": "Python 3",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.12.7"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}
Loading

0 comments on commit 6181bc8

Please sign in to comment.