diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index eb660d1269..29c2aa3e74 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -10,7 +10,7 @@ repos:
rev: v8.16.3
hooks:
- id: gitleaks
-
+
- repo: https://github.com/pycqa/isort
rev: 5.12.0
hooks:
@@ -32,4 +32,4 @@ repos:
rev: v17.0.4
hooks:
- id: clang-format
-
+ types_or: [c++]
diff --git a/doc/htmldoc/.gitignore b/doc/htmldoc/.gitignore
index afbca2a449..df2d4597b8 100644
--- a/doc/htmldoc/.gitignore
+++ b/doc/htmldoc/.gitignore
@@ -4,3 +4,5 @@
auto_examples/
# Some images are copied over from the source into the doc static.
static/examples
+# JSON data is produced at build time by Sphinx
+static/data/filter_model.json
diff --git a/doc/htmldoc/_ext/extractor_userdocs.py b/doc/htmldoc/_ext/extractor_userdocs.py
deleted file mode 100644
index 160982a645..0000000000
--- a/doc/htmldoc/_ext/extractor_userdocs.py
+++ /dev/null
@@ -1,527 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# extractor_userdocs.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see <https://www.gnu.org/licenses/>.
-
-import glob
-import json
-import logging
-import os
-import re
-from collections import Counter
-from itertools import chain, combinations
-from math import comb
-from pprint import pformat
-
-from tqdm import tqdm
-
-logging.basicConfig(level=logging.WARNING)
-log = logging.getLogger(__name__)
-
-
-def relative_glob(*pattern, basedir=os.curdir, **kwargs):
- tobase = os.path.relpath(basedir, os.curdir)
- # prefix all patterns with basedir and expand
- names = chain.from_iterable(glob.glob(os.path.join(tobase, pat), **kwargs) for pat in pattern)
- # remove prefix from all expanded names
- return [name[len(tobase) + 1 :] for name in names]
-
-
-def UserDocExtractor(filenames, basedir="..", replace_ext=".rst", outdir="userdocs/"):
- """
- Extract all user documentation from given files.
-
- This method searches for "BeginUserDocs" and "EndUserDocs" keywords and
- extracts all text inbetween as user-level documentation. The keyword
- "BeginUserDocs" may optionally be followed by a colon ":" and a comma
- separated list of tags till the end of the line. Note that this allows tags
- to contain spaces, i.e. you do not need to introduce underscores or hyphens
- for multi-word tags.
-
- Example
- -------
-
- /* BeginUserDocs: example, user documentation generator
-
- [...]
-
- EndUserDocs */
-
- This will extract "[...]" as documentation for the file and tag it with
- 'example' and 'user documentation generator'.
-
- The extracted documentation is written to a file in `basedir` named after
- the sourcefile with ".rst" replacing the original extension.
-
- Parameters
- ----------
-
- filenames : iterable
- Any iterable with input file names (relative to `basedir`).
-
- basedir : str, path
- Directory to which input `filenames` are relative.
-
- replace_ext : str
- Replacement for the extension of the original filename when writing to `outdir`.
-
- outdir : str, path
- Directory where output files are created.
-
- Returns
- -------
-
- dict
- mapping tags to lists of documentation filenames (relative to `outdir`).
- """
- if not os.path.exists(outdir):
- log.info("creating output directory " + outdir)
- os.mkdir(outdir)
- userdoc_re = re.compile(r"BeginUserDocs:?\s*(?P<tags>([\w -]+(,\s*)?)*)\n+(?P<doc>(.|\n)*)EndUserDocs")
- tagdict = dict() # map tags to lists of documents
- nfiles_total = 0
- with tqdm(unit="files", total=len(filenames)) as progress:
- for filename in filenames:
- progress.set_postfix(file=os.path.basename(filename)[:15], refresh=False)
- progress.update(1)
- log.info("extracting user documentation from %s...", filename)
- nfiles_total += 1
- match = None
- with open(os.path.join(basedir, filename), "r", encoding="utf8") as infile:
- match = userdoc_re.search(infile.read())
- if not match:
- log.info("No user documentation found in " + filename)
- continue
- outname = os.path.basename(os.path.splitext(filename)[0]) + replace_ext
- tags = [t.strip() for t in match.group("tags").split(",")]
- for tag in tags:
- tagdict.setdefault(tag, list()).append(outname)
- doc = match.group("doc")
- try:
- doc = rewrite_short_description(doc, filename)
- except ValueError as e:
- log.warning("Documentation added unfixed: %s", e)
- try:
- doc = rewrite_see_also(doc, filename, tags)
- except ValueError as e:
- log.info("Failed to rebuild 'See also' section: %s", e)
- write_rst_files(doc, tags, outdir, outname)
-
- log.info("%4d tags found:\n%s", len(tagdict), pformat(list(tagdict.keys())))
- nfiles = len(set.union(*[set(x) for x in tagdict.values()]))
- log.info("%4d files in input", nfiles_total)
- log.info("%4d files with documentation", nfiles)
- return tagdict
-
-
-def rewrite_short_description(doc, filename, short_description="Short description"):
- """
- Modify a given text by replacing the first section named as given in
- `short_description` by the filename and content of that section.
- Parameters
- ----------
- doc : str
- restructured text with all sections
- filename : str, path
- name that is inserted in the replaced title (and used for useful error
- messages).
- short_description : str
- title of the section that is to be rewritten to the document title
- Returns
- -------
- str
- original parameter doc with short_description section replaced
- """
-
- titles = getTitles(doc)
- if not titles:
- raise ValueError("No sections found in '%s'!" % filename)
- name = os.path.splitext(os.path.basename(filename))[0]
- for title, nexttitle in zip(titles, titles[1:] + [None]):
- if title.group(1) != short_description:
- continue
- secstart = title.end()
- secend = len(doc) + 1 # last section ends at end of document
- if nexttitle:
- secend = nexttitle.start()
- sdesc = doc[secstart:secend].strip().replace("\n", " ")
- fixed_title = "%s – %s" % (name, sdesc)
- return doc[: title.start()] + fixed_title + "\n" + "=" * len(fixed_title) + "\n\n" + doc[secend:]
- raise ValueError("No section '%s' found in %s!" % (short_description, filename))
-
-
-def rewrite_see_also(doc, filename, tags, see_also="See also"):
- """
- Replace the content of a section named `see_also` in the document `doc`
- with links to indices of all its tags.
- The original content of the section -if not empty- will discarded and
- logged as a warning.
- Parameters
- ----------
- doc : str
- restructured text with all sections
- filename : str, path
- name that is inserted in the replaced title (and used for useful error
- messages).
- tags : iterable (list or dict)
- all tags the given document is linked to. These are used to construct the
- links in the `see_also` section.
- see_also : str
- title of the section that is to be rewritten to the document title
- Returns
- -------
- str
- original parameter doc with see_also section replaced
- """
-
- titles = getTitles(doc)
- if not titles:
- raise ValueError("No sections found in '%s'!" % filename)
-
- def rightcase(text):
- """
- Make text title-case except for acronyms, where an acronym is
- identified simply by being all upper-case.
- This function operates on the whole string, so a text with mixed
- acronyms and non-acronyms will not be recognized and everything will be
- title-cased, including the embedded acronyms.
- Parameters
- ----------
- text : str
- text that needs to be changed to the right casing.
- Returns
- -------
- str
- original text with potentially different characters being
- upper-/lower-case.
- """
- if text != text.upper():
- return text.title() # title-case any tag that is not an acronym
- return text # return acronyms unmodified
-
- for title, nexttitle in zip(titles, titles[1:] + [None]):
- if title.group(1) != see_also:
- continue
- secstart = title.end()
- secend = len(doc) + 1 # last section ends at end of document
- if nexttitle:
- secend = nexttitle.start()
- original = doc[secstart:secend].strip().replace("\n", " ")
- if original:
- log.info("dropping manual 'see also' list in %s user docs: '%s'", filename, original)
- return (
- doc[:secstart]
- + "\n"
- + ", ".join([":doc:`{taglabel} `".format(tag=tag, taglabel=rightcase(tag)) for tag in tags])
- + "\n\n"
- + doc[secend:]
- )
- raise ValueError("No section '%s' found in %s!" % (see_also, filename))
-
-
-def write_rst_files(doc, tags, outdir, outname):
- """
- Write raw rst to a file and generate a wrapper with index
- """
- with open(os.path.join(outdir, outname), "w") as outfile:
- outfile.write(doc)
-
-
-def make_hierarchy(tags, *basetags):
- """
- This method adds a single level of hierarchy to the given dictionary.
-
- First a list of items with given basetags is created (intersection). Then
- this list is subdivided into sections by creating intersections with all
- remaining tags.
-
- Parameters
- ----------
- tags : dict
- flat dictionary of tag to entry
-
- basetags : iterable
- iterable of a subset of tags.keys(), if no basetags are given the
- original tags list is returned unmodified.
-
- Returns
- -------
-
- dict
- A hierarchical dictionary of (dict or set) with items in the
- intersection of basetag.
- """
- if not basetags:
- return tags
-
- # items having all given basetags
- baseitems = set.intersection(*[set(items) for tag, items in tags.items() if tag in basetags])
- tree = dict()
- subtags = [t for t in tags.keys() if t not in basetags]
- for subtag in subtags:
- docs = set(tags[subtag]).intersection(baseitems)
- if docs:
- tree[subtag] = docs
- remaining = None
- if tree.values():
- remaining = baseitems.difference(set.union(*tree.values()))
- if remaining:
- tree[""] = remaining
- return {basetags: tree}
-
-
-def rst_index(hierarchy, current_tags=[], underlines="=-~", top=True):
- """
- Create an index page from a given hierarchical dict of documents.
-
- The given `hierarchy` is pretty-printed and returned as a string.
-
- Parameters
- ----------
- hierarchy : dict
- dictionary or dict-of-dict returned from `make_hierarchy()`
-
- current_tags : list
- applied filters for the current index (parameters given to
- `make_hierarchy()`. Defaults to `[]`, which doesn't display any filters.
-
- underlines : iterable
- list of characters to use for underlining deeper levels of the generated
- index.
-
- top : bool
- optional argument keeping track of recursive calls. Calls from within
- `rst_index` itself will always call with `top=False`.
-
- Returns
- -------
-
- str
- formatted pretty index.
- """
-
- def mktitle(t, ul, link=None):
- text = t
- if t != t.upper():
- text = t.title() # title-case any tag that is not an acronym
- title = ":doc:`{text} <{filename}>`".format(text=text, filename=link or "index_" + t)
- text = title + "\n" + ul * len(title) + "\n\n"
- return text
-
- def mkitem(t):
- return "* :doc:`%s`" % os.path.splitext(t)[0]
-
- output = list()
- if top:
- # Prevent warnings by adding an orphan role so Sphinx does not expect it in toctrees
- orphan_text = ":orphan:" + "\n\n"
- page_title = "Model directory"
- description = """
- The model directory is organized and autogenerated by keywords (e.g., adaptive threshold,
- conductance-based etc.). Models that contain a specific keyword will be listed under that word.
- For more information on models, see our :ref:`intro to NEST models <modelsmain>`.
- """
- if len(hierarchy.keys()) == 1:
- page_title += ": " + ", ".join(current_tags)
- output.append(orphan_text + page_title)
- output.append(underlines[0] * len(page_title) + "\n")
- output.append(description + "\n")
- if len(hierarchy.keys()) != 1:
- underlines = underlines[1:]
-
- for tags, items in sorted(hierarchy.items()):
- if "NOINDEX" in tags:
- continue
- if isinstance(tags, str):
- title = tags
- else:
- title = " & ".join(tags)
- if title and not len(hierarchy) == 1: # not print title if already selected by current_tags
- output.append(mktitle(title, underlines[0]))
- if isinstance(items, dict):
- output.append(rst_index(items, current_tags, underlines[1:], top=False))
- else:
- for item in sorted(items):
- output.append(mkitem(item))
- output.append("")
- return "\n".join(output)
-
-
-def reverse_dict(tags):
- """
- Return the reversed dict-of-list
-
- Given a dictionary `keys:values`, this function creates the inverted
- dictionary `value:[key, key2, ...]` with one entry per value of the given
- dict. Since many keys can have the same value, the reversed dict must have
- list-of-keys as values.
-
- Parameters
- ----------
-
- tags : dict
- Values must be hashable to be used as keys for the result.
-
- Returns
- -------
-
- dict
- Mapping the original values to lists of original keys.
- """
- revdict = dict()
- for tag, items in tags.items():
- for item in items:
- revdict.setdefault(item, list()).append(tag)
- return revdict
-
-
-def CreateTagIndices(tags, outdir="userdocs/"):
- """
- This function generates all combinations of tags and creates an index page
- for each combination using `rst_index`.
-
- Parameters
- ----------
-
- tags : dict
- dictionary of tags
-
- outdir : str, path
- path to the intended output directory (handed to `rst_index`.
-
- Returns
- -------
-
- list
- list of names of generated files.
- """
- taglist = list(tags.keys())
- maxtaglen = max([len(t) for t in tags])
- for tag, count in sorted([(tag, len(lst)) for tag, lst in tags.items()], key=lambda x: x[1]):
- log.info(" %%%ds tag in %%d files" % maxtaglen, tag, count)
- if "" in taglist:
- taglist.remove("")
- indexfiles = list()
- depth = min(4, len(taglist)) # how many levels of indices to create at most
- nindices = sum([comb(len(taglist), L) for L in range(depth - 1)])
- log.info("indices down to level %d → %d possible keyword combinations", depth, nindices)
- for current_tags in tqdm(
- chain(*[combinations(taglist, L) for L in range(depth - 1)]), unit="idx", desc="keyword indices", total=nindices
- ):
- current_tags = sorted(current_tags)
- indexname = "index%s.rst" % "".join(["_" + x for x in current_tags])
- hier = make_hierarchy(tags.copy(), *current_tags)
- if not any(hier.values()):
- log.debug("index %s is empyt!", str(current_tags))
- continue
- nfiles = len(set.union(*chain([set(subtag) for subtag in hier.values()])))
- log.debug("generating index for %s...", str(current_tags))
- indextext = rst_index(hier, current_tags)
- with open(os.path.join(outdir, indexname), "w") as outfile:
- outfile.write(indextext)
- indexfiles.append(indexname)
- log.info("%4d non-empty index files generated", len(indexfiles))
- return indexfiles
-
-
-class JsonWriter:
- """
- Helper class to have a unified data output interface.
- """
-
- def __init__(self, outdir):
- self.outdir = outdir
- log.info("writing JSON files to %s", self.outdir)
-
- def write(self, obj, name):
- """
- Store the given object with the given name.
- """
- outname = os.path.join(self.outdir, name + ".json")
- with open(outname, "w") as outfile:
- json.dump(obj, outfile)
- log.info("data saved as " + outname)
-
-
-def getTitles(text):
- """
- extract all sections from the given RST file
-
- Parameters
- ----------
-
- text : str
- restructuredtext user documentation
-
- Returns
- -------
-
- list
- elements are the section title re.match objects
- """
- titlechar = r"\+"
- title_re = re.compile(r"^(?P<title>.+)\n(?P<underline>" + titlechar + r"+)$", re.MULTILINE)
- titles = []
- # extract all titles
- for match in title_re.finditer(text):
- log.debug("MATCH from %s to %s: %s", match.start(), match.end(), pformat(match.groupdict()))
- if len(match.group("title")) != len(match.group("underline")):
- log.warning(
- "Length of section title '%s' (%d) does not match length of underline (%d)",
- match.group("title"),
- len(match.group("title")),
- len(match.group("underline")),
- )
- titles.append(match)
- return titles
-
-
-def ExtractUserDocs(listoffiles, basedir="..", outdir="userdocs/"):
- """
- Extract and build all user documentation and build tag indices.
-
- Writes extracted information to JSON files in outdir. In particular the
- list of seen tags mapped to files they appear in, and the indices generated
- from all combinations of tags.
-
- Parameters are the same as for `UserDocExtractor` and are handed to it
- unmodified.
-
- Returns
- -------
-
- None
- """
- data = JsonWriter(outdir)
- # Gather all information and write RSTs
- tags = UserDocExtractor(listoffiles, basedir=basedir, outdir=outdir)
- data.write(tags, "tags")
-
- indexfiles = CreateTagIndices(tags, outdir=outdir)
- data.write(indexfiles, "indexfiles")
-
- toc_list = [name[:-4] for names in tags.values() for name in names]
- idx_list = [indexfile[:-4] for indexfile in indexfiles]
-
- with open(os.path.join(outdir, "toc-tree.json"), "w") as tocfile:
- json.dump(list(set(toc_list)) + list(set(idx_list)), tocfile)
-
-
-if __name__ == "__main__":
- ExtractUserDocs(relative_glob("models/*.h", "nestkernel/*.h", basedir=".."), outdir="userdocs/")
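For orientation, the removed module's tag bookkeeping boiled down to inverting a tag-to-files mapping and pre-generating one index page per tag combination. A minimal sketch of the inversion step, with invented file names and tags:

```python
# Toy sketch of the removed reverse_dict() helper; the data is invented.
tags = {
    "neuron": ["iaf.rst", "hh.rst"],
    "integrate-and-fire": ["iaf.rst"],
    "synapse": ["stdp.rst"],
}

def reverse_dict(tags):
    # Invert {tag: [files]} into {file: [tags]}; several files can share a
    # tag, so each file maps to the list of tags it carries.
    revdict = {}
    for tag, items in tags.items():
        for item in items:
            revdict.setdefault(item, []).append(tag)
    return revdict

print(reverse_dict(tags))
# {'iaf.rst': ['neuron', 'integrate-and-fire'], 'hh.rst': ['neuron'], 'stdp.rst': ['synapse']}
```

The replacement extension below keeps this reversed mapping but serves it as JSON for client-side filtering, instead of pre-generating an index page per keyword combination.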
diff --git a/doc/htmldoc/_ext/model_tag_setup.py b/doc/htmldoc/_ext/model_tag_setup.py
new file mode 100644
index 0000000000..99afe62ce9
--- /dev/null
+++ b/doc/htmldoc/_ext/model_tag_setup.py
@@ -0,0 +1,384 @@
+# -*- coding: utf-8 -*-
+#
+# model_tag_setup.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <https://www.gnu.org/licenses/>.
+
+import glob
+import json
+import logging
+import os
+import re
+from pathlib import Path
+from pprint import pformat
+
+logging.basicConfig(level=logging.INFO)
+log = logging.getLogger(__name__)
+
+# The following function is used by two other functions, during two separate Sphinx events.
+
+
+def extract_model_text():
+ """
+ Function to extract user documentation from header files.
+
+ This function searches for documentation blocks in header files located in
+ two specified directories: "../../models" and "../../nestkernel". The documentation
+ blocks are identified by markers "BeginUserDocs" and "EndUserDocs".
+
+ Yields
+ ------
+
+ tuple: A tuple containing the match object and the file path for each file
+ where documentation is found.
+
+ Note
+ ----
+ The documentation block format is expected to be:
+
+ BeginUserDocs: [tags]
+ Documentation text
+ EndUserDocs
+ """
+ model_paths = Path("../../models").glob("*.h")
+ nestkernel_paths = Path("../../nestkernel").glob("*.h")
+ file_paths = list(model_paths) + list(nestkernel_paths)
+
+ userdoc_re = re.compile(
+ r"""
+ BeginUserDocs:\s* # Match 'BeginUserDocs:' followed by any whitespace
+ (?P<tags>(?:[\w -]+(?:,\s*)?)+) # Match tags (terms, spaces, commas, hyphens)
+ \n\n # Match two newlines
+ (?P<doc>.*?) # Capture the document text non-greedily
+ (?=EndUserDocs) # Positive lookahead for 'EndUserDocs'
+ """,
+ re.VERBOSE | re.DOTALL,
+ )
+
+ for file_path in file_paths:
+ with open(file_path, "r", encoding="utf8") as file:
+ match = userdoc_re.search(file.read())
+ if not match:
+ log.info("No user documentation found in " + str(file_path))
+ continue
+ yield match, file_path
+
+
+# The following block of functions is called at the Sphinx core event config-inited.
+
+
+def create_rst_files(app, config):
+ """
+ Generates reStructuredText (RST) files from header files containing user documentation.
+
+ This function creates an output directory if it does not exist and processes header
+ files located in predefined directories. It extracts user documentation blocks from
+ the files, optionally modifies the documentation, and writes the resulting text to
+ RST files.
+
+ Parameters
+ ----------
+ app : Sphinx application object
+ The Sphinx application instance, used to access the application's configuration.
+ config : Sphinx config object
+ The configuration object for the Sphinx application.
+
+ """
+
+ outdir = "models/"
+ if not os.path.exists(outdir):
+ log.info("creating output directory " + outdir)
+ os.mkdir(outdir)
+ outnames = []
+ for match, file_path in extract_model_text():
+ doc = match.group("doc")
+ filename = file_path.name
+ outname = filename.replace(".h", ".rst")
+ try:
+ doc = rewrite_short_description(doc, filename)
+ except ValueError as e:
+ log.warning("Documentation added unfixed: %s", e)
+ write_rst_files(doc, outdir, outname)
+
+
+def rewrite_short_description(doc, filename, short_description="Short description"):
+ """
+ Modify a given text by replacing the first section named as given in
+ `short_description` by the filename and content of that section.
+
+ Parameters
+ ----------
+ doc : str
+ restructured text with all sections
+ filename : str, path
+ name that is inserted in the replaced title (and used for useful error
+ messages).
+ short_description : str
+ title of the section that is to be rewritten to the document title
+
+ Returns
+ -------
+ str
+ original parameter doc with short_description section replaced
+ """
+
+ titles = getTitles(doc)
+ if not titles:
+ raise ValueError("No sections found in '%s'!" % filename)
+ name = Path(filename).stem
+ for title, nexttitle in zip(titles, titles[1:] + [None]):
+ if title.group(1) != short_description:
+ continue
+ secstart = title.end()
+ secend = len(doc) + 1 # last section ends at end of document
+ if nexttitle:
+ secend = nexttitle.start()
+ sdesc = doc[secstart:secend].strip().replace("\n", " ")
+ fixed_title = "%s – %s" % (name, sdesc)
+ return doc[: title.start()] + fixed_title + "\n" + "=" * len(fixed_title) + "\n\n" + doc[secend:]
+ raise ValueError("No section '%s' found in %s!" % (short_description, filename))
+
+
+def getTitles(text):
+ """
+ extract all sections from the given RST file
+
+ Parameters
+ ----------
+
+ text : str
+ restructuredtext user documentation
+
+ Returns
+ -------
+
+ list
+ elements are the section title re.match objects
+ """
+ titlechar = r"\+"
+ title_re = re.compile(r"^(?P<title>.+)\n(?P<underline>" + titlechar + r"+)$", re.MULTILINE)
+ titles = []
+ # extract all titles
+ for match in title_re.finditer(text):
+ log.debug("MATCH from %s to %s: %s", match.start(), match.end(), pformat(match.groupdict()))
+ if len(match.group("title")) != len(match.group("underline")):
+ log.warning(
+ "Length of section title '%s' (%d) does not match length of underline (%d)",
+ match.group("title"),
+ len(match.group("title")),
+ len(match.group("underline")),
+ )
+ titles.append(match)
+ return titles
+
+
+def write_rst_files(doc, outdir, outname):
+ """
+ Write raw rst to a file and generate a wrapper with index
+ """
+ with open(os.path.join(outdir, outname), "w") as outfile:
+ outfile.write(doc)
+
+
+# The following block of functions is called at the Sphinx core event
+# env-before-read-docs.
+
+
+def get_model_tags(app, env, docname):
+ """
+ Prepares the environment dictionaries by loading models, tags, and their combinations
+ from files, and writes this data to a JSON file for client-side use.
+
+ This function ensures that two dictionaries (`model_dict` and `tag_dict`) are
+ initialized in the environment if they don't already exist. It then populates `model_dict`
+ with model names extracted from the specified directory and uses these models to populate
+ `tag_dict` with tags.
+
+ Parameters
+ ----------
+ app
+ Sphinx application object
+ env
+ The build environment object of Sphinx, which stores shared data between the builders
+ docname : str
+ The name of the document being processed.
+
+ Note
+ ----
+
+ Writes to `static/data/filter_model.json` which is used client-side.
+ """
+
+ # Initialize necessary dictionaries if not already present
+ if not hasattr(env, "tag_dict"):
+ env.tag_dict = {}
+
+ if not hasattr(env, "model_dict"):
+ env.model_dict = {}
+
+ # Extract models and tags, and find tag-to-model relationships
+ env.model_dict = prepare_model_dict()
+ env.tag_dict = find_models_in_tag_combinations(env.model_dict)
+
+ json_output = Path("static/data/filter_model.json")
+ json_output.parent.mkdir(exist_ok=True, parents=True)
+ # Write the JSON output directly to a file used for dynamically loading data client-side
+ with open(json_output, "w+") as json_file:
+ json.dump(env.tag_dict, json_file, indent=2)
+
+
+def prepare_model_dict():
+ """
+ Extracts user documentation tags from header files and organizes them into a dictionary.
+
+ This function iterates through the header files found by `extract_model_text()`, extracts
+ the tags from each file, and creates a dictionary where the keys are the filenames (with
+ the ".h" extension replaced by ".html") and the values are lists of tags.
+
+ The tags are expected to be comma-separated and will be stripped of whitespace. Tags
+ that are empty after stripping are excluded from the list.
+
+ Returns
+ -------
+
+ dict
+ A dictionary with filenames as keys and lists of tags as values.
+
+ Example
+ -------
+
+ If a header file named "example.h" contains the following documentation block:
+
+ BeginUserDocs: neuron, adaptive threshold, integrate-and-fire
+ ...
+ EndUserDocs
+
+ The resulting dictionary will have an entry:
+ {
+ "example.html": ["neuron", "adaptive_threshold", "integrate-and-fire"]
+ }
+ """
+ models_dict = {}
+
+ for match, file_path in extract_model_text():
+ filename = file_path.name
+ formatted_path = filename.replace(".h", ".html")
+
+ # Initialize with no tags for the file
+ models_dict[formatted_path] = []
+
+ tags = [t.strip() for t in match.group("tags").split(",")]
+ if "NOINDEX" in tags:
+ continue
+ # Strip whitespace from each tag and filter out empty strings
+ tags = [tag.strip() for tag in tags if tag.strip()]
+ models_dict[formatted_path] = tags
+
+ return models_dict
+
+
+def find_models_in_tag_combinations(models_dict):
+ """
+ Processes a dictionary mapping models to tags to create a list of tag-model combinations.
+
+ This function reverses a dictionary that maps models to a list of tags, creating a new
+ mapping from tags to models. It then creates a structured list of dictionaries, each
+ containing information about a tag and the associated models.
+
+ Parameters
+ ----------
+
+ models_dict : dict[str, list[str]]
+ A dictionary where keys are model identifiers and values are lists of tags.
+
+ Returns
+ -------
+
+ result_list : list[dict]
+ A list of dictionaries, each containing the tag, associated models, and the count of models.
+
+ """
+ # Reverse the models_dict to map tags to models
+ model_to_tags = {}
+ for model, tags in models_dict.items():
+ for tag in tags:
+ if tag not in model_to_tags:
+ model_to_tags[tag] = set()
+ model_to_tags[tag].add(model)
+
+ result_list = []
+
+ for tag, models in model_to_tags.items():
+ if isinstance(models, set):
+ models = list(models) # Convert set to list for JSON serialization
+
+ tag_info = {"tag": tag, "models": models, "count": len(models)} # Number of models
+ result_list.append(tag_info)
+
+ return result_list
+
+
+# The following function is called at the Sphinx core event source-read.
+
+
+def template_renderer(app, docname, source):
+ """
+ Modifies the source content for specified templates by rendering them with model and tag data.
+
+ Checks if the document being processed is one of the specified templates. If it is,
+ it retrieves the models and tags from the environment and
+ uses this data to render the template content.
+
+ Parameters
+ ----------
+
+ app
+ Sphinx application object
+ docname : str
+ The name of the document being processed, used to determine if the
+ current document matches one of the specified templates.
+ source : list
+ A list containing the source content of the document; modified in-place
+ and used to inject the rendered content.
+ """
+ env = app.builder.env
+ template_files = ["models/index", "neurons/index", "synapses/index", "devices/index"]
+
+ # Render the document if it matches one of the specified templates
+ if any(docname == template_file for template_file in template_files):
+ html_context = {"tag_dict": env.tag_dict, "model_dict": env.model_dict}
+ src = source[0]
+ rendered = app.builder.templates.render_string(src, html_context)
+ source[0] = rendered
+
+
+def setup(app):
+ """
+ Configures application hooks for the Sphinx documentation builder.
+
+ This function connects other functions to run during separate Sphinx events.
+ """
+ app.connect("config-inited", create_rst_files)
+ app.connect("env-before-read-docs", get_model_tags)
+ app.connect("source-read", template_renderer)
+
+ return {
+ "version": "0.1",
+ "parallel_read_safe": True,
+ "parallel_write_safe": True,
+ }
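As a quick sanity check of the extraction pattern above, a standalone sketch that runs the same regex against an invented header snippet (assuming the named groups `tags` and `doc` as restored above):

```python
import re

# Same pattern as in model_tag_setup.py.
userdoc_re = re.compile(
    r"""
    BeginUserDocs:\s*                 # Match 'BeginUserDocs:' and whitespace
    (?P<tags>(?:[\w -]+(?:,\s*)?)+)   # Match the comma-separated tag list
    \n\n                              # Match the blank line after the tags
    (?P<doc>.*?)                      # Capture the document text non-greedily
    (?=EndUserDocs)                   # Stop just before 'EndUserDocs'
    """,
    re.VERBOSE | re.DOTALL,
)

# Invented header snippet for illustration only.
header = """/* BeginUserDocs: neuron, integrate-and-fire

Short description
+++++++++++++++++
Toy model used only for this check.

EndUserDocs */"""

match = userdoc_re.search(header)
print([t.strip() for t in match.group("tags").split(",")])
# ['neuron', 'integrate-and-fire']
print(match.group("doc").splitlines()[0])
# Short description
```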
diff --git a/doc/htmldoc/clean_source_dirs.py b/doc/htmldoc/clean_source_dirs.py
index 7a499e3d7d..39ea494957 100644
--- a/doc/htmldoc/clean_source_dirs.py
+++ b/doc/htmldoc/clean_source_dirs.py
@@ -1,35 +1,35 @@
-# -*- coding: utf-8 -*-
-#
-# clean_source_dirs.py
-#
-# This file is part of NEST.
-#
-# Copyright (C) 2004 The NEST Initiative
-#
-# NEST is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 2 of the License, or
-# (at your option) any later version.
-#
-# NEST is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with NEST. If not, see <https://www.gnu.org/licenses/>.
-import os
-import pathlib
-import shutil
-from glob import glob
-
-for dir_ in ("auto_examples", "models"):
- for file_ in glob(str(pathlib.Path(__file__).parent / dir_ / "*")):
- if not any(file_.endswith(f) for f in (".gitignore", "models-main.rst", "models-toc.rst")):
- try:
- try:
- os.unlink(file_)
- except OSError:
- shutil.rmtree(file_)
- except Exception:
- print(f"Couldn't remove '{file_}'")
+# -*- coding: utf-8 -*-
+#
+# clean_source_dirs.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <https://www.gnu.org/licenses/>.
+import os
+import pathlib
+import shutil
+from glob import glob
+
+for dir_ in ("auto_examples", "models"):
+ for file_ in glob(str(pathlib.Path(__file__).parent / dir_ / "*")):
+ if not any(file_.endswith(f) for f in (".gitignore", "index.rst")):
+ try:
+ try:
+ os.unlink(file_)
+ except OSError:
+ shutil.rmtree(file_)
+ except Exception:
+ print(f"Couldn't remove '{file_}'")
diff --git a/doc/htmldoc/conf.py b/doc/htmldoc/conf.py
index 73ee994099..2206227a47 100644
--- a/doc/htmldoc/conf.py
+++ b/doc/htmldoc/conf.py
@@ -32,8 +32,6 @@
extension_module_dir = os.path.abspath("./_ext")
sys.path.append(extension_module_dir)
-from extractor_userdocs import ExtractUserDocs, relative_glob # noqa
-
repo_root_dir = os.path.abspath("../..")
pynest_dir = os.path.join(repo_root_dir, "pynest")
# Add the NEST Python module to the path (just the py files, the binaries are mocked)
@@ -57,6 +55,7 @@
"sphinxcontrib.plantuml",
"add_button_notebook",
"IPython.sphinxext.ipython_console_highlighting",
+ "model_tag_setup",
"nbsphinx",
"extract_api_functions",
"sphinx_design",
@@ -176,10 +175,12 @@
html_css_files = [
"css/custom.css",
+ "css/filter_models.css",
"css/pygments.css",
]
html_js_files = [
+ "js/filter_models.js",
"js/custom.js",
]
html_sidebars = {"**": ["logo-text.html", "globaltoc.html", "localtoc.html", "searchbox.html"]}
@@ -212,31 +213,6 @@
}
-def config_inited_handler(app, config):
- models_rst_dir = os.path.abspath("models")
- ExtractUserDocs(
- listoffiles=relative_glob("models/*.h", "nestkernel/*.h", basedir=repo_root_dir),
- basedir=repo_root_dir,
- outdir=models_rst_dir,
- )
-
-
-def toc_customizer(app, docname, source):
- if docname == "models/models-toc":
- models_toc = json.load(open("models/toc-tree.json"))
- html_context = {"nest_models": models_toc}
- models_source = source[0]
- rendered = app.builder.templates.render_string(models_source, html_context)
- source[0] = rendered
-
-
-def setup(app):
- # for events see
- # https://www.sphinx-doc.org/en/master/extdev/appapi.html#sphinx-core-events
- app.connect("source-read", toc_customizer)
- app.connect("config-inited", config_inited_handler)
-
-
nitpick_ignore = [
("py:class", "None"),
("py:class", "optional"),
diff --git a/doc/htmldoc/developer_space/sli_docs/neural-simulations.rst b/doc/htmldoc/developer_space/sli_docs/neural-simulations.rst
index ea36f82c7c..8159326fe5 100644
--- a/doc/htmldoc/developer_space/sli_docs/neural-simulations.rst
+++ b/doc/htmldoc/developer_space/sli_docs/neural-simulations.rst
@@ -204,8 +204,8 @@ NEST kernel using ``GetKernelStatus /node_models get``, while passing
the key 'synapse_models' will return the list of available synapse
models.
-You can find a list of all available neuron models in our :doc:`model
-directory <../../models/index_neuron>`.
+You can find a list of all available neuron models in our :doc:`neuron models
+page <../../neurons/index>`.
Creating nodes
~~~~~~~~~~~~~~
diff --git a/doc/htmldoc/developer_space/workflows/documentation_workflow/user_documentation_workflow.rst b/doc/htmldoc/developer_space/workflows/documentation_workflow/user_documentation_workflow.rst
index 1563c37539..131a868a74 100644
--- a/doc/htmldoc/developer_space/workflows/documentation_workflow/user_documentation_workflow.rst
+++ b/doc/htmldoc/developer_space/workflows/documentation_workflow/user_documentation_workflow.rst
@@ -181,22 +181,25 @@ you will need to build the documentation locally with Sphinx.
#. Navigate to the ``doc/htmldoc`` folder:
-.. code-block:: bash
+ .. code-block:: bash
- cd nest-simulator/doc/htmldoc
+ cd nest-simulator/doc/htmldoc
#. Build the docs:
-.. code-block:: bash
+ .. code-block:: bash
- sphinx-build . ../_build/html -b html
+ sphinx-build . ../_build/html -b html
#. Preview files. They are located in ``doc/_build/html``
-.. code-block:: bash
+ .. code-block:: bash
- ../_build/html/index.html
+ cd ../_build/html/
+ python3 -m http.server
+
+ Open the provided URL in your browser.
.. tip::
@@ -208,7 +211,6 @@ you will need to build the documentation locally with Sphinx.
make docs
-
Create a pull request
~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/htmldoc/devices/index.rst b/doc/htmldoc/devices/index.rst
index 4477c36a7c..c19ac6e4d8 100644
--- a/doc/htmldoc/devices/index.rst
+++ b/doc/htmldoc/devices/index.rst
@@ -3,6 +3,10 @@
All about devices in NEST
=========================
+
+Guides on using devices in NEST
+-------------------------------
+
.. grid:: 1 1 2 2
:gutter: 1
@@ -17,9 +21,27 @@ All about devices in NEST
:link-type: ref
-
.. toctree::
:maxdepth: 1
:glob:
+ :hidden:
*
+
+
+.. dropdown:: List of devices
+ :color: info
+
+ {% for items in tag_dict %}
+ {% if items.tag == "device" %}
+ {% for item in items.models | sort %}
+ * :doc:`/models/{{ item | replace(".html", "") }}`
+ {% endfor %}
+ {% endif %}
+ {% endfor %}
+
+Naming conventions for devices
+------------------------------
+
+A device name should represent its physical counterpart, like a multimeter being called ``multimeter``. In general, the term ``recorder`` is used for devices
+that store the output (e.g., spike times or synaptic strengths over time) of other nodes and make it accessible to the user. The term ``generator`` is used for devices that provide input into the simulation.
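The `{% for items in tag_dict %}` blocks above are plain Jinja, rendered at build time by `template_renderer` through Sphinx's template bridge. A rough standalone approximation, using `jinja2.Template` directly (an assumption about how `render_string` behaves) and invented sample data in the shape produced by `find_models_in_tag_combinations`:

```python
from jinja2 import Template

# Invented sample in the shape of find_models_in_tag_combinations() output.
tag_dict = [
    {"tag": "device", "models": ["voltmeter.html", "multimeter.html"], "count": 2},
    {"tag": "neuron", "models": ["iaf_psc_alpha.html"], "count": 1},
]

src = """\
{% for items in tag_dict %}{% if items.tag == "device" %}{% for item in items.models | sort %}
* :doc:`/models/{{ item | replace(".html", "") }}`{% endfor %}{% endif %}{% endfor %}
"""

print(Template(src).render(tag_dict=tag_dict))
# * :doc:`/models/multimeter`
# * :doc:`/models/voltmeter`
```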
diff --git a/doc/htmldoc/get-started_index.rst b/doc/htmldoc/get-started_index.rst
index 277ac2e933..b3cf5d9ddd 100644
--- a/doc/htmldoc/get-started_index.rst
+++ b/doc/htmldoc/get-started_index.rst
@@ -221,7 +221,7 @@ More topics
Devices
Spatially-structured networks
High performance computing
- NEST models <models/models-main>
+ NEST models <models/index>
NEST and SONATA
Simulation behavior
Randomness in NEST
diff --git a/doc/htmldoc/models/.gitignore b/doc/htmldoc/models/.gitignore
index 944b838d33..3a245cda74 100644
--- a/doc/htmldoc/models/.gitignore
+++ b/doc/htmldoc/models/.gitignore
@@ -3,5 +3,4 @@
*.rst
*.json
# Only these files should be tracked.
-!models-main.rst
-!models-toc.rst
+!index.rst
diff --git a/doc/htmldoc/models/index.rst b/doc/htmldoc/models/index.rst
new file mode 100644
index 0000000000..9ad4867193
--- /dev/null
+++ b/doc/htmldoc/models/index.rst
@@ -0,0 +1,155 @@
+.. _modelsmain:
+
+
+Model directory
+===============
+
+
+NEST has over 100 models. Choose one of the options below to find the one you need!
+
+.. _model_selector:
+
+Model selector
+--------------
+
+
+Select a tag to display corresponding models.
+
+By selecting multiple tags, you can refine your search to models that match all selected tags.
+
+.. raw:: html
+
+
+ <div id="tag-container"></div>
+
+ <h3>List of models based on your selection</h3>
+
+ <div id="model-list"></div>
+
+|
+|
+
+----
+
+
+.. _full_list:
+
+Complete A-Z list of models
+----------------------------
+
+.. dropdown:: Show/Hide list
+ :animate: fade-in-slide-down
+ :color: info
+
+
+ **Neurons**
+
+ {% for items in tag_dict %}
+ {% if items.tag == "neuron" %}
+ {% for item in items.models | sort %}
+ * :doc:`{{ item | replace(".html", "") }}`
+ {% endfor %}
+ {% endif %}
+ {% endfor %}
+
+ **Synapses**
+
+ {% for items in tag_dict %}
+ {% if items.tag == "synapse" %}
+ {% for item in items.models | sort %}
+ * :doc:`{{ item | replace(".html", "") }}`
+ {% endfor %}
+ {% endif %}
+ {% endfor %}
+
+ **Devices**
+
+ {% for items in tag_dict %}
+ {% if items.tag == "device" %}
+ {% for item in items.models | sort %}
+ * :doc:`{{ item | replace(".html", "") }}`
+ {% endfor %}
+ {% endif %}
+ {% endfor %}
+
+
+
+Learn more about . . .
+-----------------------
+
+.. grid:: 1 1 2 2
+ :gutter: 1
+
+ .. grid-item-card::
+
+ * :ref:`neuron models `
+
+ * :ref:`synapse models `
+
+ * :ref:`devices `
+
+ * :ref:`creating and customizing models with NESTML `
+
+ * :ref:`Model terminology `
+
+
+
+----
+
+.. _model_meaning:
+
+
+What we mean by `models`
+------------------------
+
+The term `models` in the context of NEST (and the field of computational neuroscience as a whole) is used with two different meanings:
+
+1. **Neuron and synapse models**. These consist of a set of mathematical
+ equations and algorithmic components that describe the
+ characteristics and behavior of biological neurons and synapses. In
+ NEST, the terms neuron and synapse models are also used for the C++
+ implementations of these conceptual entities. Most of the models in
+ NEST are based on either peer-reviewed publications or text books
+ like [1]_. This is what we mean by `models` in our model directory.
+
+2. **Network models**. These models are created from individual neuron
+ and synapse models using the different commands provided by the
+ :ref:`PyNEST API `. Examples for such network models
+ are the :doc:`microcircuit model
+ <../auto_examples/Potjans_2014/index>` or the `multi-area model
+ <https://github.com/INM-6/multi-area-model>`_.
+ You can also explore :doc:`all our example networks <../auto_examples/index>`.
+
+.. seealso::
+
+ See our glossary section on :ref:`common abbreviations used for model terms `. It includes alternative terms commonly used in the literature.
+
+
+.. _nestml_ad:
+
+Create and customize models with NESTML
+---------------------------------------
+
+Check out :doc:`NESTML `, a domain-specific language for neuron and synapse models.
+NESTML enables fast prototyping of new models using an easy to understand, yet powerful syntax. This is achieved by a combination of a flexible processing toolchain
+written in Python with high simulation performance through the automated generation of C++ code, suitable for use in NEST Simulator.
+
+.. seealso::
+
+ See the :doc:`NESTML docs for installation details `.
+
+References
+~~~~~~~~~~
+
+.. [1] Dayan P and Abbott L (2001). Theoretical Neuroscience: Computational
+ and Mathematical Modeling of Neural Systems. Cambridge, MA: MIT Press.
+ https://pure.mpg.de/pubman/faces/ViewItemOverviewPage.jsp?itemId=item_300
+
+
+.. toctree::
+ :maxdepth: 1
+ :hidden:
+
+ {% for keys in model_dict %}
+ {{ keys | replace(".html", "") }}
+ {% endfor %}
diff --git a/doc/htmldoc/models/models-main.rst b/doc/htmldoc/models/models-main.rst
deleted file mode 100644
index d8d9830d7e..0000000000
--- a/doc/htmldoc/models/models-main.rst
+++ /dev/null
@@ -1,109 +0,0 @@
-.. _modelsmain:
-
-Models in NEST
-==============
-
-What we mean by `models`
-------------------------
-
-The term `models` in the context of NEST (and the field of computational neuroscience as a whole) is used with two different meanings:
-
-1. **Neuron and synapse models**. These consist of a set of mathematical
- equations and algorithmic components that describe the
- characteristics and behavior of biological neurons and synapses. In
- NEST, the terms neuron and synapse models are also used for the C++
- implementations of these conceptual entities. Most of the models in
- NEST are based on either peer-reviewed publications or text books
- like [1]_.
-2. **Network models**. These models are created from individual neuron
- and synapse models using the different commands provided by the
- :ref:`PyNEST API `. Examples for such network models
- are the :doc:`microcircuit model
- <../auto_examples/Potjans_2014/index>` or the `multi-area model
- `_). In the following
- description, we focus on neuron and synapse models and not on
- network models.
-
-Find a model
-------------
-
-By default, NEST comes with a ton of models! Textbook standards like
-integrate-and-fire and Hodgkin-Huxley-type models are available
-alongside high-quality implementations of models published by the
-neuroscience community. The model directory is organized by keywords
-(e.g., :doc:`adaptive threshold `,
-:doc:`conductance-based `, etc.). Models
-that contain a specific keyword will be listed under that word.
-
-In many modeling situations, the full set of models that ship with
-NEST by default is not needed. To only include a subset of the models
-with NEST, please have a look at the :ref:`modelset configuration
-options `.
-
-.. seealso::
-
- Discover :doc:`all the models in our directory `.
-
-Create and customize models with NESTML
----------------------------------------
-
-Check out :doc:`NESTML `, a domain-specific language for neuron and synapse models.
-NESTML enables fast prototyping of new models using an easy to understand, yet powerful syntax. This is achieved by a combination of a flexible processing toolchain
-written in Python with high simulation performance through the automated generation of C++ code, suitable for use in NEST Simulator.
-
-.. seealso::
-
- See the :doc:`NESTML docs for installation details `.
-
-.. note::
-
- NESTML is also available as part of NEST's official :ref:`docker image `.
-
-Model naming
-------------
-
-Neuron models
-~~~~~~~~~~~~~
-
-Neuron model names in NEST combine abbreviations that describe the dynamics and synapse specifications for that model.
-They may also include the author's name of a model based on a specific paper.
-
-For example, the neuron model name
-
-``iaf_cond_beta``
-
- corresponds to an implementation of a spiking neuron using
- integrate-and-fire dynamics with conductance-based
- synapses. Incoming spike events induce a postsynaptic change of
- conductance modeled by a beta function.
-
-As an example for a neuron model name based on specific paper,
-
-``hh_cond_exp_traub``
-
- implements a modified version of the Hodgkin Huxley neuron model
- based on Traub and Miles (1991)
-
-Synapse models
-~~~~~~~~~~~~~~
-
-Synapse models include the word synapse as the last word in the model name.
-
-Synapse models may begin with the author name (e.g., ``clopath_synapse``) or process (e.g., ``stdp_synapse``).
-
-Devices
-~~~~~~~
-
-A device name should represent its physical counterpart - like a multimeter is ``multimeter``. In general, the term ``recorder`` is used for devices
-that store the output (e.g., spike times or synaptic strengths over time) of other nodes and make it accessible to the user. The term ``generator`` is used for devices that provide input into the simulation.
-
-.. seealso::
-
- See our glossary section on :ref:`common abbreviations used for model terms `. It includes alternative terms commonly used in the literature.
-
-References
-~~~~~~~~~~
-
-.. [1] Dayan P and Abbott L (2001). Theoretical Neuroscience: Computational
- and Mathematical Modeling of Neural Systems. Cambridge, MA: MIT Press.
- https://pure.mpg.de/pubman/faces/ViewItemOverviewPage.jsp?itemId=item_300
diff --git a/doc/htmldoc/models/models-toc.rst b/doc/htmldoc/models/models-toc.rst
deleted file mode 100644
index 21c55cc3b7..0000000000
--- a/doc/htmldoc/models/models-toc.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-:orphan:
-
-.. _models_contents:
-
-Models contents
-===============
-
-
-.. toctree::
- :maxdepth: 1
- :hidden:
-
- {% for item in nest_models %}
- {{ item }}
- {% endfor %}
-
diff --git a/doc/htmldoc/neurons/index.rst b/doc/htmldoc/neurons/index.rst
index cc89b966e7..12e3343c55 100644
--- a/doc/htmldoc/neurons/index.rst
+++ b/doc/htmldoc/neurons/index.rst
@@ -3,9 +3,73 @@
All about neurons in NEST
=========================
+
+Guides on using neurons in NEST
+-------------------------------
+
+.. grid:: 1 1 2 2
+ :gutter: 1
+
+ .. grid-item::
+
+ .. grid:: 1 1 1 1
+
+ .. grid-item-card:: Node management (neurons and devices)
+
+ * :ref:`node_handles`
+ * :ref:`param_ex`
+
+ .. grid-item::
+
+ .. grid:: 1 1 1 1
+
+ .. grid-item-card:: Exact integration
+ :class-title: sd-d-flex-row sd-align-minor-center
+ :link: exact_integration
+ :link-type: ref
+
+ .. grid-item-card:: Precise spike times
+ :class-title: sd-d-flex-row sd-align-minor-center
+ :link: sim_precise_spike_times
+ :link-type: ref
+
.. toctree::
:maxdepth: 1
:glob:
+ :hidden:
*
+.. dropdown:: List of neuron models
+ :color: info
+
+ {% for items in tag_dict %}
+ {% if items.tag == "neuron" %}
+ {% for item in items.models | sort %}
+ * :doc:`/models/{{ item | replace(".html", "") }}`
+ {% endfor %}
+ {% endif %}
+ {% endfor %}
+
+
+Neuron model naming conventions
+-------------------------------
+
+Neuron model names in NEST combine abbreviations that describe the dynamics and synapse specifications for that model.
+They may also include the name of the author when the model is based on a specific paper.
+
+For example, the neuron model name
+
+``iaf_cond_beta``
+
+ corresponds to an implementation of a spiking neuron using
+ integrate-and-fire dynamics with conductance-based
+ synapses. Incoming spike events induce a postsynaptic change of
+ conductance modeled by a beta function.
+
+As an example of a neuron model name based on a specific paper,
+
+``hh_cond_exp_traub``
+
+ implements a modified version of the Hodgkin Huxley neuron model
+ based on Traub and Miles (1991)
diff --git a/doc/htmldoc/neurons/node_handles.rst b/doc/htmldoc/neurons/node_handles.rst
index 46a29541cc..6bcdca2e78 100644
--- a/doc/htmldoc/neurons/node_handles.rst
+++ b/doc/htmldoc/neurons/node_handles.rst
@@ -6,7 +6,7 @@ How to handle nodes (neurons and devices)
In NEST 3.0, ``nest.Create()`` returns a *NodeCollection* object instead of a list of global IDs.
This provides a more compact and flexible way for handling nodes.
-In most use cases, you will not need to make many changes to your scripts in NEST 3.0, unless you have used **topology** or **subnets**.
+In most use cases, you will not need to make many changes to your scripts in NEST 3, unless you have used **topology** or **subnets**.
NodeCollection supports the following functionality:
@@ -342,4 +342,3 @@ can contain lists and single values at the same time.
pop = nest.Create("iaf_psc_alpha", 2, params= {"I_e": [200.0, 150.0], "tau_m": 20.0, "V_m": [-77.0, -66.0]})
print(pop.get(["I_e", "tau_m", "V_m"]))
-
diff --git a/doc/htmldoc/static/css/filter_models.css b/doc/htmldoc/static/css/filter_models.css
new file mode 100644
index 0000000000..32a22037e1
--- /dev/null
+++ b/doc/htmldoc/static/css/filter_models.css
@@ -0,0 +1,34 @@
+#tag-container {
+ display: flex;
+ flex-wrap: wrap;
+ gap: 10px;
+}
+
+#model-list {
+ margin-top: 1rem;
+}
+
+.filter-button {
+ transition: background-color 200ms, color 200ms;
+ background-color: transparent;
+ font: inherit;
+ cursor: pointer;
+ display: inline-block;
+ padding: 0 8px;
+ color: #717171;
+ border: 1px solid #9b9b9b;
+ border-radius: 25px;
+ font-size: 14px;
+ white-space: nowrap;
+}
+
+.filter-button:hover {
+ background-color: #f3f3f3;
+ color: #3a3a3a;
+}
+
+.filter-button.is-active {
+ background-color: #0085b6;
+ border-color: #0085b6;
+ color: #fff;
+}
diff --git a/doc/htmldoc/static/js/filter_models.js b/doc/htmldoc/static/js/filter_models.js
new file mode 100644
index 0000000000..6ef80e6299
--- /dev/null
+++ b/doc/htmldoc/static/js/filter_models.js
@@ -0,0 +1,152 @@
+// Wait for the DOM to be fully loaded before executing the script
+document.addEventListener('DOMContentLoaded', function() {
+ // Fetch the JSON data containing model tags and counts
+ fetch('../_static/data/filter_model.json')
+ .then(response => response.json())
+ .then(data => {
+ // Populate the tags on the page with the fetched data
+ populateTags(data);
+ })
+ .catch(error => console.error('Error loading the JSON data:', error));
+});
+
+/**
+ * Populates the tag buttons on the page.
+ *
+ * This function creates buttons for each tag in the provided data,
+ * giving priority to certain tags. Tags are sorted and displayed
+ * accordingly.
+ *
+ * @param {Array} data - Array of objects containing tags and their counts.
+ */
+function populateTags(data) {
+ const container = document.getElementById('tag-container');
+ container.innerHTML = ''; // Ensure no duplicates
+
+ const priorityTags = ['neuron', 'synapse', 'device'];
+ const priorityData = [];
+ const otherData = [];
+
+ // Split data into priority and other based on tags
+ data.forEach(item => {
+ if (priorityTags.includes(item.tag)) {
+ priorityData.push(item);
+ } else {
+ otherData.push(item);
+ }
+ });
+
+ // Sort otherData by count in descending order
+ otherData.sort((a, b) => b.count - a.count);
+
+ // Merge arrays for rendering
+ const finalData = [...priorityData, ...otherData];
+
+ // Render buttons for each tag
+ finalData.forEach(item => {
+ const button = document.createElement('button');
+ button.className = 'filter-button';
+ button.textContent = `${item.tag} (${item.count})`;
+ button.dataset.tag = item.tag; // Store the tag as a data attribute
+ button.onclick = function() {
+ this.classList.toggle('is-active');
+ updateModelDisplay();
+ };
+ container.appendChild(button);
+ });
+}
+
+/**
+ * Updates the model display based on the selected tags.
+ *
+ * This function fetches the JSON data again, filters the models based on
+ * the active tags, and then displays the filtered models.
+ */
+function updateModelDisplay() {
+ const activeButtons = document.querySelectorAll('.filter-button.is-active');
+ const selectedTags = Array.from(activeButtons).map(button => button.dataset.tag); // Use data attribute for tags
+ fetch('../_static/data/filter_model.json')
+ .then(response => response.json())
+ .then(data => {
+ const filteredModels = filterModelsByTags(data, selectedTags);
+ displayModels(filteredModels);
+ });
+}
+
+/**
+ * Filters the models based on the selected tags.
+ *
+ * This function creates an intersection of models that match all selected tags.
+ *
+ * @param {Array} data - Array of objects containing tags and their models.
+ * @param {Array} selectedTags - Array of tags selected by the user.
+ * @returns {Array} - Array of models that match all selected tags.
+ */
+function filterModelsByTags(data, selectedTags) {
+ const tagModelMap = new Map(data.map(item => [item.tag, item.models]));
+ let intersection = selectedTags.reduce((acc, tag, index) => {
+ if (index === 0) {
+ return tagModelMap.get(tag) || [];
+ } else {
+ return acc.filter(model => (tagModelMap.get(tag) || []).includes(model));
+ }
+ }, []);
+ return intersection;
+}
+
+/**
+ * Displays the filtered models on the page.
+ *
+ * This function updates the DOM to show the models that match the selected tags,
+ * fetching the HTML content for each model and displaying it as a list.
+ *
+ * @param {Array} models - Array of model URLs to be displayed.
+ */
+function displayModels(models) {
+ const modelList = document.getElementById('model-list');
+ modelList.innerHTML = ''; // Clear previous content
+
+ if (models.length === 0) {
+ modelList.innerHTML = "Sorry, we couldn't find any results. Try another combination of tags";
+ } else {
+ models.sort(); // Sort models alphabetically by their URLs
+
+ // Create an unordered list element
+ const ul = document.createElement('ul');
+ modelList.appendChild(ul); // Append the list to the model list container
+
+ // Process each model
+ models.forEach(model => {
+ // Fetch the HTML content of the model
+ fetch(model)
+ .then(response => response.text())
+ .then(htmlContent => {
+ // Use DOMParser to parse the HTML content
+ const parser = new DOMParser();
+ const doc = parser.parseFromString(htmlContent, "text/html");
+ let firstHeading = doc.querySelector("h1") ? doc.querySelector("h1").textContent : "No Heading";
+
+ // Strip the trailing pilcrow (or any other non-word character)
+ firstHeading = firstHeading.replace(/\W$/g, '');
+ // Create the link with model name and first heading
+ const link = document.createElement('a');
+ const modelBaseName = model.split('/').pop().replace('.html', ''); // Extract base name for display
+ link.href = model; // Use the full model path directly
+ link.textContent = `${firstHeading}`; // Include the first heading
+ link.className = 'model-link'; // For CSS styling
+ // link.target = '_blank'; // Opens link in a new tab
+
+ // Create a list item and append the link
+ const li = document.createElement('li');
+ li.appendChild(link);
+ ul.appendChild(li);
+ })
+ .catch(error => {
+ console.error('Failed to load model content:', error);
+ const li = document.createElement('li');
+ li.textContent = `Failed to load: ${model}`;
+ ul.appendChild(li);
+ });
+ });
+ }
+}
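The intersection logic in `filterModelsByTags` consumes the JSON that `get_model_tags` writes. The same filtering, sketched in Python for clarity (sample data invented):

```python
# Python sketch of the tag intersection in filter_models.js.
data = [
    {"tag": "neuron", "models": ["iaf_psc_alpha.html", "hh_psc_alpha.html"], "count": 2},
    {"tag": "integrate-and-fire", "models": ["iaf_psc_alpha.html"], "count": 1},
]

def filter_models_by_tags(data, selected_tags):
    # Intersect the model lists of all selected tags; with no tags selected,
    # no models are shown (matching the JS behaviour).
    if not selected_tags:
        return []
    tag_model_map = {item["tag"]: set(item["models"]) for item in data}
    sets = [tag_model_map.get(tag, set()) for tag in selected_tags]
    return sorted(set.intersection(*sets))

print(filter_models_by_tags(data, ["neuron", "integrate-and-fire"]))
# ['iaf_psc_alpha.html']
```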
diff --git a/doc/htmldoc/synapses/connection_generator.rst b/doc/htmldoc/synapses/connection_generator.rst
index dcacacf607..87527431c2 100644
--- a/doc/htmldoc/synapses/connection_generator.rst
+++ b/doc/htmldoc/synapses/connection_generator.rst
@@ -1,7 +1,7 @@
.. _connection_generator:
Connection generator interface
-------------------------------
+==============================
.. admonition:: Availability
diff --git a/doc/htmldoc/synapses/index.rst b/doc/htmldoc/synapses/index.rst
index d47d7a5219..855d398095 100644
--- a/doc/htmldoc/synapses/index.rst
+++ b/doc/htmldoc/synapses/index.rst
@@ -3,8 +3,55 @@
All about synapses and connections in NEST
==========================================
+Guides on using synapses in NEST
+--------------------------------
+
+
+.. grid:: 1 1 2 2
+
+ .. grid-item-card:: Managing connections
+
+ * :ref:`connectivity_concepts`
+ * :ref:`connection_generator`
+ * :ref:`synapse_spec`
+
+ .. grid-item-card:: Weight normalization
+ :class-title: sd-d-flex-row sd-align-minor-center
+ :link: weight_normalization
+ :link-type: ref
+
+ .. grid-item-card:: Gap Junctions
+ :class-title: sd-d-flex-row sd-align-minor-center
+ :link: sim_gap_junctions
+ :link-type: ref
+
+ .. grid-item-card:: Connection functionality
+ :class-title: sd-d-flex-row sd-align-minor-center
+ :link: handling_connections
+ :link-type: ref
+
.. toctree::
:maxdepth: 1
:glob:
+ :hidden:
*
+
+
+.. dropdown:: List of synapse models
+ :color: info
+
+ {% for items in tag_dict %}
+ {% if items.tag == "synapse" %}
+ {% for item in items.models | sort %}
+ * :doc:`/models/{{ item | replace(".html", "") }}`
+ {% endfor %}
+ {% endif %}
+ {% endfor %}
+
+Naming conventions for synapse models
+-------------------------------------
+
+Synapse models include the word synapse or connection as the last word in the model name.
+
+Synapse models may begin with the author name (e.g., ``clopath_synapse``) or process (e.g., ``stdp_synapse``).
diff --git a/doc/htmldoc/synapses/synapse_specification.rst b/doc/htmldoc/synapses/synapse_specification.rst
index c42038fbf1..0a467cb1d0 100644
--- a/doc/htmldoc/synapses/synapse_specification.rst
+++ b/doc/htmldoc/synapses/synapse_specification.rst
@@ -368,7 +368,7 @@ concrete type of that node. In a multi-compartment neuron, for
instance, the different compartments could be addressed as different
receptors, while another neuron model might make sets of different
synaptic parameters available for each receptor. Please refer to the
-:doc:`model documentation <../models/index_neuron>` for details.
+:doc:`neuron models documentation <../neurons/index>` for details.
In order to connect a pre-synaptic node to a certain receptor on a
post-synaptic node, the integer ID of the target receptor can be
@@ -453,7 +453,7 @@ time. Other synapse models implement learning and adaptation in the
form of long-term or short-term plasticity. A list of available
synapse models is accessible via the command ``nest.synapse_models``.
More detailed information about each of them can be found in the
-:doc:`model directory <../models/index_synapse>`.
+:doc:`synapse model page <../synapses/index>`.
.. note::
Not all nodes can be connected via all available synapse types. The
diff --git a/doc/htmldoc/whats_new/v3.6/index.rst b/doc/htmldoc/whats_new/v3.6/index.rst
index 73ea9ada08..2fe8d42d06 100644
--- a/doc/htmldoc/whats_new/v3.6/index.rst
+++ b/doc/htmldoc/whats_new/v3.6/index.rst
@@ -31,7 +31,9 @@ See examples using astrocyte models:
See model docs:
-* :doc:`../../../models/index_astrocyte`
+* :doc:`/models/astrocyte_lr_1994`
+* :doc:`/models/aeif_cond_alpha_astro`
+* :doc:`/models/sic_connection`
New model: glif_psc_double_alpha
--------------------------------
diff --git a/doc/htmldoc/whats_new/v3.7/index.rst b/doc/htmldoc/whats_new/v3.7/index.rst
index c260b7b1fc..a168d2145b 100644
--- a/doc/htmldoc/whats_new/v3.7/index.rst
+++ b/doc/htmldoc/whats_new/v3.7/index.rst
@@ -27,7 +27,13 @@ candidate for efficient training of RSNNs in low-power neuromorphic hardware.
For further information, see:
* :doc:`/auto_examples/eprop_plasticity/index`
-* :doc:`/models/index_e-prop plasticity`
+* :doc:`/models/eprop_iaf_adapt_bsshslm_2020`
+* :doc:`/models/eprop_iaf_bsshslm_2020`
+* :doc:`/models/eprop_learning_signal_connection_bsshslm_2020`
+* :doc:`/models/eprop_readout_bsshslm_2020`
+* :doc:`/models/eprop_synapse_bsshslm_2020`
+
+
Connectivity concepts
---------------------
diff --git a/models/diffusion_connection.h b/models/diffusion_connection.h
index 3592f4b32a..d7b250853a 100644
--- a/models/diffusion_connection.h
+++ b/models/diffusion_connection.h
@@ -28,7 +28,7 @@
namespace nest
{
-/* BeginUserDocs: synapse, instantaneous rate
+/* BeginUserDocs: synapse, instantaneous, rate
Short description
+++++++++++++++++
diff --git a/models/iaf_tum_2000.h b/models/iaf_tum_2000.h
index b983bdb981..972f31e2e4 100644
--- a/models/iaf_tum_2000.h
+++ b/models/iaf_tum_2000.h
@@ -36,7 +36,7 @@ namespace nest
{
// clang-format off
-/* BeginUserDocs: neuron, integrate-and-fire, current-based, synapse, short-term plasticity
+/* BeginUserDocs: neuron, integrate-and-fire, current-based, short-term plasticity
Short description
+++++++++++++++++