diff --git a/doc/htmldoc/conf.py b/doc/htmldoc/conf.py
index 2a3ef0dc33..8a009fa729 100644
--- a/doc/htmldoc/conf.py
+++ b/doc/htmldoc/conf.py
@@ -42,6 +42,7 @@
source_suffix = ".rst"
master_doc = "index"
extensions = [
+ "add_button_notebook",
"sphinx_gallery.gen_gallery",
"list_examples",
"sphinx.ext.autodoc",
@@ -53,7 +54,6 @@
"sphinx.ext.mathjax",
"sphinx_carousel.carousel",
"sphinxcontrib.plantuml",
- "add_button_notebook",
"IPython.sphinxext.ipython_console_highlighting",
"model_tag_setup",
"nbsphinx",
@@ -80,7 +80,7 @@
"gallery_dirs": "auto_examples",
"plot_gallery": "False",
"download_all_examples": False,
- "copyfile_regex": r".*\.rst|.*\.png|.*\.svg|Snakefile|.*\.txt",
+ "copyfile_regex": r".*\.rst|.*\.png|.*\.svg|Snakefile",
}
# General information about the project.
diff --git a/doc/htmldoc/examples/index.rst b/doc/htmldoc/examples/index.rst
index 4a48fabadc..d5cd67195e 100644
--- a/doc/htmldoc/examples/index.rst
+++ b/doc/htmldoc/examples/index.rst
@@ -251,17 +251,17 @@ PyNEST examples
.. grid:: 1 1 2 3
+ .. grid-item-card:: Performance testing
+ :img-top: ../static/img/NetworkSketch_TwoPopulationNetworkPlastic.svg
+
+ * :doc:`../auto_examples/hpc_benchmark`
+ * :doc:`../auto_examples/ignore_and_fire/index`
+
.. grid-item-card:: SONATA network
:img-top: ../static/img/300_pointneurons.png
* :doc:`../auto_examples/sonata_example/sonata_network`
- .. grid-item-card:: HPC benchmark
- :img-top: ../auto_examples/hpc_benchmark_connectivity.svg
-
- * :doc:`../auto_examples/hpc_benchmark`
-
-
.. grid-item-card:: Connection set algebra
:img-top: ../static/img/nest_logo-faded.png
@@ -369,7 +369,9 @@ PyNEST examples
.. toctree::
:hidden:
+ :glob:
+ ../auto_examples/ignore_and_fire/*
../auto_examples/pong/run_simulations
../auto_examples/pong/pong
../auto_examples/pong/generate_gif
diff --git a/doc/htmldoc/hpc/benchmarking.rst b/doc/htmldoc/hpc/benchmarking.rst
index 870baf86a9..913e0c192b 100644
--- a/doc/htmldoc/hpc/benchmarking.rst
+++ b/doc/htmldoc/hpc/benchmarking.rst
@@ -43,7 +43,10 @@ For more details on the conceptual ideas behind beNNch, refer to Albers et al. (
For further details, see the accompanying `beNNch GitHub Page `_.
And for a detailed step-by-step walk though see `Walk through guide `_.
- Example PyNEST script: :doc:`../auto_examples/hpc_benchmark`
+ Example PyNEST scripts:
+
+ * :doc:`../auto_examples/hpc_benchmark`
+ * :doc:`../auto_examples/ignore_and_fire/index`
References
diff --git a/doc/htmldoc/static/css/custom.css b/doc/htmldoc/static/css/custom.css
index befdde6953..88ace7ba1c 100644
--- a/doc/htmldoc/static/css/custom.css
+++ b/doc/htmldoc/static/css/custom.css
@@ -106,16 +106,25 @@ section#kernel-attributes-nest-nestmodule dd {
box-shadow: 3px 3px lightgray !important;
}
.imgbutton:hover {
-
background-color: white;
- outline-color: var(--nest-orange) !important;
- outline-style: solid !important;
- outline-width: 1px !important;
- padding-top: 20px !important;
- padding-bottom: 20px !important;
+ outline-color: var(--nest-orange) !important;
+ outline-style: solid !important;
+ outline-width: 1px !important;
+ padding-top: 20px !important;
+ padding-bottom: 20px !important;
box-shadow: 3px 3px var(--nest-orange) !important;
}
+.md-typeset table:not([class]) th {
+ background-color: rgba(0, 0, 0, 0.0);
+ color: #666;
+}
+
+.md-typeset table:not([class]) tr{
+ border-bottom-color: #e3e3e3;
+ border-bottom-width: 1px;
+}
+
.center {
diff --git a/doc/htmldoc/static/img/NetworkSketch_TwoPopulationNetworkPlastic.svg b/doc/htmldoc/static/img/NetworkSketch_TwoPopulationNetworkPlastic.svg
new file mode 100644
index 0000000000..2572a62fb9
--- /dev/null
+++ b/doc/htmldoc/static/img/NetworkSketch_TwoPopulationNetworkPlastic.svg
@@ -0,0 +1,335 @@
+
+
+
+
diff --git a/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_connectivity_postsim.png b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_connectivity_postsim.png
new file mode 100644
index 0000000000..c7c32a4f43
Binary files /dev/null and b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_connectivity_postsim.png differ
diff --git a/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_connectivity_presim.png b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_connectivity_presim.png
new file mode 100644
index 0000000000..193147a5a5
Binary files /dev/null and b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_connectivity_presim.png differ
diff --git a/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_spikes.png b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_spikes.png
new file mode 100644
index 0000000000..d2a681cd9d
Binary files /dev/null and b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_spikes.png differ
diff --git a/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_weight_distributions.png b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_weight_distributions.png
new file mode 100644
index 0000000000..266a553720
Binary files /dev/null and b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_weight_distributions.png differ
diff --git a/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_connectivity_postsim.png b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_connectivity_postsim.png
new file mode 100644
index 0000000000..cc7f7cb31f
Binary files /dev/null and b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_connectivity_postsim.png differ
diff --git a/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_connectivity_presim.png b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_connectivity_presim.png
new file mode 100644
index 0000000000..610d99f6bc
Binary files /dev/null and b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_connectivity_presim.png differ
diff --git a/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_spikes.png b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_spikes.png
new file mode 100644
index 0000000000..4331884bad
Binary files /dev/null and b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_spikes.png differ
diff --git a/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_weight_distributions.png b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_weight_distributions.png
new file mode 100644
index 0000000000..1a88582be3
Binary files /dev/null and b/doc/htmldoc/static/img/TwoPopulationNetworkPlastic_ignore_and_fire_weight_distributions.png differ
diff --git a/doc/htmldoc/static/img/scaling_ignore_and_fire.png b/doc/htmldoc/static/img/scaling_ignore_and_fire.png
new file mode 100644
index 0000000000..8abd7fbbd9
Binary files /dev/null and b/doc/htmldoc/static/img/scaling_ignore_and_fire.png differ
diff --git a/pynest/examples/ignore_and_fire/README.rst b/pynest/examples/ignore_and_fire/README.rst
new file mode 100644
index 0000000000..a3802a73cb
--- /dev/null
+++ b/pynest/examples/ignore_and_fire/README.rst
@@ -0,0 +1,164 @@
+.. _experiment-ignore:
+
+Exact scaling experiments using the ``ignore_and_fire`` neuron
+===============================================================
+
+Background: (Non-) Scalability of recurrent neuronal networks
+---------------------------------------------------------------
+
+The verification and validation of neuronal simulation architectures (soft- and hardware) is typically based on
+models describing networks of neurons. Ideally, such test-case models are scalable with respect to the network size
+
+- to foster a comparison between different computing architectures with different computational resources,
+- to be able to extrapolate (up-scale) to networks at brain scale, even if data constrained and well tested
+ network models at this scale are not yet published or existing, and
+- to be able to study and compare different plasticity mechanisms with slow dynamics (down-scaling).
+
+Biological neuronal networks are characterized by a high degree of recurrency. As shown in [1]_, scaling the number of nodes or edges in a recurrent neuronal
+network generally alters the network dynamics, such as the average activity level or the structure of correlations.
+Preserving certain dynamical features by adjusting other parameters can only be achieved in limited ranges or exceptional cases. Recurrent neuronal
+networks are hence not truly scalable. In this example, we demonstrate how the :doc:`ignore_and_fire ` neuron can help to perform
+exact scaling experiments with arbitrary types of networks.
+
+
+Network model
+-------------
+
+In this example, we employ a simple network model describing the dynamics
+of a local cortical circuit at the spatial scale of ~1mm within a single cortical layer. It is derived from the model
+proposed in [2]_, but accounts for the synaptic weight dynamics for connections between excitatory neurons. The weight
+dynamics are described by the spike-timing-dependent plasticity (STDP) model derived in [8]_. The model provides a
+mechanism underlying the formation of broad distributions of synaptic weights in combination with asynchronous
+irregular spiking activity (see figure below).
+
+A variant of this model, the :doc:`hpc_benchmark `, has been used in a number of
+benchmarking studies, in particular for weak-scaling experiments ([3]_, [4]_, [5]_, [6]_, [7]_). Due to its random
+homogeneous connectivity, the model represents a hard benchmarking scenario: each neuron projects with equal probability
+to any other neuron in the network. Implementations of this model can therefore not exploit any spatial connectivity
+patterns. In contrast to the model used here, the plasticity dynamics in the ``hpc_benchmark`` is parameterized such
+that it has only a weak effect on the synaptic weights and, hence, the network dynamics. Here, the effect of the
+synaptic plasticity is substantial and leads to a significant broadening of the weight distribution (see figure below).
+Synaptic weights thereby become a sensitive target metric for verification and validation studies.
+
+
+Comparison between the networks with ``integrate-and-fire`` and ``ignore-and-fire`` dynamics
+--------------------------------------------------------------------------------------------
+
+The model employed here can be configured into a truly scalable mode by replacing the integrate-and-fire neurons by an
+:doc:`ignore_and_fire ` dynamics. By doing so, the spike generation dynamics is decoupled
+from the input integration and the plasticity dynamics; the overall network activity, and, hence, the communication
+load, is fully controlled by the user. The firing rates and phases of the :doc:`ignore_and_fire `
+model are randomly drawn from uniform distributions to guarantee asynchronous spiking activity. The plasticity dynamics
+remains intact (see figure below).
+
+================== =====================
+``iaf_psc_alpha`` ``ignore_and_fire``
+================== =====================
+|iaf_spikes| |ign_spikes|
+|iaf_weight| |ign_weight|
+================== =====================
+
+
+.. |iaf_spikes| image:: /static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_spikes.png
+.. |iaf_weight| image:: /static/img/TwoPopulationNetworkPlastic_iaf_psc_alpha_weight_distributions.png
+.. |ign_spikes| image:: /static/img/TwoPopulationNetworkPlastic_ignore_and_fire_spikes.png
+.. |ign_weight| image:: /static/img/TwoPopulationNetworkPlastic_ignore_and_fire_weight_distributions.png
+
+
+Spiking activity (top) and distributions of excitatory synaptic weights (bottom) for the network with integrate-and-fire
+(``iaf_psc_alpha``) and :doc:`ignore_and_fire ` dynamics (``ignore_and_fire``). Figures
+generated using :doc:`generate_reference_data-ignore_and_fire.py `
+and :doc:`generate_reference_figures-ignore_and_fire.py `.
+
+
+Scaling experiments
+-------------------
+
+The ``ignore_and_fire`` variant of the network model permits exact scaling experiments, without the need for any
+parameter tuning when changing the network size (see figure below). We demonstrate this here by simulating the network
+for various network sizes between :math:`N=1250` and :math:`N=13750`.
+The number of incoming connections per neuron, the in-degree, is kept constant at :math:`K=1250`.
+The total number of connections hence scales linearly with :math:`N`. For each simulation, we monitor the simulation
+(wall clock) time, the time and population averaged firing rate, and the mean standard deviation of the excitatory
+synaptic weights at the end of the simulation.
+
+For the integrate-and-fire variant of the network model, the firing rate and the synaptic-weight distribution depend
+on the network size :math:`N`. In particular for small :math:`N`, the firing rates and the synaptic weights increase
+due to the denser connectivity. For the ignore-and-fire version, in contrast, the dynamics is independent of the
+network size. The average firing rate is --by definition-- constant. As the firing phases of the ignore-and-fire
+neurons are chosen randomly, the spiking activity is asynchronous, irrespective of the connection density. As a
+consequence, the distribution of synaptic weights (which is shaped by cross-correlations in the spiking activity) remains constant, too.
+
+With the ignore-and-fire version of the model, performance benchmarking studies can thus be performed under better
+defined conditions. For example, the overall communication load, i.e., the total number of spikes that need to be sent
+across the network within each time interval, is fully controlled by the user.
+
+
+.. figure:: /static/img/scaling_ignore_and_fire.png
+ :scale: 50%
+
+Dependence of the simulation time (top), the time and population averaged firing rate (middle) and the excitatory
+synaptic weights (bottom) on the network size :math:`N` for the ``integrate-and-fire`` (black) and the
+``ignore-and-fire`` variant of the network model (gray). The in-degree :math:`K=1250` is fixed. Figure generated using
+:doc:`scaling.py `.
+
+Run the simulations
+-------------------
+
+You can run the simulations by using the provided Snakefile:
+
+ * :doc:`Snakefile `: Run simulation workflow
+
+.. seealso::
+
+ * :doc:`Detailed description of the network model and parameters `
+ * :doc:`NEST simulation details `
+ * :doc:`model-ignore_and_fire.py `: NEST implementation of the network model
+ * :doc:`parameter_dicts-ignore_and_fire.py `: parameter setting
+ * :doc:`ignore_and_fire model documentation `
+
+
+References
+----------
+
+.. [1] Van Albada S J, Helias M, Diesmann M (2015). Scalability of asynchronous
+ networks is limited by one-to-one mapping between effective connectivity
+ and correlations. PLoS computational biology, 11(9), e1004490.
+
+
+.. [2] Brunel N (2000). Dynamics of networks of randomly connected excitatory
+ and inhibitory spiking neurons. Journal of Physiology-Paris
+ 94(5-6):445-463.
+
+.. [3] Helias M, Kunkel S, Masumoto G, Igarashi J, Eppler JM, Ishii S, Fukai
+ T, Morrison A, Diesmann M (2012). Supercomputers ready for use as
+ discovery machines for neuroscience. Frontiers in Neuroinformatics
+ 6:26. .
+"""
+Generate reference data
+-----------------------
+"""
+import sys
+
+import model
+import psutil
+
+
def generate_reference_data(neuron_model="ignore_and_fire"):
    """
    Generate a set of reference data and store it on disk (spike data and model parameters).

    Parameters
    ----------
    neuron_model : str
        Name of the neuron model to simulate, e.g. "ignore_and_fire" or
        "iaf_psc_alpha" (must be a model name accepted by model.Model).

    Notes
    -----
    Data can be loaded from file afterwards using::

        parameters = model.get_default_parameters()
        spikes = model.load_spike_data("./")

    """

    # configure the model for the requested neuron model and enable spike recording
    parameters = model.get_default_parameters()
    parameters["neuron_model"] = neuron_model
    parameters["record_spikes"] = True
    # parameters["record_weights"] = True

    model_instance = model.Model(parameters)

    print("\nneuron model: %s" % model_instance.pars["neuron_model"])

    model_instance.create()
    model_instance.connect()

    # connectivity at start of simulation, extracted for a subset of excitatory
    # neurons; get_connectivity() writes the data to the given file as a side
    # effect, so its return value is intentionally discarded
    subset_size = 2000  # number of pre- and post-synaptic neurons weights are extracted from
    pop_pre = model_instance.nodes["pop_E"][:subset_size]
    pop_post = model_instance.nodes["pop_E"][:subset_size]
    model_instance.get_connectivity(
        pop_pre, pop_post, model_instance.pars["data_path"] + "/" + "connectivity_presim.dat"
    )

    # simulate for the full model time span T
    model_instance.simulate(model_instance.pars["T"])

    # save parameters to file
    model_instance.save_parameters("model_instance_parameters", model_instance.pars["data_path"])

    # connectivity at end of simulation (same neuron subset, written to a separate file)
    model_instance.get_connectivity(
        pop_pre, pop_post, model_instance.pars["data_path"] + "/" + "connectivity_postsim.dat"
    )


if __name__ == "__main__":
    # guard the entry point so importing this module does not trigger a simulation
    # or require a command-line argument
    generate_reference_data(neuron_model=sys.argv[1])
diff --git a/pynest/examples/ignore_and_fire/generate_reference_figures-ignore_and_fire.py b/pynest/examples/ignore_and_fire/generate_reference_figures-ignore_and_fire.py
new file mode 100644
index 0000000000..07d7955f1d
--- /dev/null
+++ b/pynest/examples/ignore_and_fire/generate_reference_figures-ignore_and_fire.py
@@ -0,0 +1,212 @@
+# -*- coding: utf-8 -*-
+#
+# generate_reference_figures-ignore_and_fire.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+"""
+Generate reference figures
+--------------------------
+"""
+# sys.path.append(r"../")
+import os
+import sys
+
+import matplotlib.pyplot as plt
+import model
+import nest
+import numpy as np
+from matplotlib import gridspec, rcParams
+
+
+def time_and_population_averaged_spike_rate(spikes, time_interval, pop_size):
+ D = time_interval[1] - time_interval[0]
+ n_events = sum((spikes[:, 1] >= time_interval[0]) * (spikes[:, 1] <= time_interval[1]))
+ rate = n_events / D * 1000.0 / pop_size
+ print("\n-----> average firing rate: nu=%.2f /s\n" % (rate))
+ return rate
+
+
+def plot_spikes(spikes, nodes, pars, path="./"):
+ """
+ Create raster plot of spiking activity.
+ """
+
+ pop_all = np.array(nodes["pop_all"])
+
+ rate = time_and_population_averaged_spike_rate(spikes, (0.0, pars["T"]), pars["N_rec_spikes"])
+
+ # plot spiking activity
+ plt.figure(num=1)
+ plt.clf()
+ plt.title(r"time and population averaged firing rate: $\nu=%.2f$ spikes/s" % rate)
+ plt.plot(spikes[:, 1], spikes[:, 0], "o", ms=1, lw=0, mfc="k", mec="k", mew=0, alpha=0.2, rasterized=True)
+
+ plt.xlim(0, pars["T"])
+ plt.ylim(0, pars["N_rec_spikes"])
+ plt.xlabel(r"time (ms)")
+ plt.ylabel(r"neuron id")
+
+ plt.subplots_adjust(bottom=0.14, top=0.9, left=0.15, right=0.95)
+ plt.savefig(path + "/TwoPopulationNetworkPlastic_%s_spikes.png" % pars["neuron_model"])
+
+
+# def center_axes_ticks(ax):
+# """
+# Shift axes ticks in matrix plots to the center of each box.
+# """
+# xticks = ax.get_xticks()
+# dx=(xticks[1]-xticks[0])/2.
+# ax.set_xticks(xticks+dx)
+# ax.set_xticklabels(xticks.astype("int"))
+
+# yticks = ax.get_yticks()
+# dy=(yticks[1]-yticks[0])/2.
+# ax.set_yticks(yticks+dy)
+# ax.set_yticklabels(yticks.astype("int"))
+
+
+def plot_connectivity_matrix(W, pop_pre, pop_post, pars, filename_label=None, path="./"):
+ """
+ Plot connectivity matrix "W" for source and target neurons contained in "pop_pre" and "pop_post",
+ and save figure to file.
+ """
+
+ wmin = 0
+ wmax = 150
+
+ print("\nPlotting connectivity matrix...")
+
+ fig = plt.figure(num=2)
+ plt.clf()
+ gs = gridspec.GridSpec(1, 2, width_ratios=[15, 1])
+
+ ###
+
+ matrix_ax = fig.add_subplot(gs[0])
+ cmap = plt.cm.gray_r
+ matrix = plt.pcolor(pop_pre, pop_post, W, cmap=cmap, rasterized=True, vmin=wmin, vmax=wmax)
+ plt.xlabel(r"source id")
+ plt.ylabel(r"target id")
+
+ # center_axes_ticks(matrix_ax)
+
+ plt.xlim(pop_pre[0], pop_pre[-1])
+ plt.ylim(pop_post[0], pop_post[-1])
+
+ ###
+
+ cb_ax = plt.subplot(gs[1])
+ cb = plt.colorbar(matrix, cax=cb_ax, extend="max")
+ cb.set_label(r"synaptic weight (pA)")
+
+ ###
+
+ plt.subplots_adjust(left=0.12, bottom=0.1, right=0.9, top=0.95, wspace=0.1)
+ plt.savefig(path + "/TwoPopulationNetworkPlastic_%s_connectivity%s.png" % (pars["neuron_model"], filename_label))
+
+
+def plot_weight_distributions(whist_presim, whist_postsim, weights, pars, path="./"):
+ """
+ Plot distributions of synaptic weights before and after simulation.
+ """
+ print("\nPlotting weight distributions...")
+
+ fig = plt.figure(num=3)
+ plt.clf()
+ lw = 3
+ clr = ["0.6", "0.0"]
+ plt.plot(weights[:-1], whist_presim, lw=lw, color=clr[0], label=r"pre sim.")
+ plt.plot(weights[:-1], whist_postsim, lw=lw, color=clr[1], label=r"post sim.")
+ # plt.setp(plt.gca(),xscale="log")
+ plt.setp(plt.gca(), yscale="log")
+ plt.legend(loc=1)
+ plt.xlabel(r"synaptic weight (pA)")
+ plt.ylabel(r"rel. frequency")
+ plt.xlim(weights[0], weights[-2])
+ plt.ylim(5e-5, 3)
+ # plt.subplots_adjust(left=0.12, bottom=0.12, right=0.95, top=0.95)
+ plt.subplots_adjust(bottom=0.14, top=0.9, left=0.15, right=0.95)
+ plt.savefig(path + "/TwoPopulationNetworkPlastic_%s_weight_distributions.png" % pars["neuron_model"])
+
+
+def generate_reference_figures(neuron_model="ignore_and_fire"):
+ """Generate and store set of reference data"""
+
+ # raster plot
+ parameters = model.get_default_parameters()
+
+ parameters["neuron_model"] = neuron_model
+
+ parameters["record_spikes"] = True
+ parameters["record_weights"] = True
+
+ # fetch node ids
+ model_instance = model.Model(parameters)
+ model_instance.create()
+
+ # create subfolder for figures (if necessary)
+ os.system("mkdir -p " + model_instance.pars["data_path"])
+
+ # load spikes from reference data
+ spikes = model.load_spike_data(
+ model_instance.pars["data_path"],
+ "spikes-%d" % (np.array(model_instance.nodes["spike_recorder"])[0]),
+ )
+ # plot_spikes(spikes, model_instance.nodes, model_instance.pars, model_instance.pars["data_path"])
+ plot_spikes(spikes, model_instance.nodes, model_instance.pars, "figures")
+
+ # load connectivity from reference data
+ connectivity_presim = model.load_connectivity_data(model_instance.pars["data_path"], "connectivity_presim")
+ connectivity_postsim = model.load_connectivity_data(model_instance.pars["data_path"], "connectivity_postsim")
+
+ # create connectivity matrices before and after simulation for a subset of neurons
+ subset_size = 100
+ pop_pre = np.array(model_instance.nodes["pop_E"])[:subset_size]
+ pop_post = np.array(model_instance.nodes["pop_E"])[:subset_size]
+ W_presim, pop_pre, pop_post = model.get_connectivity_matrix(connectivity_presim, pop_pre, pop_post)
+ W_postsim, pop_pre, pop_post = model.get_connectivity_matrix(connectivity_postsim, pop_pre, pop_post)
+
+ # plot connectivity matrices
+ # plot_connectivity_matrix(W_presim, pop_pre, pop_post, "_presim", model_instance.pars["data_path"])
+ # plot_connectivity_matrix(W_postsim, pop_pre, pop_post, "_postsim", model_instance.pars["data_path"])
+ plot_connectivity_matrix(W_presim, pop_pre, pop_post, model_instance.pars, "_presim", "figures")
+ plot_connectivity_matrix(W_postsim, pop_pre, pop_post, model_instance.pars, "_postsim", "figures")
+
+ # compute weight distributions
+ # weights = np.arange(29.5,34.1,0.05)
+ weights = np.arange(0.0, 150.1, 0.5)
+ whist_presim = model.get_weight_distribution(connectivity_presim, weights)
+ whist_postsim = model.get_weight_distribution(connectivity_postsim, weights)
+
+ # plot weight distributions
+ # plot_weight_distributions(whist_presim, whist_postsim, weights, model_instance.pars["data_path"])
+ plot_weight_distributions(whist_presim, whist_postsim, weights, model_instance.pars, "figures")
+
+
+rcParams["figure.figsize"] = (4, 3)
+rcParams["figure.dpi"] = 300
+rcParams["font.family"] = "sans-serif"
+rcParams["font.size"] = 8
+rcParams["legend.fontsize"] = 8
+rcParams["axes.titlesize"] = 8
+rcParams["axes.labelsize"] = 8
+rcParams["ytick.labelsize"] = 8
+rcParams["xtick.labelsize"] = 8
+rcParams["text.usetex"] = True
+
+generate_reference_figures(neuron_model=sys.argv[1])
diff --git a/pynest/examples/ignore_and_fire/model-ignore_and_fire.py b/pynest/examples/ignore_and_fire/model-ignore_and_fire.py
new file mode 100644
index 0000000000..812b0c90a6
--- /dev/null
+++ b/pynest/examples/ignore_and_fire/model-ignore_and_fire.py
@@ -0,0 +1,719 @@
+# -*- coding: utf-8 -*-
+#
+# model-ignore_and_fire.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see .
+"""
+Scalable two population STDP network model
+------------------------------------------
+
+In this example, we employ a simple network model describing the
+dynamics of a local cortical circuit at the spatial scale of ~1mm within
+a single cortical layer. It is derived from the model proposed in
+(Brunel [1]_), but accounts for the synaptic weight dynamics for
+connections between excitatory neurons. The weight dynamics are
+described by the spike-timing-dependent plasticity (STDP) model derived
+by Morrison et al. ( [2]_). The model provides a mechanism underlying the
+formation of broad distributions of synaptic weights in combination with
+asynchronous irregular spiking activity (see figure below).
+
+
+The model used here can be configured into a truly scalable mode by
+replacing the ``integrate-and-fire`` neurons by an ``ignore_and_fire``
+dynamics.
+By doing so, the spike generation dynamics is decoupled from the input
+integration and the plasticity dynamics; the overall network activity,
+and, hence, the communication load, is fully controlled by the user. The
+firing rates and phases of the ``ignore-and-fire`` model are randomly
+drawn from uniform distributions to guarantee asynchronous spiking
+activity. The plasticity dynamics remains intact.
+
+Default parameters are provided in "parameter_dicts.py".
+For more information see :doc:`/auto_examples/ignore_and_fire/index`.
+
+References
+~~~~~~~~~~
+
+.. [1] Brunel N (2000). Dynamics of networks of randomly connected excitatory
+ and inhibitory spiking neurons. Journal of Physiology-Paris
+ 94(5-6):445-463.
+
+.. [2] Morrison A. Aertsen, A. and Diesmann M. 2007.
+ Spike-timing-dependent plasticity in balanced random networks.
+ Neural Computation. 19(6):1437–1467.
+
+"""
+
+import copy
+import os
+import sys
+
+import nest
+import numpy as np
+import scipy.special
+
+###############################################################################
+# Instantiation of the TwoPopulationNetworkPlastic model
+# and its PyNEST implementation.
+
+
+class Model:
+ """
+    Instantiation of model
+ """
+
+ def __init__(self, parameters):
+ """
+        Initialise model and simulation instance, including
+
+ 1) parameter setting,
+ 2) configuration of the NEST kernel,
+ 3) setting random-number generator seed, and
+ 4) configuration of neuron and synapse models.
+
+ Arguments
+ ---------
+
+ parameters: dict
+ Parameter dictionary
+
+ Returns
+ -------
+
+ none
+
+ """
+
+ print("\nInitialising model and simulation...")
+
+ # set parameters derived from base parameters
+ self.__derived_parameters(parameters)
+
+ self.pars["data_path"] += "/" + self.pars["neuron_model"]
+
+ # create data directory (if necessary)
+ os.system("mkdir -p " + self.pars["data_path"])
+
+ # initialize NEST kernel
+ nest.ResetKernel()
+ nest.SetKernelStatus(
+ {
+ "tics_per_ms": 1.0 * self.pars["tics_per_step"] / self.pars["dt"],
+ "resolution": self.pars["dt"],
+ "print_time": self.pars["print_simulation_progress"],
+ "local_num_threads": self.pars["n_threads"],
+ "rng_seed": self.pars["seed"],
+ "dict_miss_is_error": True,
+ "data_path": self.pars["data_path"],
+ "overwrite_files": True,
+ }
+ )
+ np.random.seed(self.pars["seed"])
+
+ nest.set_verbosity(self.pars["nest_verbosity"])
+
+ # configure neuron and synapse models
+ if self.pars["neuron_model"] == "iaf_psc_alpha":
+ self.__neuron_params = {
+ "V_th": self.pars["theta"],
+ "E_L": self.pars["E_L"],
+ "V_reset": self.pars["V_reset"],
+ "tau_m": self.pars["tau_m"],
+ "t_ref": self.pars["t_ref"],
+ "C_m": self.pars["C_m"],
+ "tau_syn_ex": self.pars["tau_s"],
+ "tau_syn_in": self.pars["tau_s"],
+ "I_e": self.pars["I_DC"],
+ "tau_minus": self.pars["stdp_tau_minus"],
+ "V_m": self.pars["V_init_min"],
+ }
+ elif self.pars["neuron_model"] == "ignore_and_fire":
+ self.__neuron_params = {}
+
+ def __derived_parameters(self, parameters):
+ """
+ Set additional parameters derived from base parameters.
+
+ A dictionary containing all (base and derived) parameters is stored as model attribute self.pars.
+
+ Arguments
+ ---------
+
+ parameters: dict
+ Dictionary containing base parameters
+
+ """
+
+ self.pars = copy.deepcopy(parameters)
+
+ self.pars["N_E"] = int(self.pars["beta"] * self.pars["N"]) # number of excitatory neurons
+ self.pars["N_I"] = self.pars["N"] - self.pars["N_E"] # number of inhibitory neurons
+ self.pars["K_E"] = int(self.pars["beta"] * self.pars["K"]) # number of excitatory inputs per neuron
+ self.pars["K_I"] = self.pars["K"] - self.pars["K_E"] # number of inhibitory inputs per neuron
+
+ # conversion of PSP amplitudes to PSC amplitudes
+ self.pars["J_unit"] = unit_psp_amplitude(
+ self.pars["tau_m"], self.pars["C_m"], self.pars["tau_s"]
+ ) # unit PSP amplitude
+ self.pars["I_E"] = self.pars["J_E"] / self.pars["J_unit"] # EPSC amplitude for local inputs (pA)
+ self.pars["I_I"] = -self.pars["g"] * self.pars["I_E"] # IPSC amplitude (pA)
+ self.pars["I_X"] = self.pars["I_E"] # EPSC amplitude for external inputs (pA)
+
+ # rate of external Poissonian sources
+ self.pars["nu_theta"] = (
+ 1000.0
+ * self.pars["theta"]
+ * self.pars["C_m"]
+ / (self.pars["I_X"] * np.exp(1.0) * self.pars["tau_m"] * self.pars["tau_s"])
+ )
+ self.pars["nu_X"] = self.pars["eta"] * self.pars["nu_theta"]
+
+ # number of neurons spikes are recorded from
+ if self.pars["N_rec_spikes"] == "all":
+ self.pars["N_rec_spikes"] = self.pars["N"]
+
    def create(self):
        """
        Create and configure all network nodes (neurons + recording and stimulus devices),
        incl. setting of initial conditions.

        A dictionary containing all node IDs is stored as the model attribute self.nodes,
        with keys "pop_all", "pop_E", "pop_I", "poisson" and "spike_recorder".

        """
        print("\nCreating and configuring nodes...")

        # create neuron populations
        pop_all = nest.Create(self.pars["neuron_model"], self.pars["N"], self.__neuron_params)  # overall population

        if self.pars["neuron_model"] == "iaf_psc_alpha":
            # pop_all = nest.Create("iaf_psc_alpha", self.pars["N"], self.__neuron_params) # overall population
            # set random initial membrane potentials (only on nodes local to this MPI process)
            random_vm = nest.random.uniform(self.pars["V_init_min"], self.pars["V_init_max"])
            nest.GetLocalNodeCollection(pop_all).V_m = random_vm
        elif self.pars["neuron_model"] == "ignore_and_fire":
            # pop_all = nest.Create("ignore_and_fire", self.pars["N"]) # overall population
            # pop_all.rate = np.random.uniform(low=self.pars["ignore_and_fire_pars"]["rate_dist"][0],high=self.pars
            # ["ignore_and_fire_pars"]["rate_dist"][1],size=self.pars["N"])
            # pop_all.phase = np.random.uniform(low=self.pars["ignore_and_fire_pars"]["phase_dist"][0],
            # high=self.pars["ignore_and_fire_pars"]["phase_dist"][1],size=self.pars["N"])

            # randomize firing rates and phases of the ignore_and_fire neurons
            # NOTE(review): the original author marked this nest.random-based variant as
            # "better, but not working yet" — confirm it draws per-neuron values as intended
            pop_all.rate = nest.random.uniform(
                min=self.pars["ignore_and_fire_pars"]["rate_dist"][0],
                max=self.pars["ignore_and_fire_pars"]["rate_dist"][1],
            )
            pop_all.phase = nest.random.uniform(
                min=self.pars["ignore_and_fire_pars"]["phase_dist"][0],
                max=self.pars["ignore_and_fire_pars"]["phase_dist"][1],
            )

        pop_E = pop_all[: self.pars["N_E"]]  # population of excitatory neurons
        pop_I = pop_all[self.pars["N_E"] :]  # population of inhibitory neurons

        # create external Poissonian sources (stimulus)
        poisson = nest.Create("poisson_generator", params={"rate": self.pars["nu_X"]})

        # create recording devices
        if self.pars["record_spikes"]:
            # create, configure and connect spike detectors (spikes are written to ascii files)
            spike_recorder = nest.Create("spike_recorder", {"record_to": "ascii", "label": "spikes"})
        else:
            spike_recorder = None

        # configure connections: plastic E->E synapse model with homogeneous STDP parameters
        nest.CopyModel(
            "stdp_pl_synapse_hom_hpc",
            "excitatory_plastic",
            {
                "weight": self.pars["I_E"],
                "delay": self.pars["delay"],
                "alpha": self.pars["stdp_alpha"],
                "lambda": self.pars["stdp_lambda"],
                "mu": self.pars["stdp_mu_plus"],
                "tau_plus": self.pars["stdp_tau_plus"],
            },
        )

        # if self.pars["record_weights"]:
        #    nest.SetDefaults("excitatory_plastic",{"weight_recorder": weight_recorder})

        # static synapse models for E->I, I->all and external inputs
        nest.CopyModel(
            "static_synapse_hpc", "excitatory_static", {"weight": self.pars["I_E"], "delay": self.pars["delay"]}
        )
        nest.CopyModel("static_synapse_hpc", "inhibitory", {"weight": self.pars["I_I"], "delay": self.pars["delay"]})
        nest.CopyModel("static_synapse_hpc", "external", {"weight": self.pars["I_X"], "delay": self.pars["delay"]})

        # store nodes in model instance
        self.nodes = {}
        self.nodes["pop_all"] = pop_all
        self.nodes["pop_E"] = pop_E
        self.nodes["pop_I"] = pop_I
        self.nodes["poisson"] = poisson
        self.nodes["spike_recorder"] = spike_recorder
        # self.nodes["weight_recorder"] = weight_recorder
+
    def connect(self):
        """
        Connect network and devices.

        Expects self.nodes to be populated by a preceding call to create().
        E->E connections use the plastic "excitatory_plastic" synapse model;
        all other connections are static.

        """

        print("\nConnecting network and devices...")
        # fetch neuron populations and device ids
        pop_all = self.nodes["pop_all"]
        pop_E = self.nodes["pop_E"]
        pop_I = self.nodes["pop_I"]
        poisson = self.nodes["poisson"]
        spike_recorder = self.nodes["spike_recorder"]

        # connect network
        # EE connections (plastic)
        nest.Connect(
            pop_E,
            pop_E,
            conn_spec={
                "rule": "fixed_indegree",
                "indegree": self.pars["K_E"],
                "allow_autapses": self.pars["allow_autapses"],
                "allow_multapses": self.pars["allow_multapses"],
            },
            syn_spec="excitatory_plastic",
        )

        # EI connections (static)
        nest.Connect(
            pop_E,
            pop_I,
            conn_spec={
                "rule": "fixed_indegree",
                "indegree": self.pars["K_E"],
                "allow_autapses": self.pars["allow_autapses"],
                "allow_multapses": self.pars["allow_multapses"],
            },
            syn_spec="excitatory_static",
        )

        # IE and II connections (static)
        nest.Connect(
            pop_I,
            pop_all,
            conn_spec={
                "rule": "fixed_indegree",
                "indegree": self.pars["K_I"],
                "allow_autapses": self.pars["allow_autapses"],
                "allow_multapses": self.pars["allow_multapses"],
            },
            syn_spec="inhibitory",
        )

        # connect external Poissonian sources (stimulus) to all neurons
        nest.Connect(poisson, pop_all, syn_spec="external")

        # connect recording devices (to the first N_rec_spikes neurons)
        if self.pars["record_spikes"]:
            nest.Connect(pop_all[: self.pars["N_rec_spikes"]], spike_recorder)

        """
        Since the introduction of the 5g kernel in NEST 2.16.0
        the full connection infrastructure, including presynaptic connectivity,
        is set up in the preparation phase of the simulation.
        The preparation phase is usually induced by the first
        "nest.Simulate()" call. For including this phase in measurements of the
        connection time, we induce it here explicitly by calling ``nest.Prepare()``.
        """
        nest.Prepare()
        nest.Cleanup()
+
+ def simulate(self, t_sim):
+ """
+ Run simulation.
+
+ Arguments
+ ---------
+ t_sim: float
+ Simulation time (ms).
+
+ """
+
+ print("\nSimulating...")
+
+ nest.Simulate(t_sim)
+
+ def save_parameters(self, filename_root, path):
+ """
+ Save model-instance parameters to file.
+
+ Arguments
+ ---------
+ filename_root: str
+ Root of file name.
+
+ path: str
+ File path
+
+ """
+
+ import json
+
+ json.dump(self.pars, open("%s/%s.json" % (path, filename_root), "w"), indent=4)
+
+ def get_connectivity(self, pop_pre, pop_post, filename=None):
+ """
+ Extract connectivity for subpopulations pop_pre and pop_post
+ and store in file filename (unless filename=None [default]).
+
+ Arguments
+ ---------
+ pop_pre: NodeCollection
+ Presynaptic neuron population.
+
+ pop_post: NodeCollection
+ Postsynaptic neuron population.
+
+ filename: str
+ Name of file to store connectivity data. If filename ends in ".gz",
+ the file will be compressed in gzip format. Set filename=None (default)
+ to prevent storage on disk.
+
+ Returns
+ -------
+ C: numpy.ndarray
+ Lx4 array containing connectivity information:
+
+ C[:,0]: source id
+ C[:,1]: target id
+ C[:,2]: synaptic weight
+ C[:,3]: delay (ms)
+
+ (L = len(pop_pre)*len(pop_post) = number of connections.
+
+ """
+ print("Extracting connectivity...")
+
+ conns = nest.GetConnections(source=pop_pre, target=pop_post)
+
+ C = np.zeros((len(conns), 4))
+ C[:, 0] = conns.get()["source"]
+ C[:, 1] = conns.get()["target"]
+ C[:, 2] = conns.get()["weight"]
+ C[:, 3] = conns.get()["delay"]
+
+ if filename:
+ np.savetxt(filename, C, fmt="%d\t%d\t%.3e\t%.3e", header=" source \t target \t weight \tdelay (ms)")
+
+ return C
+
+
def get_default_parameters():
    """
    Import the default model-parameter file.

    Returns
    -------
    pars: dict
        Parameter dictionary.

    """

    import parameter_dicts

    return parameter_dicts.pars
+
+
def get_data_file_list(path, label):
    """
    Search directory "path" for files with extension ".dat" whose names start
    with "label", and return the sorted list of file names.

    Arguments
    ---------
    path: str
        Path containing spike files.

    label: str
        Spike file label (file name root).

    Returns
    -------
    files: list(str)
        Sorted list of file names.

    """

    # collect matching file names in alphabetical order
    files = sorted(name for name in os.listdir(path) if name.startswith(label) and name.endswith(".dat"))

    assert len(files) > 0, "No files of type '%s*.dat' found in path '%s'." % (label, path)

    return files
+
+
def load_spike_data(path, label, time_interval=None, pop=None, skip_rows=3):
    """
    Load spike data from files.

    Arguments
    ---------
    path: str
        Path containing spike files.

    label: str
        Spike file label (file name root).

    time_interval: None (default) or tuple (optional)
        Start and stop of observation interval (ms). All spikes outside this interval are discarded.
        If None, all recorded spikes are loaded.

    pop: None (default) or nest.NodeCollection (optional)
        Observed neuron population. All spike senders that are not part of this population are discarded.
        If None, all recorded spikes are loaded.

    skip_rows: int (optional)
        Number of rows to be skipped while reading spike files (to remove file headers). The default is 3.

    Returns
    -------
    spikes: numpy.ndarray
        Lx2 array of spike senders spikes[:,0] and spike times spikes[:,1] (L = number of spikes).

    """

    if type(time_interval) is tuple:
        # NOTE(review): the message suggests a half-open interval "(t0, t1]", but the
        # filter below includes both endpoints — confirm which is intended
        print("Loading spike data in interval (%.1f ms, %.1f ms] ..." % (time_interval[0], time_interval[1]))
    else:
        print("Loading spike data...")

    files = get_data_file_list(path, label)

    # open spike files and read data
    spikes = []
    for file_name in files:
        try:
            # NOTE(review): np.loadtxt returns a 1-D array for a file with a single
            # spike; np.concatenate below would then misbehave — confirm files always
            # contain multiple rows or consider ndmin=2
            spikes += [
                np.loadtxt("%s/%s" % (path, file_name), skiprows=skip_rows)
            ]  # load spike file while skipping the header
        except ValueError:
            # a failed file is skipped; if all files fail, np.concatenate below raises
            print("Error: %s" % sys.exc_info()[1])
            print(
                "Remove non-numeric entries from file %s (e.g. in file header) \
                by specifying (optional) parameter 'skip_rows'.\n"
                % (file_name)
            )

    spikes = np.concatenate(spikes)

    # extract spikes in specified time interval (endpoints included)
    if time_interval is not None:
        if type(time_interval) is tuple:
            ind = (spikes[:, 1] >= time_interval[0]) * (spikes[:, 1] <= time_interval[1])
            spikes = spikes[ind, :]
        else:
            print("Warning: time_interval must be a tuple or None. All spikes are loaded.")

    # restrict to spikes emitted by the given population (one pass per neuron)
    if type(pop) is nest.NodeCollection:
        spikes_subset = []
        for cn, nid in enumerate(pop):  # loop over all neurons
            print(
                "Spike extraction from %d/%d (%d%%) neurons completed"
                % (cn + 1, len(pop), 1.0 * (cn + 1) / len(pop) * 100),
                end="\r",
            )
            ind = np.where(spikes[:, 0] == nid)[0]
            spikes_subset += list(spikes[ind, :])
        spikes = np.array(spikes_subset)
    elif pop is None:
        pass
    else:
        print("Warning: pop must be a NEST NodeCollection or None. All spikes are loaded.")
    print()

    return spikes
+
+
def load_connectivity_data(path, label, skip_rows=1):
    """
    Load connectivity data (weights and delays) from files.

    Arguments
    ---------
    path: str
        Path containing connectivity files.

    label: str
        Connectivity file label (file name root).

    skip_rows: int, optional
        Number of rows to be skipped while reading connectivity files (to remove file headers).
        The default is 1.

    Returns
    -------
    C: numpy.ndarray
        Lx4 array containing connectivity information:

        C[:,0]: source id
        C[:,1]: target id
        C[:,2]: synaptic weight
        C[:,3]: delay (ms)

        (L = number of connections loaded)

    """

    files = get_data_file_list(path, label)

    # read every file, skipping `skip_rows` header lines per file
    chunks = []
    for file_name in files:
        try:
            chunks.append(np.loadtxt("%s/%s" % (path, file_name), skiprows=skip_rows))
        except ValueError:
            print("Error: %s" % sys.exc_info()[1])
            print(
                "Remove non-numeric entries from file %s (e.g. in file header) by specifying (optional) parameter \
                'skip_rows'.\n"
                % (file_name)
            )

    return np.concatenate(chunks)
+
+
def unit_psp_amplitude(tau_m, C_m, tau_s):
    """
    Compute the PSP maximum (mV) of a LIF neuron with alpha-shaped PSCs of unit amplitude 1.

    Arguments
    ---------
    tau_m: float
        Membrane time constant (ms).

    C_m: float
        Membrane capacitance (pF).

    tau_s: float
        Synaptic time constant (ms).


    Returns
    -------
    J_unit: float
        Unit-PSP amplitude (mV).

    """

    ratio = tau_s / tau_m  # time-constant ratio
    rate_diff = 1.0 / tau_s - 1.0 / tau_m  # difference of inverse time constants

    # time of PSP maximum (root of the PSP derivative, via the lower Lambert W branch)
    t_max = 1.0 / rate_diff * (-LambertWm1(-ratio * np.exp(ratio)) - ratio)

    # evaluate the PSP at its maximum
    decay_m = np.exp(-t_max / tau_m)
    decay_s = np.exp(-t_max / tau_s)
    J_unit = np.exp(1.0) / C_m / (1.0 - tau_s / tau_m) * ((decay_m - decay_s) / rate_diff - t_max * decay_s)

    return J_unit
+
+
+def LambertWm1(x):
+ y = scipy.special.lambertw(x, k=-1 if x < 0 else 0).real
+
+ return y
+
+
def get_index(x, y):
    """
    Return the indices of all positions in x where x==y.

    Arguments
    ---------
    x: list or numpy.ndarray of int, float, str

    y: int, float, str

    Returns
    -------
    ind: numpy.ndarray
        Index array

    """
    (indices,) = np.where(x == y)
    return indices
+
+
def get_connectivity_matrix(connectivity, pop_pre=None, pop_post=None):
    """
    Generate connectivity matrix from connectivity data in "connectivity"
    for the (sub-)set of source and target neurons in "pop_pre" and "pop_post".
    If "pop_pre" or "pop_post" are None or empty (default), the arrays of source
    and target neurons will be constructed from "connectivity".

    Arguments
    ---------
    connectivity: numpy.ndarray
        Lx4 array containing connectivity information:

        connectivity[:,0]: source id
        connectivity[:,1]: target id
        connectivity[:,2]: synaptic weight
        connectivity[:,3]: delay (ms)

        (L = number of connections)

    pop_pre: numpy.ndarray, optional
        Array of source ids (default: None).

    pop_post: numpy.ndarray, optional
        Array of target ids (default: None).

    Returns
    -------
    W: numpy.ndarray
        Connectivity matrix of shape LTxLS, with number of targets LT and number of sources LS
        (convention: pre = columns, post = rows).

    pop_pre: numpy.ndarray
        Array of source ids actually used.

    pop_post: numpy.ndarray
        Array of target ids actually used.

    """

    print("\nGenerating connectivity matrix...")

    # None replaces the former mutable default argument []; empty sequences are
    # still accepted for backward compatibility
    if pop_pre is None or len(pop_pre) == 0:
        pop_pre = np.unique(connectivity[:, 0])
    if pop_post is None or len(pop_post) == 0:
        pop_post = np.unique(connectivity[:, 1])

    # map neuron ids to matrix indices once (O(N)) instead of searching the id
    # arrays for every single connection (O(L*N))
    pre_index = {nid: c for c, nid in enumerate(pop_pre)}
    post_index = {nid: c for c, nid in enumerate(pop_post)}

    # initialise weight matrix (convention: pre = columns, post = rows)
    W = np.zeros([len(pop_post), len(pop_pre)])

    # fill weight matrix; connections with sources/targets outside the given
    # populations are skipped (matching the original empty-index behavior)
    for source, target, weight, _ in connectivity:
        if source in pre_index and target in post_index:
            W[post_index[target], pre_index[source]] = weight

    return W, pop_pre, pop_post
+
+
def get_weight_distribution(connectivity, weights):
    """
    Compute the normalized distribution of synaptic weights.

    Arguments
    ---------
    connectivity: numpy.ndarray
        Lx4 connectivity array; column 2 contains the synaptic weights.

    weights: array-like
        Histogram bin edges.

    Returns
    -------
    numpy.ndarray
        Probability density of the weights in the given bins.

    """
    density, _ = np.histogram(connectivity[:, 2], weights, density=True)
    return density
diff --git a/pynest/examples/ignore_and_fire/parameter_dict-ignore_and_fire.py b/pynest/examples/ignore_and_fire/parameter_dict-ignore_and_fire.py
new file mode 100644
index 0000000000..5b8cd3b408
--- /dev/null
+++ b/pynest/examples/ignore_and_fire/parameter_dict-ignore_and_fire.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+#
+# parameter_dict-ignore_and_fire.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <https://www.gnu.org/licenses/>.
+"""
+Parameter dictionary
+--------------------
+
+Default parameters for Two population STDP network model (TwoPopulationNetworkPlastic)
+
+"""
+
+
pars = {}

pars["model_name"] = "TwoPopulationNetworkPlastic"  # Network model name

# network and connectivity parameters
pars["N"] = 12500  # total number of neurons
pars["K"] = 1250  # total number of inputs per neuron from local network
pars["beta"] = 0.8  # fraction of excitatory neurons/inputs

pars["allow_autapses"] = False  # permit self-connections
pars["allow_multapses"] = True  # permit multiple connections between the same pair

# neuron parameters

# pars["neuron_model"] = "iaf_psc_alpha"
pars["neuron_model"] = "ignore_and_fire"

pars["E_L"] = 0.0  # resting membrane potential (mV)
pars["C_m"] = 250.0  # membrane capacity (pF)
pars["tau_m"] = 20.0  # membrane time constant (ms)
pars["t_ref"] = 2.0  # duration of refractory period (ms)
pars["theta"] = 20.0  # spike threshold (mV)
pars["V_reset"] = 0.0  # reset potential (mV)

# needed for ignore_and_fire version of the model
pars["ignore_and_fire_pars"] = {}
pars["ignore_and_fire_pars"]["rate_dist"] = [0.5, 1.5]  # range of firing rates (spikes/s)
pars["ignore_and_fire_pars"]["phase_dist"] = [0.01, 1.0]  # range of firing phases

# stimulus parameters
pars["I_DC"] = 0.0  # (constant) external input current (pA)
pars["eta"] = 1.2  # rate of external Poissonian sources relative to threshold rate

# synapse parameters
pars["J_E"] = 0.5  # EPSP amplitude (mV)
pars["g"] = 10.0  # relative IPSP amplitude (JI=-g*JE)
pars["delay"] = 1.5  # spike transmission delay (ms)
pars["tau_s"] = 2.0  # synaptic time constant (ms)

pars["stdp_alpha"] = 0.1  # relative magnitude of weight update for acausal firing
pars["stdp_lambda"] = 20.0  # magnitude of weight update for causal firing
pars["stdp_mu_plus"] = 0.4  # weight dependence exponent for causal firing
pars["stdp_tau_plus"] = 15.0  # time constant of weight update for causal firing (ms)
pars["stdp_tau_minus"] = 30.0  # time constant of weight update for acausal firing (ms)
pars["stdp_w_0"] = 1.0  # reference weight (pA)

# initial conditions
pars["V_init_min"] = pars["E_L"]  # min of initial membrane potential (mV)
pars["V_init_max"] = pars["theta"]  # max of initial membrane potential (mV)

# data recording
pars["record_spikes"] = False  # if True: set up spike detectors and record spikes
pars["N_rec_spikes"] = "all"  # number of neurons to record spikes from; if "all", spikes from all neurons are recorded

# pars["record_weights"] = False  # True: record weights of plastic synapses
# pars["weight_recording_start_time"] = 0.  # start time of weight recording (ms)

# simulation parameters
pars["T"] = 10000.0  # simulation time (ms)
pars["dt"] = 2**-3  # simulation resolution (ms) !!! revise documentation (incl delay)
pars["tics_per_step"] = 2**7  # number of tics per time step (defines resolution of time variables in NEST)
pars["seed"] = 1  # seed for random number generator
pars["n_threads"] = 4  # number of threads for simulation
# (note: varying the number of threads leads to different random-number sequences,
#  and, hence, to different results)

pars["print_simulation_progress"] = True  # print network time and realtime factor
pars["nest_verbosity"] = "M_WARNING"  # "M_FATAL", "M_ERROR", "M_WARNING", "M_DEPRECATED", "M_INFO", "M_ALL"

pars["data_path"] = "data"  # directory where recorded data is written
diff --git a/pynest/examples/ignore_and_fire/scaling-ignore_and_fire.py b/pynest/examples/ignore_and_fire/scaling-ignore_and_fire.py
new file mode 100644
index 0000000000..0115a2224a
--- /dev/null
+++ b/pynest/examples/ignore_and_fire/scaling-ignore_and_fire.py
@@ -0,0 +1,333 @@
+# -*- coding: utf-8 -*-
+#
+# scaling-ignore_and_fire.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <https://www.gnu.org/licenses/>.
+"""
+Scaling experiment
+------------------
+
+* demonstrates scaling experiments with a plastic two-population network,
+* illustrates problems arising with standard ``integrate-and-fire`` dynamics, and
+* describes how to perform exact scaling experiments by using ``ignore_and_fire`` neurons.
+"""
+
+import json
+import os
+import sys
+import time
+
+import matplotlib.pyplot as plt
+import model
+import numpy
+from matplotlib import gridspec
+
##########################################################
# parameters
neuron_models = ["iaf_psc_alpha", "ignore_and_fire"]  # neuron models to compare
Ns = numpy.arange(1250, 15000, 1250)  # network sizes to scan

data_path_root = "./data/"  # root of path to simulation data

# toggles: run the simulations and/or analyse (load + plot) previously stored data
simulate = True
# simulate = False
analyse = True
# analyse = False
+
+
+##########################################################
+# network construction and simulation
def run_model(neuron_model, N, data_path_root):
    """
    Build an instance of the model for a given network size and neuron model,
    simulate it, record spiking activity and synaptic weight data, measure
    model construction and simulation times, and store this data in a json file.

    Arguments
    ---------
    neuron_model: str
        Name of the neuron model ("iaf_psc_alpha" or "ignore_and_fire").

    N: int
        Network size (total number of neurons).

    data_path_root: str
        Root of the path where simulation data is stored.

    Returns
    -------
    data: dict
        Dictionary with timing, rate and weight-statistics results.

    """

    parameters = model.get_default_parameters()

    # overwrite default parameters
    parameters["record_spikes"] = True
    parameters["neuron_model"] = neuron_model  # choose model flavor
    parameters["N"] = N  # specify scale (network sizes)
    parameters["data_path"] = data_path_root + "/N" + str(N)

    # create and simulate network
    model_instance = model.Model(parameters)

    # measure network construction time (node creation + connection)
    start = time.time()
    model_instance.create()
    model_instance.connect()
    stop = time.time()
    build_time = stop - start

    # measure simulation (state propagation) time
    start = time.time()
    model_instance.simulate(model_instance.pars["T"])
    stop = time.time()
    sim_time = stop - start

    # store model instance parameters
    model_instance.save_parameters("model_instance_parameters", model_instance.pars["data_path"])

    # record connectivity at end of simulation
    subset_size = 200  # number of pre- and post-synaptic neurons weights are extracted from
    pop_pre = model_instance.nodes["pop_E"][:subset_size]
    pop_post = model_instance.nodes["pop_E"][:subset_size]
    # return value kept for potential inspection; the connectivity used below is
    # re-loaded from the file written here
    C = model_instance.get_connectivity(
        pop_pre, pop_post, model_instance.pars["data_path"] + "/" + "connectivity_postsim.dat"
    )

    # data analysis
    # time and population averaged firing rate
    spikes = model.load_spike_data(
        model_instance.pars["data_path"], "spikes-%d" % (numpy.array(model_instance.nodes["spike_recorder"])[0])
    )
    rate = time_and_population_averaged_spike_rate(
        spikes, (0.0, model_instance.pars["T"]), model_instance.pars["N_rec_spikes"]
    )

    # synaptic weight statistics after simulation
    connectivity_postsim = model.load_connectivity_data(model_instance.pars["data_path"], "connectivity_postsim")
    weight_stats = data_statistics(connectivity_postsim[:, 2])
    # weights = numpy.arange(0.,150.1,0.5)
    # whist_postsim = model.get_weight_distribution(connectivity_postsim,weights)
    # print(whist_postsim)

    ##############
    # print summary of this run
    print()
    print("model: %s" % model_instance.pars["neuron_model"])
    print("\tN = %d" % model_instance.pars["N"])
    print("\t\taverage firing rate: nu=%.2f/s" % (rate))
    print(
        "\t\tweight distribution [min,mean,median,sd,max] = [%.1f, %.1f, %.1f, %.1f, %.1f] pA"
        % (weight_stats["min"], weight_stats["mean"], weight_stats["median"], weight_stats["sd"], weight_stats["max"])
    )
    print()
    ##############

    # collect results and write them to "<data_path>/data.json"
    data = {}
    data["build_time"] = build_time
    data["sim_time"] = sim_time
    data["rate"] = rate
    data["weight_mean"] = weight_stats["mean"]
    data["weight_sd"] = weight_stats["sd"]
    data["weight_min"] = weight_stats["min"]
    data["weight_max"] = weight_stats["max"]
    data["weight_median"] = weight_stats["median"]

    save_dict_as_json(data, "data", model_instance.pars["data_path"])

    return data
+
+
+##########################################################
+
+
def time_and_population_averaged_spike_rate(spikes, time_interval, pop_size):
    """
    Calculate the time- and population-averaged firing rate (spikes/s) of an
    ensemble of spike trains within a specified time interval.

    Arguments
    ---------
    spikes: numpy.ndarray
        Lx2 array of spike senders spikes[:,0] and spike times spikes[:,1] (ms).

    time_interval: tuple
        Start and stop of the observation interval (ms); both endpoints included.

    pop_size: int
        Number of recorded neurons used for population averaging.

    Returns
    -------
    float
        Average firing rate (spikes/s).

    """
    t_start, t_stop = time_interval
    duration = t_stop - t_start
    # count spikes whose times fall inside the (closed) observation interval
    in_interval = (spikes[:, 1] >= t_start) & (spikes[:, 1] <= t_stop)
    n_events = int(numpy.count_nonzero(in_interval))
    return n_events / duration * 1000.0 / pop_size
+
+
+##########################################################
def data_statistics(data):
    """
    Calculate and collect simple summary statistics (NaN values are ignored)
    for a list of data samples.

    Arguments
    ---------
    data: array-like
        Data samples.

    Returns
    -------
    stats: dict
        Dictionary with keys "mean", "sd", "min", "max" and "median".

    """
    estimators = {
        "mean": numpy.nanmean,
        "sd": numpy.nanstd,
        "min": numpy.nanmin,
        "max": numpy.nanmax,
        "median": numpy.nanmedian,
    }
    return {name: estimator(data) for name, estimator in estimators.items()}
+
+
+##############################################
def save_dict_as_json(data_dict, filename_root, path):
    """
    Save a python dictionary as the json file "<path>/<filename_root>.json".

    Arguments
    ---------
    data_dict: dict
        Data dictionary.

    filename_root: str
        Root of file name.

    path: str
        File path

    """

    # use a context manager so the file handle is closed deterministically
    # (the original passed an open file object to json.dump and leaked it)
    with open("%s/%s.json" % (path, filename_root), "w") as f:
        json.dump(data_dict, f)
+
+
def load_data(neuron_model, N, data_path_root):
    """
    Load a data dictionary (timing, rate and weight statistics) from a json file.

    Arguments
    ---------
    neuron_model: str
        Name of the neuron model used for the run.

    N: int
        Network size of the run.

    data_path_root: str
        Root of the path where simulation data is stored.

    Returns
    -------
    data: dict
        Data dictionary as written by run_model().

    """
    data = {}

    # read data from json file
    # NOTE(review): this path contains a "/<neuron_model>" subdirectory, while
    # run_model() sets data_path to data_path_root + "/N" + str(N) only — confirm
    # that model.Model appends the neuron-model name to the data path internally
    data_path = data_path_root + "/N" + str(N) + "/" + neuron_model
    with open(data_path + "/data.json") as f:
        data = json.load(f)

    return data
+
+
if analyse:
    # allocate result arrays, shape: (number of neuron models, number of network sizes)
    build_time = numpy.zeros((len(neuron_models), len(Ns)))
    sim_time = numpy.zeros_like(build_time)
    rate = numpy.zeros_like(build_time)
    weight_mean = numpy.zeros_like(build_time)
    weight_sd = numpy.zeros_like(build_time)
    weight_min = numpy.zeros_like(build_time)
    weight_max = numpy.zeros_like(build_time)
    weight_median = numpy.zeros_like(build_time)
+
# loop over neuron models and network sizes: optionally simulate, then collect results
for cm, neuron_model in enumerate(neuron_models):
    print("%s\n" % neuron_model)

    for cN, N in enumerate(Ns):
        print("N = %d" % N)

        if simulate:
            data = run_model(neuron_model, int(N), data_path_root)

        if analyse:
            # re-load results from disk (also works for runs simulated earlier)
            data = load_data(neuron_model, int(N), data_path_root)

            build_time[cm, cN] = data["build_time"]
            sim_time[cm, cN] = data["sim_time"]
            rate[cm, cN] = data["rate"]
            weight_mean[cm, cN] = data["weight_mean"]
            weight_sd[cm, cN] = data["weight_sd"]
            weight_min[cm, cN] = data["weight_min"]
            weight_max[cm, cN] = data["weight_max"]
            weight_median[cm, cN] = data["weight_median"]

    print()
    print()
+
if analyse:
    # plotting: three vertically stacked panels (sim time, firing rate, weight statistics)

    from matplotlib import rcParams

    rcParams["figure.figsize"] = (3, 4)
    rcParams["figure.dpi"] = 300
    rcParams["font.family"] = "sans-serif"
    rcParams["font.size"] = 8
    rcParams["legend.fontsize"] = 8
    rcParams["axes.titlesize"] = 8
    rcParams["axes.labelsize"] = 8
    rcParams["ytick.labelsize"] = 8
    rcParams["xtick.labelsize"] = 8
    rcParams["text.usetex"] = True  # NOTE(review): requires a LaTeX installation

    fig = plt.figure(num=3)
    plt.clf()

    gs = gridspec.GridSpec(3, 1)

    # build and sim times
    ax1 = fig.add_subplot(gs[0, 0])

    # firing rate
    ax2 = fig.add_subplot(gs[1, 0])

    # weight stat
    ax3 = fig.add_subplot(gs[2, 0])

    ms = 4  # marker size
    lw = 2  # line width
    clrs = ["0", "0.8"]  # gray levels, one per neuron model

    for cm, neuron_model in enumerate(neuron_models):
        # sim time vs. network size
        ax1.plot(
            Ns,
            sim_time[cm, :],
            "-o",
            mfc=clrs[cm],
            mec=clrs[cm],
            ms=ms,
            lw=lw,
            color=clrs[cm],
            label=r"\texttt{%s}" % neuron_model,
        )
        ax1.set_xlim(Ns[0], Ns[-1])
        ax1.set_xticklabels([])
        ax1.set_ylabel(r"simulation time (s)")
        # ax1.set_title(r"fixed in-degree $K=1250$")
        ax1.legend(loc=2)

        # firing rate vs. network size
        ax2.plot(
            Ns,
            rate[cm, :],
            "-o",
            mfc=clrs[cm],
            mec=clrs[cm],
            ms=ms,
            lw=lw,
            color=clrs[cm],
            label=r"\texttt{%s}" % neuron_model,
        )
        ax2.set_xlim(Ns[0], Ns[-1])
        ax2.set_xticklabels([])
        ax2.set_ylim(0.5, 2)
        ax2.set_ylabel(r"firing rate (1/s)")
        # ax2.legend(loc=1)

        # weight statistics vs. network size (label only once to avoid duplicate legend entries)
        if cm == 0:
            lbl1 = "mean"
            lbl2 = "mean + SD"
        else:
            lbl1 = ""
            lbl2 = ""

        ax3.plot(Ns, weight_mean[cm, :], "-o", mfc=clrs[cm], mec=clrs[cm], lw=lw, color=clrs[cm], ms=ms, label=lbl1)
        ax3.plot(
            Ns,
            weight_mean[cm, :] + weight_sd[cm, :],
            "--",
            mfc=clrs[cm],
            mec=clrs[cm],
            lw=lw,
            color=clrs[cm],
            ms=ms,
            label=lbl2,
        )
        ax3.set_xlim(Ns[0], Ns[-1])
        ax3.set_ylim(10, 100)
        ax3.set_xlabel(r"network size $N$")
        ax3.set_ylabel(r"synaptic weight (pA)")
        ax3.legend(loc=1)

    plt.subplots_adjust(left=0.17, bottom=0.1, right=0.95, top=0.95, hspace=0.1)
    plt.savefig("figures/scaling.png")
diff --git a/pynest/examples/ignore_and_fire/simulation_details.rst b/pynest/examples/ignore_and_fire/simulation_details.rst
new file mode 100644
index 0000000000..9cb166d5d3
--- /dev/null
+++ b/pynest/examples/ignore_and_fire/simulation_details.rst
@@ -0,0 +1,51 @@
+Simulation details
+==================
+
+By default, this implementation is based on the LIF neuron,
+:doc:`iaf_psc_alpha </models/iaf_psc_alpha>` and the
+:doc:`stdp_pl_synapse_hom </models/stdp_pl_synapse_hom>`
+synapse models provided in NEST. With
+``pars['neuron_model']='ignore_and_fire'``, the model is configured into
+a truly scalable mode where the ``integrate-and-fire`` dynamics are replaced
+by an ``ignore_and_fire`` dynamics, while the plasticity dynamics
+remains intact.
+
+The network is connected according to the
+``fixed_indegree`` :ref:`connection rule <connectivity_concepts>` in NEST.
+
+The neuron dynamics is propagated in time using exact integration
+based on Rotter and Diesmann [1]_ with a simulation step
+size :math:`\Delta{}t`. The synapse dynamics is updated in an
+event-based manner as described by Morrison et
+al. [2]_.
+
+
+ .. The model implementation runs with `NEST 3.6 `__ and `NESTML 6.0.0 `__.
+
+Simulation parameters
+---------------------
+
++-----------------------+---------------------------------+-----------------------------+
+| Name | Value | Description |
++=======================+=================================+=============================+
+| :math:`T` | :math:`1000\,\text{ms}` | total simulation time |
++-----------------------+---------------------------------+-----------------------------+
+| :math:`\Delta{}t`    | :math:`2^{-3}=0.125\,\text{ms}` | duration of simulation step |
++-----------------------+---------------------------------+-----------------------------+
+| ``tics_per_step`` | :math:`2^7=128` | number of tics per |
+| | | simulation step |
+| | | :math:`\Delta{t}` |
+| | | (time resolution) |
++-----------------------+---------------------------------+-----------------------------+
+
+References
+----------
+
+.. [1] Rotter & Diesmann (1999). Exact digital simulation of time-invariant
+ linear systems with applications to neuronal modeling. Biological
+ Cybernetics 81(5-6):381-402.
+ doi:10.1007/s004220050570 https://doi.org/10.1007/s004220050570
+
+.. [2] Morrison et al. (2007). Spike-timing dependent plasticity in
+ balanced random networks. Neural Computation
+ 19:1437-1467 10.1162/neco.2007.19.6.1437
diff --git a/pynest/examples/ignore_and_fire/twopop_stdp_network_model.rst b/pynest/examples/ignore_and_fire/twopop_stdp_network_model.rst
new file mode 100644
index 0000000000..ad961e3794
--- /dev/null
+++ b/pynest/examples/ignore_and_fire/twopop_stdp_network_model.rst
@@ -0,0 +1,608 @@
+.. _sec_model_description:
+
+Two population STDP network model
+=================================
+
+Network model
+-------------
+
+In this example, we employ a simple network model describing the dynamics of a local cortical circuit at the spatial scale of
+~1mm within a single cortical layer. It is derived from the model proposed in [1]_,
+but accounts for the synaptic weight dynamics for connections between excitatory neurons. The weight dynamics are described
+by the spike-timing-dependent plasticity (STDP) model derived in [2]_.
+This network model is used as the basis for scaling experiments comparing the ``ignore_and_fire`` model with the ``iaf_psc_alpha`` model.
+You can find the corresponding simulation scripts in the ``pynest/examples/ignore_and_fire/`` directory.
+
+Summary of network model
+------------------------
+
+.. list-table::
+ :stub-columns: 1
+
+ * - **Populations**
+ - excitatory population :math:`\mathcal{E}`, inhibitory population :math:`\mathcal{I}`, external Poissonian spike sources :math:`\mathcal{X}`
+ * - **Connectivity**
+ - sparse random connectivity respecting Dale’s principle
+ * - **Neurons**
+ - leaky integrate-and-fire (LIF)
+ * - **Synapses**
+ - linear input integration with alpha-function-shaped postsynaptic currents (PSCs), spike-timing dependent plasticity (STDP) for connections between excitatory neurons
+ * - **Input**
+ - stationary, uncorrelated Poissonian spike trains
+
+ * -
+ - .. figure:: /static/img/NetworkSketch_TwoPopulationNetworkPlastic.svg
+
+       Network sketch (see `Fig. 8 <https://doi.org/10.1371/journal.pcbi.1010086>`_
+       in Senk et al. [3]_).
+
+Detailed description of network model
+--------------------------------------
+
+Populations
+~~~~~~~~~~~
+
+.. table::
+
+ +---------------------+----------------------+----------------------------------+
+ | **Name** | **Elements** | **Size** |
+ +=====================+======================+==================================+
+ | :math:`\mathcal{E}` | LIF neurons | :math:`N_\text{E}=\beta{}N` |
+ | | | |
+ +---------------------+----------------------+----------------------------------+
+ | :math:`\mathcal{I}` | LIF neurons | :math:`N_\text{I}=N-N_\text{E}` |
+ | | | |
+ +---------------------+----------------------+----------------------------------+
+ | :math:`\mathcal{X}` | realizations of a | :math:`N` |
+ | | Poisson point | |
+ | | process | |
+ +---------------------+----------------------+----------------------------------+
+
+Connectivity
+~~~~~~~~~~~~
+
+.. table::
+
+ +---------------------+----------------------+---------------------------------------------------------+
+ | **Source** | **Target** | **Pattern** |
+ +=====================+======================+=========================================================+
+ | :math:`\mathcal{E}` | :math:`\mathcal{E}` | - random, |
+ | | | independent; |
+ | | | homogeneous |
+ | | | in-degree |
+ | | | :math:`K_{\text{E},i}=K_\text{E}` |
+ | | | |
+ | | | (:math:`\forall{}i\in\mathcal{E}`) |
+ | | | |
+ | | | |
+ | | | - plastic synaptic weights |
+ | | | :math:`J_{ij}(t)` |
+ | | | |
+ | | | (:math:`\forall{}i\in\mathcal{E},j\in\mathcal{E}`) |
+ | | | |
+ | | | - homogeneous |
+ | | | |
+ | | | spike-transmission |
+ | | | delays |
+ | | | :math:`d_{ij}=d` |
+ | | | |
+ | | | (:math:`\forall{}i\in\mathcal{E},j\in\mathcal{E}`) |
+ | | | |
+ +---------------------+----------------------+---------------------------------------------------------+
+ | :math:`\mathcal{E}` | :math:`\mathcal{I}` | - random, |
+ | | | independent; |
+ | | | homogeneous |
+ | | | in-degree |
+ | | | :math:`K_{\text{E},i}=K_\text{E}` |
+ | | | |
+ | | | (:math:`\forall{}i\in\mathcal{I}`) |
+ | | | |
+ | | | |
+ | | | - fixed synaptic |
+ | | | weights |
+ | | | :math:`J_{ij}\in\{0,J\}` |
+ | | | |
+ | | | (:math:`\forall{}i\in\mathcal{I},j\in\mathcal{E}`) |
+ | | | |
+ | | | |
+ | | | |
+ | | | - homogeneous |
+ | | | |
+ | | | spike-transmission |
+ | | | delays |
+ | | | :math:`d_{ij}=d` |
+ | | | |
+ | | | (:math:`\forall{}i\in\mathcal{I},j\in\mathcal{E}`) |
+ | | | |
+ +---------------------+----------------------+---------------------------------------------------------+
+ | :math:`\mathcal{I}` | :math:`\mathcal \ | - random, |
+ | | {E}\cup\mathcal{I}` | independent; |
+ | | | homogeneous |
+ | | | in-degree |
+ | | | :math:`K_{\text{I},i}=K_\text{I}` |
+ | | | |
+   |                     |                      |      (:math:`\forall{}i\in\mathcal{E}\cup\mathcal{I}, \         |
+ | | | j\in\mathcal{I}`) |
+ | | | |
+ | | | - fixed synaptic |
+ | | | weights |
+ | | | :math:`J_{ij}\in\{-gJ,0\}` |
+ | | | |
+ | | | (:math:`\forall{}i\in\mathcal{E}\cup\mathcal{I}, \ |
+ | | | j\in\mathcal{I}`) |
+ | | | |
+ | | | |
+ | | | - homogeneous |
+ | | | |
+ | | | spike-transmission |
+ | | | delays |
+ | | | :math:`d_{ij}=d` |
+ | | | |
+ | | | (:math:`\forall{}i\in\mathcal{E}\cup\mathcal{I}, \ |
+ | | | j\in\mathcal{I}`) |
+ | | | |
+ | | | |
+ +---------------------+----------------------+---------------------------------------------------------+
+ | :math:`\mathcal{X}` | :math:`\mathcal \ | - one-to-one |
+ | | {E}\cup\mathcal{I}` | |
+ | | | - fixed synaptic |
+ | | | weights |
+ | | | :math:`J_{ij}=J` |
+ | | | |
+ | | | (:math:`\forall{}i\in\mathcal{E}\cup\mathcal{I}, \ |
+ | | | j\in\mathcal{X}`) |
+ | | | |
+ | | | - homogeneous |
+ | | | |
+ | | | spike-transmission |
+ | | | delays |
+ | | | :math:`d_{ij}=d` |
+ | | | |
+ | | | (:math:`\forall{}i\in\mathcal{E}\cup\mathcal{I}, \ |
+ | | | j\in\mathcal{X}`) |
+ | | | |
+ +---------------------+----------------------+---------------------------------------------------------+
+
+
+
+Neuron
+~~~~~~
+
+.. list-table::
+
+ * - **Leaky integrate-and-fire (iaf) dynamics**
+ - Dynamics of membrane potential :math:`V_{i}(t)` and
+       spiking activity :math:`s_i(t)` of neuron :math:`i\in\left\{1,\ldots,N\right\}`:
+
+ * emission of :math:`k`\ th (:math:`k=1,2,\ldots`) spike of neuron
+ :math:`i` at time :math:`t_{i}^{k}` if
+
+ .. math::
+ V_{i}\left(t_{i}^{k}\right)\geq\theta
+
+ with spike threshold :math:`\theta`
+
+ * reset and refractoriness:
+
+       .. math:: \forall{}k,\ \forall t \in \left(t_{i}^{k},\,t_{i}^{k}+\tau_\text{ref}\right]:\quad V_{i}(t)=V_\text{reset}
+
+ with refractory period :math:`\tau_\text{ref}` and reset potential
+ :math:`V_\text{reset}`
+
+ * spike train :math:`\displaystyle s_i(t)=\sum_k \delta(t-t_i^k)`
+
+ * subthreshold dynamics of membrane potential :math:`V_{i}(t)`:
+
+ .. math::
+
+ \begin{aligned}
+ &\forall{}k,\ \forall t \notin \left[t_{i}^{k},\,t_{i}^{k}+\tau_\text{ref}\right):\\
+ &\qquad\tau_\text{m}\frac{\text{d}{}V_i(t)}{\text{d}{}t} =
+ \Bigl[E_\text{L}-V_i(t)\Bigr]+R_\text{m}I_i(t)
+ \end{aligned}
+
+ with membrane time constant :math:`\tau_\text{m}`, membrane
+ resistance :math:`R_\text{m}`, resting potential :math:`E_\text{L}`,
+ and total synaptic input current :math:`I_i(t)`
+
+
+Synapse: transmission
+~~~~~~~~~~~~~~~~~~~~~
+
+.. list-table::
+
+ * - **Current-based synapses with alpha-function shaped postsynaptic currents (PSCs)**
+
+
+ - Total synaptic input current of neuron :math:`i`
+
+ .. math:: I_i(t)=I_{\text{E},i}(t)+I_{\text{I},i}(t)+I_{\text{X},i}(t)
+
+ * excitatory, inhibitory and external synaptic input currents
+
+ .. math::
+
+ %I_{P,i}(t)=\sum_{j\in\mathcal{P}}(\text{PSC}_{ij}*s_j)(t)
+ %\quad\text{for}\quad
+ %(P,\mathcal{P})\in\{(\exc,\Epop),(\inh,\Ipop),(\ext,\Xpop)\}
+ %,
+ \begin{aligned}
+ I_{\text{E},i}(t)&=\sum_{j\in\mathcal{E}}\bigl(\text{PSC}_{ij}*s_j\bigr)(t-d_{ij})\\
+ I_{\text{I},i}(t)&=\sum_{j\in\mathcal{I}}\bigl(\text{PSC}_{ij}*s_j\bigr)(t-d_{ij})\\
+ I_{\text{X},i}(t)&=\sum_{j\in\mathcal{X}}\bigl(\text{PSC}_{ij}*s_j\bigr)(t-d_{ij})
+ \end{aligned}
+
+ with spike trains :math:`s_j(t)` of local
+ (:math:`j\in\mathcal{E}\cup\mathcal{I}`) and external sources
+ (:math:`j\in\mathcal{X}`), spike transmission delays :math:`d_{ij}`,
+       and convolution operator “:math:`*`” (defined as
+       :math:`\displaystyle\bigl(f*g\bigr)(t)=\int_{-\infty}^\infty\text{d}s\,f(s)g(t-s)`)
+
+ * alpha-function shaped postsynaptic currents
+
+ .. math:: \text{PSC}_{ij}(t)=\hat{I}_{ij}e\tau_\text{s}^{-1}te^{-t/\tau_\text{s}}\Theta(t)
+
+ with synaptic time constant :math:`\tau_\text{s}` and Heaviside
+ function :math:`\Theta(\cdot)`
+
+ * postsynaptic potential triggered by a single presynaptic spike
+
+ .. math::
+
+ \text{PSP}_{ij}(t)=
+ \hat{I}_{ij}\frac{e}{\tau_\text{s}C_\text{m}}
+ \left(\frac{1}{\tau_\text{m}}-\frac{1}{\tau_\text{s}}\right)^{-2}
+ \left(\left(\frac{1}{\tau_\text{m}}-\frac{1}{\tau_\text{s}}\right) t e^{-t/\tau_\text{s}} - e^{-t/\tau_\text{s}} + e^{-t/\tau_\text{m}} \right) \Theta(t)
+
+ * PSC amplitude (synaptic weight)
+
+ .. math::
+
+ \hat{I}_{ij}=\text{max}_t\bigl(\text{PSC}_{ij}(t)\bigr)
+ =\frac{J_{ij}}{J_\text{unit}(\tau_\text{m},\tau_\text{s},C_\text{m})}
+
+ parameterized by PSP amplitude
+ :math:`J_{ij}=\text{max}_t\bigl(\text{PSP}_{ij}(t)\bigr)`
+
+ with unit PSP amplitude (PSP amplitude for :math:`\hat{I}_{ij}=1`):
+
+ .. math::
+
+ J_\text{unit}(\tau_\text{m},\tau_\text{s},C_\text{m})
+ = \frac{e}{C_\text{m}\left(1-\frac{\tau_\text{s}}{\tau_\text{m}}\right)}\left( \frac{e^{-t_\text{max}/\tau_\text{m}} - e^{-t_\text{max}/\tau_\text{s}}}{\frac{1}{\tau_\text{s}} - \frac{1}{\tau_\text{m}}} - t_\text{max}e^{-t_\text{max}/\tau_\text{s}} \right),
+
+ time to PSP maximum
+
+ .. math::
+
+ t_\text{max} =
+ \frac{1}{\frac{1}{\tau_\text{s}} - \frac{1}{\tau_\text{m}}}\left(-W_{-1}\left(\frac{-\tau_\text{s}e^{-\frac{\tau_\text{s}}{\tau_\text{m}}}}{\tau_\text{m}}\right) - \frac{\tau_\text{s}}{\tau_\text{m}}\right),
+
+ and Lambert-W function :math:`\displaystyle W_{-1}(x)` for
+ :math:`\displaystyle x \ge -1/e`
+
+
+
+Synapse: plasticity
+~~~~~~~~~~~~~~~~~~~
+
+.. list-table::
+
+ * - **Spike-timing dependent plasticity (STDP) with power-law weight dependence and all-to-all spike pairing scheme.**
+ See Morrison et al. [2]_ for connections between excitatory neurons.
+
+
+ - Dynamics of synaptic weights :math:`J_{ij}(t)` :math:`\forall{}i\in\mathcal{E}, j\in\mathcal{E}`:
+
+ .. math::
+
+ \begin{aligned}
+ &\forall J_{ij}\ge{}0: \\[1ex]
+ &\quad
+           \frac{\text{d}{}J_{ij}}{\text{d}{}t}=
+           \lambda^+f^+(J_{ij})\sum_k x^+_j(t)\delta\Bigl(t-[t_i^k+d_{ij}]\Bigr)
+           + \lambda^-f^-(J_{ij})\sum_l x^-_i(t)\delta\Bigl(t-[t_j^l-d_{ij}]\Bigr)\\[1ex]
+ &\forall{}\{t|J_{ij}(t)<0\}: \quad J_{ij}(t)=0 \quad \text{(clipping)}
+ \end{aligned}
+
+ with
+
+ - pre- and postsynaptic spike times :math:`\{t_j^l|l=1,2,\ldots\}` and
+ :math:`\{t_i^k|k=1,2,\ldots\}`,
+
+ - magnitude :math:`\lambda^+=\lambda` of weight update for causal
+ firing (postsynaptic spike following presynaptic spikes:
+ :math:`t_i^k>t_j^l`),
+
+ - magnitude :math:`\lambda^-=-\alpha\lambda` of weight update for
+ acausal firing (presynaptic spike following postsynaptic spikes:
+      :math:`t_i^k<t_j^l`)
+
+References
+----------
+
+.. [1] Brunel N. 2000. Dynamics of sparsely connected networks of excitatory
+       and inhibitory spiking neurons. Journal of Computational Neuroscience
+       8(3):183-208.
+
+.. [2] Morrison A, Aertsen A, and Diesmann M. 2007.
+       Spike-timing-dependent plasticity in balanced random networks.
+       Neural Computation. 19(6):1437–1467. doi:10.1162/neco.2007.19.6.1437
+
+.. [3] Senk J, Kriener B, Djurfeldt M, Voges N, Jiang H-J, Schüttler L, et al. 2022.
+ Connectivity concepts in neuronal network modeling. PLoS Comput Biol 18(9): e1010086.
+ https://doi.org/10.1371/journal.pcbi.1010086
diff --git a/pynest/examples/ignore_and_fire/workflow.rst b/pynest/examples/ignore_and_fire/workflow.rst
new file mode 100644
index 0000000000..7b478b34ab
--- /dev/null
+++ b/pynest/examples/ignore_and_fire/workflow.rst
@@ -0,0 +1,30 @@
+Workflow using Snakemake
+========================
+
+A workflow that will run the scaling experiments comparing ``ignore_and_fire`` with the ``iaf_psc_alpha`` is available as a Snakefile.
+If you want to reproduce the results, you can use this workflow.
+
+1. Ensure :ref:`NEST <install_nest>` and `snakemake <https://snakemake.readthedocs.io/en/stable/>`_ are installed in your active environment
+
+2. Clone the NEST repository if you have not already done so.
+
+ .. code-block:: sh
+
+      git clone git@github.com:nest/nest-simulator.git
+
+3. In the nest-simulator repository, go to ``pynest/examples/ignore_and_fire/`` directory.
+
+ .. code-block:: sh
+
+ cd nest-simulator/pynest/examples/ignore_and_fire
+
+4. Run ``snakemake -cN`` where N is the number of cores you want to use.
+
+
+Get the :download:`Snakefile`
+
+
+.. seealso::
+
+ * :ref:`sec_model_description`
+ * :doc:`/auto_examples/ignore_and_fire/index`