diff --git a/pynestml/codegeneration/nest_code_generator.py b/pynestml/codegeneration/nest_code_generator.py index cac3bb601..64ad13f17 100644 --- a/pynestml/codegeneration/nest_code_generator.py +++ b/pynestml/codegeneration/nest_code_generator.py @@ -91,7 +91,7 @@ class NESTCodeGenerator(CodeGenerator): Options: - - **neuron_parent_class**: The C++ class from which the generated NESTML neuron class inherits. Examples: ``"ArchivingNode"``, ``"StructuralPlasticityNode"``. Default: ``"ArchivingNode"``. + - **neuron_parent_class**: The C++ class from which the generated NESTML neuron class inherits. Examples: ``"ArchivingNode"``, ``"StructuralPlasticityNode"``. To generate a model that has the smallest memory footprint, use ``"StructuralPlasticityNode"``. To ensure compatibility with the NEST built-in plastic synapses (like the ``stdp_synapse``), choose ``"ArchivingNode"``. Default: ``"ArchivingNode"``. - **neuron_parent_class_include**: The C++ header filename to include that contains **neuron_parent_class**. Default: ``"archiving_node.h"``. - **neuron_synapse_pairs**: List of pairs of (neuron, synapse) model names. - **neuron_models**: List of neuron model names. Instructs the code generator that models with these names are neuron models. 
@@ -169,6 +169,7 @@ def __init__(self, options: Optional[Mapping[str, Any]] = None): def run_nest_target_specific_cocos(self, neurons: Sequence[ASTModel], synapses: Sequence[ASTModel]): for synapse in synapses: synapse_name_stripped = removesuffix(removesuffix(synapse.name.split("_with_")[0], "_"), FrontendConfiguration.suffix) + assert synapse_name_stripped in self.get_option("delay_variable").keys(), "Please specify a ``delay_variable`` for the synapse '" + synapse_name_stripped + "'" delay_variable = self.get_option("delay_variable")[synapse_name_stripped] CoCoNESTSynapseDelayNotAssignedTo.check_co_co(delay_variable, synapse) if Logger.has_errors(synapse): diff --git a/pynestml/codegeneration/resources_nest/point_neuron/common/NeuronClass.jinja2 b/pynestml/codegeneration/resources_nest/point_neuron/common/NeuronClass.jinja2 index 35c86d69b..1f513d8a7 100644 --- a/pynestml/codegeneration/resources_nest/point_neuron/common/NeuronClass.jinja2 +++ b/pynestml/codegeneration/resources_nest/point_neuron/common/NeuronClass.jinja2 @@ -280,7 +280,7 @@ std::vector< std::tuple< int, int > > {{neuronName}}::rport_to_nestml_buffer_idx {%- if paired_synapse is defined %} n_incoming_ = __n.n_incoming_; max_delay_ = __n.max_delay_; - last_spike_ = __n.last_spike_; + last_spike_nestml_ = __n.last_spike_nestml_; // cache initial values {%- for var_name in transferred_variables %} @@ -386,7 +386,7 @@ void {{neuronName}}::init_state_internal_() // state variables for archiving state for paired synapse n_incoming_ = 0; max_delay_ = 0; - last_spike_ = -1.; + last_spike_nestml_ = -1.; // cache initial values {%- for var_name in transferred_variables %} @@ -1004,19 +1004,23 @@ void inline double {{neuronName}}::get_spiketime_ms() const { - return last_spike_; + return last_spike_nestml_; } void {{neuronName}}::register_stdp_connection( double t_first_read, double delay ) { +{%- if neuron_parent_class in ["ArchivingNode", "Archiving_Node"] %} + {{ neuron_parent_class 
}}::register_stdp_connection(t_first_read, delay); +{%- endif %} + // Mark all entries in the deque, which we will not read in future as read by // this input input, so that we safely increment the incoming number of // connections afterwards without leaving spikes in the history. // For details see bug #218. MH 08-04-22 - for ( std::deque< histentry__{{neuronName}} >::iterator runner = history_.begin(); - runner != history_.end() and ( t_first_read - runner->t_ > -1.0 * nest::kernel().connection_manager.get_stdp_eps() ); + for ( std::deque< histentry__{{neuronName}} >::iterator runner = history_nestml_.begin(); + runner != history_nestml_.end() and ( t_first_read - runner->t_ > -1.0 * nest::kernel().connection_manager.get_stdp_eps() ); ++runner ) { ( runner->access_counter_ )++; @@ -1029,26 +1033,26 @@ void void -{{neuronName}}::get_history__( double t1, +{{neuronName}}::get_history_nestml_( double t1, double t2, std::deque< histentry__{{neuronName}} >::iterator* start, std::deque< histentry__{{neuronName}} >::iterator* finish ) { - *finish = history_.end(); - if ( history_.empty() ) + *finish = history_nestml_.end(); + if ( history_nestml_.empty() ) { *start = *finish; return; } - std::deque< histentry__{{neuronName}} >::reverse_iterator runner = history_.rbegin(); + std::deque< histentry__{{neuronName}} >::reverse_iterator runner = history_nestml_.rbegin(); const double t2_lim = t2 + nest::kernel().connection_manager.get_stdp_eps(); const double t1_lim = t1 + nest::kernel().connection_manager.get_stdp_eps(); - while ( runner != history_.rend() and runner->t_ >= t2_lim ) + while ( runner != history_nestml_.rend() and runner->t_ >= t2_lim ) { ++runner; } *finish = runner.base(); - while ( runner != history_.rend() and runner->t_ >= t1_lim ) + while ( runner != history_nestml_.rend() and runner->t_ >= t1_lim ) { runner->access_counter_++; ++runner; @@ -1076,16 +1080,16 @@ void // STDP synapses, and // - there is another, later spike, that is strictly more than // 
(min_global_delay + max_delay_ + eps) away from the new spike (at t_sp_ms) - while ( history_.size() > 1 ) + while ( history_nestml_.size() > 1 ) { - const double next_t_sp = history_[ 1 ].t_; + const double next_t_sp = history_nestml_[ 1 ].t_; // Note that ``access_counter`` now has an extra multiplicative factor equal (``n_incoming_``) to the number of trace values that exist, so that spikes are removed from the history only after they have been read out for the sake of computing each trace. // see https://www.frontiersin.org/files/Articles/1382/fncom-04-00141-r1/image_m/fncom-04-00141-g003.jpg (Potjans et al. 2010) - if ( history_.front().access_counter_ >= n_incoming_ * num_transferred_variables + if ( history_nestml_.front().access_counter_ >= n_incoming_ * num_transferred_variables and t_sp_ms - next_t_sp > max_delay_ + nest::Time::delay_steps_to_ms(nest::kernel().connection_manager.get_min_delay()) + nest::kernel().connection_manager.get_stdp_eps() ) { - history_.pop_front(); + history_nestml_.pop_front(); } else { @@ -1093,15 +1097,15 @@ void } } - if (history_.size() > 0) + if (history_nestml_.size() > 0) { - assert(history_.back().t_ == last_spike_); + assert(history_nestml_.back().t_ == last_spike_nestml_); {%- for var in purely_numeric_state_variables_moved|sort %} - {{ printer.print(utils.get_state_variable_by_name(astnode, var)) }} = history_.back().{{var}}_; + {{ printer.print(utils.get_state_variable_by_name(astnode, var)) }} = history_nestml_.back().{{var}}_; {%- endfor %} {%- for var in analytic_state_variables_moved|sort %} - {{ printer.print(utils.get_state_variable_by_name(astnode, var)) }} = history_.back().{{var}}_; + {{ printer.print(utils.get_state_variable_by_name(astnode, var)) }} = history_nestml_.back().{{var}}_; {%- endfor %} } else @@ -1115,13 +1119,13 @@ void } /** - * update state variables transferred from synapse from `last_spike_` to `t_sp_ms` + * update state variables transferred from synapse from `last_spike_nestml_` to 
`t_sp_ms` * * variables that will be integrated: {{ purely_numeric_state_variables_moved + analytic_state_variables_moved }} **/ const double old___h = V_.__h; - V_.__h = t_sp_ms - last_spike_; + V_.__h = t_sp_ms - last_spike_nestml_; if (V_.__h > 1E-12) { recompute_internal_variables(true); @@ -1155,8 +1159,8 @@ void {{ printer.print(utils.get_variable_by_name(astnode, spike_update.get_variable().get_complete_name())) }} += 1.; {%- endfor %} - last_spike_ = t_sp_ms; - history_.push_back( histentry__{{neuronName}}( last_spike_ + last_spike_nestml_ = t_sp_ms; + history_nestml_.push_back( histentry__{{neuronName}}( last_spike_nestml_ {%- for var in purely_numeric_state_variables_moved|sort %} , get_{{var}}() {%- endfor %} @@ -1168,7 +1172,7 @@ void } else { - last_spike_ = t_sp_ms; + last_spike_nestml_ = t_sp_ms; } } @@ -1176,8 +1180,12 @@ void void {{neuronName}}::clear_history() { - last_spike_ = -1.0; - history_.clear(); +{%- if neuron_parent_class in ["ArchivingNode", "Archiving_Node"] %} + {{ neuron_parent_class }}::clear_history(); +{%- endif %} + + last_spike_nestml_ = -1.0; + history_nestml_.clear(); } @@ -1200,7 +1208,7 @@ double #endif // case when the neuron has not yet spiked - if ( history_.empty() ) + if ( history_nestml_.empty() ) { #ifdef DEBUG std::cout << "{{neuronName}}::get_{{var}}: \thistory empty, returning initial value = " << {{var}}__iv << std::endl; @@ -1210,7 +1218,7 @@ double } // search for the latest post spike in the history buffer that came strictly before `t` - int i = history_.size() - 1; + int i = history_nestml_.size() - 1; double eps = 0.; if ( before_increment ) { @@ -1218,17 +1226,17 @@ double } while ( i >= 0 ) { - if ( t - history_[ i ].t_ >= eps ) + if ( t - history_nestml_[ i ].t_ >= eps ) { #ifdef DEBUG - std::cout<<"{{neuronName}}::get_{{var}}: \tspike occurred at history[i].t_ = " << history_[i].t_ << std::endl; + std::cout<<"{{neuronName}}::get_{{var}}: \tspike occurred at history[i].t_ = " << history_nestml_[i].t_ << 
std::endl; #endif {%- for var_ in purely_numeric_state_variables_moved %} - {{ printer.print(utils.get_variable_by_name(astnode, var_)) }} = history_[ i ].{{var_}}_; + {{ printer.print(utils.get_variable_by_name(astnode, var_)) }} = history_nestml_[ i ].{{var_}}_; {%- endfor %} {%- for var_ in analytic_state_variables_moved %} - {{ printer.print(utils.get_variable_by_name(astnode, var_)) }} = history_[ i ].{{var_}}_; + {{ printer.print(utils.get_variable_by_name(astnode, var_)) }} = history_nestml_[ i ].{{var_}}_; {%- endfor %} /** @@ -1237,10 +1245,10 @@ double * variables that will be integrated: {{ purely_numeric_state_variables_moved + analytic_state_variables_moved }} **/ - if ( t - history_[ i ].t_ >= nest::kernel().connection_manager.get_stdp_eps() ) + if ( t - history_nestml_[ i ].t_ >= nest::kernel().connection_manager.get_stdp_eps() ) { const double old___h = V_.__h; - V_.__h = t - history_[i].t_; + V_.__h = t - history_nestml_[i].t_; assert(V_.__h > 0); recompute_internal_variables(true); @@ -1262,13 +1270,13 @@ double } // this case occurs when the trace was requested at a time precisely at that of the first spike in the history - if ( (!before_increment) and t == history_[ 0 ].t_) + if ( (!before_increment) and t == history_nestml_[ 0 ].t_) { {%- for var_ in purely_numeric_state_variables_moved %} - {{ printer.print(utils.get_state_variable_by_name(astnode, var_)) }} = history_[ 0 ].{{var_}}_; + {{ printer.print(utils.get_state_variable_by_name(astnode, var_)) }} = history_nestml_[ 0 ].{{var_}}_; {%- endfor %} {%- for var_ in analytic_state_variables_moved %} - {{ printer.print(utils.get_state_variable_by_name(astnode, var_)) }} = history_[ 0 ].{{var_}}_; + {{ printer.print(utils.get_state_variable_by_name(astnode, var_)) }} = history_nestml_[ 0 ].{{var_}}_; {%- endfor %} #ifdef DEBUG diff --git a/pynestml/codegeneration/resources_nest/point_neuron/common/NeuronHeader.jinja2 
b/pynestml/codegeneration/resources_nest/point_neuron/common/NeuronHeader.jinja2 index 4be445ce2..8d9eeccf3 100644 --- a/pynestml/codegeneration/resources_nest/point_neuron/common/NeuronHeader.jinja2 +++ b/pynestml/codegeneration/resources_nest/point_neuron/common/NeuronHeader.jinja2 @@ -321,14 +321,9 @@ public: // support for spike archiving /** - * \fn void get_history(long t1, long t2, - * std::deque::iterator* start, - * std::deque::iterator* finish) - * return the spike times (in steps) of spikes which occurred in the range - * (t1,t2]. - * XXX: two underscores to differentiate it from nest::Node::get_history() + * Return the spike times (in steps) of spikes which occurred in the range (t1,t2]. */ - void get_history__( double t1, + void get_history_nestml_( double t1, double t2, std::deque< histentry__{{neuronName}} >::iterator* start, std::deque< histentry__{{neuronName}} >::iterator* finish ); @@ -445,10 +440,10 @@ private: double max_delay_; - double last_spike_; + double last_spike_nestml_; // spiking history needed by stdp synapses - std::deque< histentry__{{neuronName}} > history_; + std::deque< histentry__{{neuronName}} > history_nestml_; // cache for initial values {%- for var in transferred_variables %} diff --git a/pynestml/codegeneration/resources_nest/point_neuron/common/SynapseHeader.h.jinja2 b/pynestml/codegeneration/resources_nest/point_neuron/common/SynapseHeader.h.jinja2 index e50c37573..eebdc3ebd 100644 --- a/pynestml/codegeneration/resources_nest/point_neuron/common/SynapseHeader.h.jinja2 +++ b/pynestml/codegeneration/resources_nest/point_neuron/common/SynapseHeader.h.jinja2 @@ -723,7 +723,7 @@ public: // history[0, ..., t_last_spike - dendritic_delay] have been // incremented by Archiving_Node::register_stdp_connection(). See bug #218 for // details. 
- __target->get_history__( t_lastspike_ - __dendritic_delay, + __target->get_history_nestml_( t_lastspike_ - __dendritic_delay, __t_spike - __dendritic_delay, &start, &finish ); @@ -1269,7 +1269,7 @@ inline void // get spike history in relevant range (t_last_update, t_trig] from postsyn. neuron std::deque< histentry__{{paired_neuron}} >::iterator start; std::deque< histentry__{{paired_neuron}} >::iterator finish; - static_cast<{{paired_neuron}}*>(get_target(t))->get_history__( t_last_update_ - dendritic_delay, t_trig - dendritic_delay, &start, &finish ); + static_cast<{{paired_neuron}}*>(get_target(t))->get_history_nestml_( t_last_update_ - dendritic_delay, t_trig - dendritic_delay, &start, &finish ); // facilitation due to postsyn. spikes since last update double t0 = t_last_update_; diff --git a/tests/nest_tests/resources/iaf_psc_exp_nonlineardendrite_neuron.nestml b/tests/nest_tests/resources/iaf_psc_exp_nonlineardendrite_neuron.nestml new file mode 100644 index 000000000..38e22066a --- /dev/null +++ b/tests/nest_tests/resources/iaf_psc_exp_nonlineardendrite_neuron.nestml @@ -0,0 +1,125 @@ +""" +iaf_psc_exp_nonlineardendrite_neuron.nestml +########################################### + + +Copyright statement ++++++++++++++++++++ + +This file is part of NEST. + +Copyright (C) 2004 The NEST Initiative + +NEST is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. + +NEST is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with NEST. If not, see . 
+""" +model iaf_psc_exp_nonlineardendrite_neuron: + + state: + V_m mV = 0mV # membrane potential in mV + z pA = 0pA # dAP trace + active_dendrite boolean = false + dAP_counts integer = 0 + ref_counts integer = 0 + + equations: + # exponential shaped postsynaptic current kernel + kernel I_kernel1 = exp(-1/tau_syn1*t) + + # alpha shaped postsynaptic current kernel + kernel I_kernel2 = (e/tau_syn2) * t * exp(-t/tau_syn2) + + # exponential shaped postsynaptic current kernel + kernel I_kernel3 = exp(-1/tau_syn3*t) + + # exponential shaped postsynaptic current kernel + kernel I_kernel4 = exp(-1/tau_syn4*t) + + # diff. eq. for membrane potential + recordable inline I_dend pA = convolve(I_kernel2, I_2) * pA + inline I_syn pA = convolve(I_kernel1, I_1) * pA + I_dend - convolve(I_kernel3, I_3) * pA + convolve(I_kernel4, I_4) * pA + I_e + V_m' = -(V_m-E_L)/tau_m + I_syn/C_m + + # diff. eq. for dAP trace + z' = -z/tau_h + + parameters: + C_m pF = 250 pF # capacity of the membrane + tau_m ms = 20 ms # membrane time constant. + tau_syn1 ms = 10 ms # time constant of synaptic current, port 1 + tau_syn2 ms = 10 ms # time constant of synaptic current, port 2 + tau_syn3 ms = 10 ms # time constant of synaptic current, port 3 + tau_syn4 ms = 10 ms # time constant of synaptic current, port 4 + tau_h ms = 400 ms # time constant of the dAP trace + V_th mV = 25 mV # spike threshold + V_reset mV = 0 mV # reset voltage + I_e pA = 0pA # external current. + E_L mV = 0mV # resting potential. 
+ + # dendritic action potential + theta_dAP pA = 60pA # current threshold for a dendritic action potential + I_p pA = 250pA # current clamp value for I_dAP during a dendritic action potential + tau_dAP ms = 60ms # time window over which the dendritic current clamp is active + + # refractory parameters + t_ref ms = 10ms # refractory period + + internals: + dAP_timeout_ticks integer = steps(tau_dAP) + ref_timeout_ticks integer = steps(t_ref) + + input: + I_1 <- spike + I_2 <- spike + I_3 <- spike + I_4 <- spike + + output: + spike + + update: + # solve ODEs + integrate_odes() + + # current-threshold, emit a dendritic action potential + if I_dend > theta_dAP or active_dendrite: + if dAP_counts == 0: + + if active_dendrite == false: + z += 1pA + active_dendrite = true + I_dend = I_p + dAP_counts = dAP_timeout_ticks + else: + I_dend = 0pA + active_dendrite = false + + else: + dAP_counts -= 1 + I_dend = I_p + + # threshold crossing and refractoriness + if ref_counts == 0: + if V_m > V_th: + emit_spike() + ref_counts = ref_timeout_ticks + V_m = V_reset + dAP_counts = 0 + I_dend = 0pA + active_dendrite = false + else: + ref_counts -= 1 + V_m = V_reset + active_dendrite = false + dAP_counts = 0 + I_dend = 0pA diff --git a/tests/nest_tests/resources/stdsp_no_permanence_synapse.nestml b/tests/nest_tests/resources/stdsp_no_permanence_synapse.nestml new file mode 100644 index 000000000..b099d4dfe --- /dev/null +++ b/tests/nest_tests/resources/stdsp_no_permanence_synapse.nestml @@ -0,0 +1,84 @@ +""" +stdsp_no_permanence_synapse.nestml +################################## + + +Copyright statement ++++++++++++++++++++ + +This file is part of NEST. + +Copyright (C) 2004 The NEST Initiative + +NEST is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 2 of the License, or +(at your option) any later version. 
+ +NEST is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with NEST. If not, see . +""" +model stdsp_no_permanence_synapse: + + state: + w real = 1 + t_last_pre_spike ms = -1 ms + + parameters: + d ms = 2.0 ms + lambda real = .01 + lambda_minus real = .01 + tau_tr_pre ms = 20 ms + tau_tr_post ms = 20 ms + tau_perm ms = 18500 ms + mu_plus real = 1 + Wmax real = 100. + Wmin real = 0. + dt_min ms = - 5. ms + dt_max ms = - 50. ms + + equations: + kernel pre_trace_kernel = exp(-t / tau_tr_pre) + inline pre_trace real = convolve(pre_trace_kernel, pre_spikes) + + # all-to-all trace of postsynaptic neuron + kernel post_trace_kernel = exp(-t / tau_tr_post) + inline post_trace real = convolve(post_trace_kernel, post_spikes) + + w' = (Wmin-w) / tau_perm + + input: + pre_spikes <- spike + post_spikes <- spike + z_post pA <- continuous + + output: + spike + + onReceive(post_spikes): + delta_t ms = t_last_pre_spike - ( t + d ) + # potentiate synapse + w_ real = 0. 
+ if delta_t > dt_max and delta_t < dt_min: + w_ = w + Wmax * lambda * ( w / Wmax )**mu_plus * pre_trace + w = min(Wmax, w_) + elif delta_t > dt_min: + w_ = w + Wmax * lambda * ( w / Wmax )**mu_plus * ( pre_trace - exp( delta_t / tau_tr_pre) ) + w = min(Wmax, w_) + + onReceive(pre_spikes): + t_last_pre_spike = t + # depress synapse + w_ real = w - lambda_minus * Wmax + w = max(Wmin, w_) + + # deliver spike to postsynaptic partner + emit_spike(w, d) + + update: + integrate_odes() diff --git a/tests/nest_tests/test_built_in_and_nestml_plastic_synapse.py b/tests/nest_tests/test_built_in_and_nestml_plastic_synapse.py new file mode 100644 index 000000000..6b99fa08f --- /dev/null +++ b/tests/nest_tests/test_built_in_and_nestml_plastic_synapse.py @@ -0,0 +1,143 @@ +# -*- coding: utf-8 -*- +# +# test_built_in_and_nestml_plastic_synapse.py +# +# This file is part of NEST. +# +# Copyright (C) 2004 The NEST Initiative +# +# NEST is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 2 of the License, or +# (at your option) any later version. +# +# NEST is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with NEST. If not, see . 
import os
import os.path

import nest
import numpy as np
import pytest

from pynestml.codegeneration.nest_tools import NESTTools
from pynestml.frontend.pynestml_frontend import generate_nest_target


@pytest.mark.skipif(NESTTools.detect_nest_version().startswith("v2"),
                    reason="This test does not support NEST 2")
class TestBuiltInAndNESTMLPlasticSynapse:
    r"""Test that synaptic plasticity works with both a NEST built-in plastic
    synapse and a NESTML custom plastic synapse attached to the same neuron."""

    neuron_model = "iaf_psc_exp_nonlineardendrite_neuron"
    synapse_model = "stdsp_no_permanence_synapse"

    def setup_nest(self):
        """Generate C++ code for the NESTML neuron/synapse pair and build it
        into a loadable NEST extension module."""
        files = [f"{self.neuron_model}.nestml",
                 f"{self.synapse_model}.nestml"]
        input_path = [os.path.realpath(os.path.join(os.path.dirname(__file__), "resources", s)) for s in files]

        generate_nest_target(
            input_path=input_path,
            logging_level="DEBUG",
            module_name=f"nestml_{self.neuron_model}_{self.synapse_model}_module",
            suffix="_nestml",
            codegen_opts={
                "neuron_synapse_pairs": [
                    {
                        "neuron": self.neuron_model,
                        "synapse": self.synapse_model,
                        "post_ports": ["post_spikes", ["z_post", "z"]],
                    }
                ],
                # the NEST code generator requires these to be named explicitly
                # for custom synapse models
                "delay_variable": {self.synapse_model: "d"},
                "weight_variable": {self.synapse_model: "w"},
            },
        )

    def _test_plasticity(self, neuron_model, synapse_model):
        """Run a pre/post spike-pairing protocol through ``synapse_model`` and
        assert that its weight changed by the end of the simulation.

        :param neuron_model: name of the (NESTML-generated) neuron model to instantiate.
        :param synapse_model: name of the plastic synapse model under test
            (either the NESTML-generated one or NEST's built-in ``stdp_synapse``).
        """
        # fixed typo in log message: "mode" -> "model"
        print("testing plasticity for synapse model " + str(synapse_model))

        # parameters
        Jns = 3000.0           # stimulus weight (presumably pA -- confirm against neuron model)
        t_stop = 500.0         # [ms]
        initial_weight = 123.

        nest.ResetKernel()
        nest.Install(f"nestml_{self.neuron_model}_{self.synapse_model}_module")

        # create pre and post neurons
        pre_neuron = nest.Create(neuron_model)
        post_neuron = nest.Create(neuron_model)

        syn_spec = {
            "synapse_model": synapse_model,
            "receptor_type": 1,  # external input
            "lambda": 1E-3,
            "weight": initial_weight,
        }

        # ``lambda_minus`` exists only on the NESTML synapse, not on the
        # built-in ``stdp_synapse``
        if synapse_model != "stdp_synapse":
            syn_spec["lambda_minus"] = 1E-4

        # connect pre and post
        nest.Connect(
            pre_neuron,
            post_neuron,
            syn_spec=syn_spec,
        )

        # create and connect stimulus sources; post spikes trail pre spikes by
        # 10 ms so the pairing protocol induces a weight change
        pre_stimulus = nest.Create(
            "spike_generator", {"spike_times": [float(5 * i) for i in range(1, 200)]}
        )
        post_stimulus = nest.Create(
            "spike_generator", {"spike_times": [float(10 + 5 * i) for i in range(1, 200)]}
        )
        sr_pre = nest.Create("spike_recorder")
        nest.Connect(pre_neuron, sr_pre)
        sr_post = nest.Create("spike_recorder")
        nest.Connect(post_neuron, sr_post)

        nest.Connect(pre_stimulus, pre_neuron, syn_spec={"weight": Jns, "receptor_type": 1})
        nest.Connect(
            post_stimulus, post_neuron, syn_spec={"weight": Jns, "receptor_type": 1}
        )

        # sanity check: the weight starts at its requested initial value
        connection_before = nest.GetConnections(synapse_model=synapse_model)
        weight_before = connection_before.get("weight")
        np.testing.assert_allclose(initial_weight, weight_before)

        print("\nconnections before learning:")
        print(connection_before)

        # simulate
        nest.Simulate(t_stop)

        connection_after = nest.GetConnections(synapse_model=synapse_model)
        weight_after = connection_after.get("weight")

        print("\nconnections after learning:")
        print(connection_after)

        assert np.abs(weight_before - weight_after) > 1., "Weight did not change during STDP induction protocol!"

    def test_plasticity(self):
        """Run the protocol once with the NESTML synapse and once with the
        built-in ``stdp_synapse``, both attached to the same NESTML neuron."""
        self.setup_nest()

        self._test_plasticity(
            neuron_model=f"{self.neuron_model}_nestml__with_{self.synapse_model}_nestml",
            synapse_model=f"{self.synapse_model}_nestml__with_{self.neuron_model}_nestml",
        )

        self._test_plasticity(
            neuron_model=f"{self.neuron_model}_nestml__with_{self.synapse_model}_nestml",
            synapse_model="stdp_synapse",
        )