diff --git a/at_time/neuron/example.cpp b/at_time/neuron/example.cpp index 7a73114d..e8926442 100644 --- a/at_time/neuron/example.cpp +++ b/at_time/neuron/example.cpp @@ -550,10 +550,9 @@ namespace neuron { static void nrn_alloc_example(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 1); /*initialize range parameters*/ } diff --git a/cnexp/neuron/cnexp_array.cpp b/cnexp/neuron/cnexp_array.cpp index 9797e947..7e1114fa 100644 --- a/cnexp/neuron/cnexp_array.cpp +++ b/cnexp/neuron/cnexp_array.cpp @@ -568,10 +568,9 @@ namespace neuron { static void nrn_alloc_cnexp_array(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 7); /*initialize range parameters*/ } diff --git a/cnexp/neuron/cnexp_scalar.cpp b/cnexp/neuron/cnexp_scalar.cpp index 7fb2d69a..8cca3c4c 100644 --- a/cnexp/neuron/cnexp_scalar.cpp +++ b/cnexp/neuron/cnexp_scalar.cpp @@ -559,10 +559,9 @@ namespace neuron { static void nrn_alloc_cnexp_scalar(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 4); /*initialize range parameters*/ } diff --git a/function/neuron/compile_only.cpp b/function/neuron/compile_only.cpp index fc8d51a0..642c4db0 100644 --- a/function/neuron/compile_only.cpp +++ b/function/neuron/compile_only.cpp @@ -556,10 +556,9 @@ namespace neuron { static void nrn_alloc_func_in_breakpoint(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 3); /*initialize range parameters*/ } diff --git a/function/neuron/functions.cpp b/function/neuron/functions.cpp index 879660f9..e124fffd 100644 --- a/function/neuron/functions.cpp +++ b/function/neuron/functions.cpp @@ -553,10 +553,9 @@ namespace neuron { static void nrn_alloc_functions(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 2); /*initialize range parameters*/ } diff --git a/function/neuron/point_functions.cpp b/function/neuron/point_functions.cpp index 12ab24c5..c6854e33 100644 --- a/function/neuron/point_functions.cpp +++ b/function/neuron/point_functions.cpp @@ -553,8 +553,7 @@ namespace neuron { static void nrn_alloc_point_functions(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -562,7 +561,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 2); /*initialize range parameters*/ } diff --git a/function/neuron/recursion.cpp b/function/neuron/recursion.cpp index 16e86fb8..85d86a77 100644 --- a/function/neuron/recursion.cpp +++ 
b/function/neuron/recursion.cpp @@ -550,10 +550,9 @@ namespace neuron { static void nrn_alloc_recursion(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 1); /*initialize range parameters*/ } diff --git a/global/neuron/read_only.cpp b/global/neuron/read_only.cpp index 17199b7c..8678abef 100644 --- a/global/neuron/read_only.cpp +++ b/global/neuron/read_only.cpp @@ -559,10 +559,9 @@ namespace neuron { static void nrn_alloc_read_only(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 4); /*initialize range parameters*/ } diff --git a/global/neuron/thread_newton.cpp b/global/neuron/thread_newton.cpp index a0e37863..3d991383 100644 --- a/global/neuron/thread_newton.cpp +++ b/global/neuron/thread_newton.cpp @@ -581,10 +581,9 @@ namespace neuron { static void nrn_alloc_thread_newton(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 5); /*initialize range parameters*/ } diff --git a/global/neuron/thread_variable.cpp b/global/neuron/thread_variable.cpp index ef72cd47..39cafc5a 100644 --- a/global/neuron/thread_variable.cpp +++ b/global/neuron/thread_variable.cpp @@ -597,10 +597,9 @@ namespace neuron { static void nrn_alloc_shared_global(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 5); /*initialize range parameters*/ } diff --git a/global/neuron/top_local.cpp b/global/neuron/top_local.cpp index c9614930..e85c7d55 100644 --- a/global/neuron/top_local.cpp +++ b/global/neuron/top_local.cpp @@ -577,10 +577,9 @@ namespace neuron { static void nrn_alloc_top_local(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 4); /*initialize range parameters*/ } diff --git a/hodgkin_huxley/neuron/hodhux.cpp b/hodgkin_huxley/neuron/hodhux.cpp index 1f3a130b..d8c3e7bf 100644 --- a/hodgkin_huxley/neuron/hodhux.cpp +++ b/hodgkin_huxley/neuron/hodhux.cpp @@ -628,12 +628,11 @@ namespace neuron { static void nrn_alloc_hodhux(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _ppvar = nrn_prop_datum_alloc(mech_type, 6, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 23); /*initialize range parameters*/ _lmc.template fpfield<0>(_iml) = 0.12; /* gnabar */ @@ -643,11 +642,13 @@ namespace neuron { _nrn_mechanism_access_dparam(_prop) = _ppvar; Symbol * na_sym = hoc_lookup("na_ion"); Prop * na_prop = need_memb(na_sym); + nrn_promote(na_prop, 0, 1); _ppvar[0] = _nrn_mechanism_get_param_handle(na_prop, 0); _ppvar[1] = _nrn_mechanism_get_param_handle(na_prop, 3); _ppvar[2] = _nrn_mechanism_get_param_handle(na_prop, 4); Symbol * k_sym = hoc_lookup("k_ion"); Prop * k_prop = need_memb(k_sym); + nrn_promote(k_prop, 0, 1); _ppvar[3] = 
_nrn_mechanism_get_param_handle(k_prop, 0); _ppvar[4] = _nrn_mechanism_get_param_handle(k_prop, 3); _ppvar[5] = _nrn_mechanism_get_param_handle(k_prop, 4); diff --git a/kinetic/neuron/X2Y.cpp b/kinetic/neuron/X2Y.cpp index 729023e6..3bc15a2f 100644 --- a/kinetic/neuron/X2Y.cpp +++ b/kinetic/neuron/X2Y.cpp @@ -570,10 +570,9 @@ namespace neuron { static void nrn_alloc_X2Y(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 8); /*initialize range parameters*/ } diff --git a/morphology/neuron/two_radii.cpp b/morphology/neuron/two_radii.cpp index 8003de5f..88d49822 100644 --- a/morphology/neuron/two_radii.cpp +++ b/morphology/neuron/two_radii.cpp @@ -562,12 +562,11 @@ namespace neuron { static void nrn_alloc_two_radii(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 4); /*initialize range parameters*/ _nrn_mechanism_access_dparam(_prop) = _ppvar; diff --git a/net_event/neuron/receiver.cpp b/net_event/neuron/receiver.cpp index 5a630e30..67dc8ea7 100644 --- a/net_event/neuron/receiver.cpp +++ b/net_event/neuron/receiver.cpp @@ -555,8 +555,7 @@ namespace neuron { static void nrn_alloc_receiver(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -564,7 +563,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 3); /*initialize range parameters*/ } diff --git a/net_event/neuron/spiker.cpp b/net_event/neuron/spiker.cpp index eb1bcae5..17c39f50 100644 --- a/net_event/neuron/spiker.cpp +++ b/net_event/neuron/spiker.cpp @@ -555,8 +555,7 @@ namespace neuron { static void nrn_alloc_spiker(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -564,7 +563,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 3); /*initialize range parameters*/ } diff --git a/net_move/neuron/art_spiker.cpp b/net_move/neuron/art_spiker.cpp index 42b3889e..2f5d6fbf 100644 --- a/net_move/neuron/art_spiker.cpp +++ b/net_move/neuron/art_spiker.cpp @@ -559,8 +559,7 @@ namespace neuron { static void nrn_alloc_art_spiker(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -568,7 +567,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 3, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; 
_nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 4); /*initialize range parameters*/ } diff --git a/net_move/neuron/spiker.cpp b/net_move/neuron/spiker.cpp index 627e38ee..2c529523 100644 --- a/net_move/neuron/spiker.cpp +++ b/net_move/neuron/spiker.cpp @@ -559,8 +559,7 @@ namespace neuron { static void nrn_alloc_spiker(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -568,7 +567,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 3, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 4); /*initialize range parameters*/ } diff --git a/net_receive/neuron/NetReceiveCalls.cpp b/net_receive/neuron/NetReceiveCalls.cpp index b5c224da..05e29277 100644 --- a/net_receive/neuron/NetReceiveCalls.cpp +++ b/net_receive/neuron/NetReceiveCalls.cpp @@ -558,8 +558,7 @@ namespace neuron { static void nrn_alloc_NetReceiveCalls(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -567,7 +566,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 4); /*initialize range parameters*/ } diff --git a/net_receive/neuron/snapsyn.cpp b/net_receive/neuron/snapsyn.cpp index 217f1297..e854f600 100644 --- a/net_receive/neuron/snapsyn.cpp +++ b/net_receive/neuron/snapsyn.cpp @@ -562,8 +562,7 @@ namespace neuron { static void nrn_alloc_SnapSyn(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -571,7 +570,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 6); /*initialize range parameters*/ _lmc.template fpfield<0>(_iml) = 10; /* e */ diff --git a/net_send/neuron/art_toggle.cpp b/net_send/neuron/art_toggle.cpp index 69dcd358..f54502fc 100644 --- a/net_send/neuron/art_toggle.cpp +++ b/net_send/neuron/art_toggle.cpp @@ -556,8 +556,7 @@ namespace neuron { static void nrn_alloc_art_toggle(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -565,7 +564,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 3, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 3); /*initialize range parameters*/ } diff --git a/net_send/neuron/toggle.cpp 
b/net_send/neuron/toggle.cpp index b830ee7d..ce4271e8 100644 --- a/net_send/neuron/toggle.cpp +++ b/net_send/neuron/toggle.cpp @@ -556,8 +556,7 @@ namespace neuron { static void nrn_alloc_toggle(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -565,7 +564,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 3, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 3); /*initialize range parameters*/ } diff --git a/neuron_variables/neuron/neuron_variables.cpp b/neuron_variables/neuron/neuron_variables.cpp index ad83e830..ac060333 100644 --- a/neuron_variables/neuron/neuron_variables.cpp +++ b/neuron_variables/neuron/neuron_variables.cpp @@ -558,10 +558,9 @@ namespace neuron { static void nrn_alloc_NeuronVariables(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 3); /*initialize range parameters*/ } diff --git a/nonspecific_current/neuron/leonhard.cpp b/nonspecific_current/neuron/leonhard.cpp index a7b0d6a4..0116e378 100644 --- a/nonspecific_current/neuron/leonhard.cpp +++ b/nonspecific_current/neuron/leonhard.cpp @@ -558,10 +558,9 @@ namespace neuron { static void nrn_alloc_leonhard(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 4); /*initialize range parameters*/ _lmc.template fpfield<0>(_iml) = 0.005; /* c */ diff --git a/parameter/neuron/range_parameter.cpp b/parameter/neuron/range_parameter.cpp index 2a464933..8ad50e0b 100644 --- a/parameter/neuron/range_parameter.cpp +++ b/parameter/neuron/range_parameter.cpp @@ -556,8 +556,7 @@ namespace neuron { static void nrn_alloc_range_parameter(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -565,7 +564,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 3); /*initialize range parameters*/ _lmc.template fpfield<0>(_iml) = 42; /* x */ diff --git a/point_process/neuron/pp.cpp b/point_process/neuron/pp.cpp index 74fc59ba..9e284863 100644 --- a/point_process/neuron/pp.cpp +++ b/point_process/neuron/pp.cpp @@ -550,8 +550,7 @@ namespace neuron { static void nrn_alloc_pp(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -559,7 +558,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; 
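// Illustrative aside, not part of the generated output: the recurring replacements in these
// nrn_alloc_* hunks are purely syntactic. For pointers and integers, value-initialization and
// explicit initialization yield the same value, e.g.
//     Datum* p{};          // value-initialized, p == nullptr
//     Datum* q = nullptr;  // same result, spelled out
//     size_t const a{};    // value-initialized, a == 0
//     size_t const b = 0;  // same result, spelled out
// and removing the unused `prop_ion` local has no behavioral effect either.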
assert(_nrn_mechanism_get_num_vars(_prop) == 1); /*initialize range parameters*/ } diff --git a/procedure/neuron/point_procedures.cpp b/procedure/neuron/point_procedures.cpp index 2b44512f..51d0fb72 100644 --- a/procedure/neuron/point_procedures.cpp +++ b/procedure/neuron/point_procedures.cpp @@ -553,8 +553,7 @@ namespace neuron { static void nrn_alloc_point_procedures(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -562,7 +561,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 2); /*initialize range parameters*/ } diff --git a/procedure/neuron/procedures.cpp b/procedure/neuron/procedures.cpp index e3106330..833448c3 100644 --- a/procedure/neuron/procedures.cpp +++ b/procedure/neuron/procedures.cpp @@ -553,10 +553,9 @@ namespace neuron { static void nrn_alloc_procedures(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 2); /*initialize range parameters*/ } diff --git a/spike_travel/neuron/expsyn2.cpp b/spike_travel/neuron/expsyn2.cpp index 3435e315..275f5f7a 100644 --- a/spike_travel/neuron/expsyn2.cpp +++ b/spike_travel/neuron/expsyn2.cpp @@ -570,8 +570,7 @@ namespace neuron { static void nrn_alloc_ExpSyn2(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -579,7 +578,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 8); /*initialize range parameters*/ _lmc.template fpfield<0>(_iml) = 0.1; /* tau */ diff --git a/spike_travel/neuron/hodhux.cpp b/spike_travel/neuron/hodhux.cpp index 1f3a130b..d8c3e7bf 100644 --- a/spike_travel/neuron/hodhux.cpp +++ b/spike_travel/neuron/hodhux.cpp @@ -628,12 +628,11 @@ namespace neuron { static void nrn_alloc_hodhux(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _ppvar = nrn_prop_datum_alloc(mech_type, 6, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 23); /*initialize range parameters*/ _lmc.template fpfield<0>(_iml) = 0.12; /* gnabar */ @@ -643,11 +642,13 @@ namespace neuron { _nrn_mechanism_access_dparam(_prop) = _ppvar; Symbol * na_sym = hoc_lookup("na_ion"); Prop * na_prop = need_memb(na_sym); + nrn_promote(na_prop, 0, 1); _ppvar[0] = _nrn_mechanism_get_param_handle(na_prop, 0); _ppvar[1] = _nrn_mechanism_get_param_handle(na_prop, 3); _ppvar[2] = _nrn_mechanism_get_param_handle(na_prop, 4); Symbol * k_sym = hoc_lookup("k_ion"); Prop * k_prop = need_memb(k_sym); + nrn_promote(k_prop, 0, 1); _ppvar[3] = _nrn_mechanism_get_param_handle(k_prop, 0); _ppvar[4] = _nrn_mechanism_get_param_handle(k_prop, 3); 
_ppvar[5] = _nrn_mechanism_get_param_handle(k_prop, 4); diff --git a/suffix/neuron/no_suffix.cpp b/suffix/neuron/no_suffix.cpp index a86edf4c..716c8a1c 100644 --- a/suffix/neuron/no_suffix.cpp +++ b/suffix/neuron/no_suffix.cpp @@ -553,10 +553,9 @@ namespace neuron { static void nrn_alloc_no_suffix(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 2); /*initialize range parameters*/ } diff --git a/suffix/neuron/point_suffix.cpp b/suffix/neuron/point_suffix.cpp index 947dbdb9..c14cf45f 100644 --- a/suffix/neuron/point_suffix.cpp +++ b/suffix/neuron/point_suffix.cpp @@ -553,8 +553,7 @@ namespace neuron { static void nrn_alloc_point_suffix(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; if (nrn_point_prop_) { _nrn_mechanism_access_alloc_seq(_prop) = _nrn_mechanism_access_alloc_seq(nrn_point_prop_); _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_); @@ -562,7 +561,7 @@ namespace neuron { _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 2); /*initialize range parameters*/ } diff --git a/table/neuron/table.cpp b/table/neuron/table.cpp index 3318d13c..2a92582e 100644 --- a/table/neuron/table.cpp +++ b/table/neuron/table.cpp @@ -582,10 +582,9 @@ namespace neuron { static void nrn_alloc_tbl(Prop* _prop) { - Prop *prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 7); /*initialize range parameters*/ } diff --git a/useion/coreneuron/read_cai.cpp b/useion/coreneuron/read_cai.cpp new file mode 100644 index 00000000..a6cc491f --- /dev/null +++ b/useion/coreneuron/read_cai.cpp @@ -0,0 +1,262 @@ +/********************************************************* +Model Name : read_cai +Filename : read_cai.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : CoreNEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace coreneuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "read_cai", + 0, + "x_read_cai", + 0, + 0, + 0 + }; + + + /** all global variables */ + struct read_cai_Store { + int ca_type{}; + int reset{}; + int mech_type{}; + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + read_cai_Store read_cai_global; + + + /** all mechanism instance variables and global variables */ + struct read_cai_Instance { + double* x{}; + double* cai{}; + double* v_unused{}; + const double* ion_cai{}; + const double* ion_cao{}; + read_cai_Store* global{&read_cai_global}; + }; + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** 
connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + static inline int first_pointer_var_index() { + return -1; + } + + + static inline int first_random_var_index() { + return -1; + } + + + static inline int float_variables_size() { + return 3; + } + + + static inline int int_variables_size() { + return 2; + } + + + static inline int get_mech_type() { + return read_cai_global.mech_type; + } + + + static inline Memb_list* get_memb_list(NrnThread* nt) { + if (!nt->_ml_list) { + return nullptr; + } + return nt->_ml_list[get_mech_type()]; + } + + + static inline void* mem_alloc(size_t num, size_t size, size_t alignment = 16) { + void* ptr; + posix_memalign(&ptr, alignment, num*size); + memset(ptr, 0, size); + return ptr; + } + + + static inline void mem_free(void* ptr) { + free(ptr); + } + + + static inline void coreneuron_abort() { + abort(); + } + + // Allocate instance structure + static void nrn_private_constructor_read_cai(NrnThread* nt, Memb_list* ml, int type) { + assert(!ml->instance); + assert(!ml->global_variables); + assert(ml->global_variables_size == 0); + auto* const inst = new read_cai_Instance{}; + assert(inst->global == &read_cai_global); + ml->instance = inst; + ml->global_variables = inst->global; + ml->global_variables_size = sizeof(read_cai_Store); + } + + // Deallocate the instance structure + static void nrn_private_destructor_read_cai(NrnThread* nt, Memb_list* ml, int type) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &read_cai_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(read_cai_Store)); + delete inst; + ml->instance = nullptr; + ml->global_variables = nullptr; + ml->global_variables_size = 0; + } + + /** initialize mechanism instance variables */ + static inline void setup_instance(NrnThread* nt, Memb_list* ml) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &read_cai_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(read_cai_Store)); + int pnodecount = ml->_nodecount_padded; + Datum* indexes = ml->pdata; + inst->x = ml->data+0*pnodecount; + inst->cai = ml->data+1*pnodecount; + inst->v_unused = ml->data+2*pnodecount; + inst->ion_cai = nt->_data; + inst->ion_cao = nt->_data; + } + + + + static void nrn_alloc_read_cai(double* data, Datum* indexes, int type) { + // do nothing + } + + + void nrn_constructor_read_cai(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + void nrn_destructor_read_cai(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + /** initialize channel */ + void nrn_init_read_cai(NrnThread* nt, Memb_list* ml, int type) { + int nodecount = ml->nodecount; + int pnodecount = 
ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + + setup_instance(nt, ml); + auto* const inst = static_cast(ml->instance); + + if (_nrn_skip_initmodel == 0) { + #pragma omp simd + #pragma ivdep + for (int id = 0; id < nodecount; id++) { + int node_id = node_index[id]; + double v = voltage[node_id]; + #if NRN_PRCELLSTATE + inst->v_unused[id] = v; + #endif + inst->cai[id] = inst->ion_cai[indexes[0*pnodecount + id]]; + inst->x[id] = inst->cai[id]; + } + } + } + + + /** register channel with the simulator */ + void _read_cai_reg() { + + int mech_type = nrn_get_mechtype("read_cai"); + read_cai_global.mech_type = mech_type; + if (mech_type == -1) { + return; + } + + _nrn_layout_reg(mech_type, 0); + register_mech(mechanism_info, nrn_alloc_read_cai, nullptr, nullptr, nullptr, nrn_init_read_cai, nrn_private_constructor_read_cai, nrn_private_destructor_read_cai, first_pointer_var_index(), 1); + read_cai_global.ca_type = nrn_get_mechtype("ca_ion"); + + hoc_register_prop_size(mech_type, float_variables_size(), int_variables_size()); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 1, "ca_ion"); + hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL); + } +} diff --git a/useion/coreneuron/read_cao.cpp b/useion/coreneuron/read_cao.cpp new file mode 100644 index 00000000..51e9137c --- /dev/null +++ b/useion/coreneuron/read_cao.cpp @@ -0,0 +1,262 @@ +/********************************************************* +Model Name : read_cao +Filename : read_cao.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : CoreNEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace coreneuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "read_cao", + 0, + "x_read_cao", + 0, + 0, + 0 + }; + + + /** all global variables */ + struct read_cao_Store { + int ca_type{}; + int reset{}; + int mech_type{}; + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + read_cao_Store read_cao_global; + + + /** all mechanism instance variables and global variables */ + struct read_cao_Instance { + double* x{}; + double* cao{}; + double* v_unused{}; + const double* ion_cao{}; + const double* ion_cai{}; + read_cao_Store* global{&read_cao_global}; + }; + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + static inline int first_pointer_var_index() { + return -1; + } + + + static inline int first_random_var_index() { + return -1; + } + + + static inline int float_variables_size() { + return 3; + } + + + static inline int int_variables_size() { + return 2; + } + + + static inline int get_mech_type() { + return read_cao_global.mech_type; + } + + + 
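// Illustrative aside, not emitted by the NMODL compiler: the instance pointers wired up in
// setup_instance() below follow CoreNEURON's padded structure-of-arrays layout, so range
// variable k of instance `id` lives at ml->data[k*pnodecount + id], e.g.
//     double x_val   = ml->data[0*pnodecount + id];                 // inst->x[id]
//     double cao_val = ml->data[1*pnodecount + id];                 // inst->cao[id]
// while ion values owned by ca_ion are reached indirectly through pdata,
//     double ion_cao = nt->_data[indexes[0*pnodecount + id]];       // inst->ion_cao[...]
// which is the access pattern used in nrn_init_read_cao() further down.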
static inline Memb_list* get_memb_list(NrnThread* nt) { + if (!nt->_ml_list) { + return nullptr; + } + return nt->_ml_list[get_mech_type()]; + } + + + static inline void* mem_alloc(size_t num, size_t size, size_t alignment = 16) { + void* ptr; + posix_memalign(&ptr, alignment, num*size); + memset(ptr, 0, size); + return ptr; + } + + + static inline void mem_free(void* ptr) { + free(ptr); + } + + + static inline void coreneuron_abort() { + abort(); + } + + // Allocate instance structure + static void nrn_private_constructor_read_cao(NrnThread* nt, Memb_list* ml, int type) { + assert(!ml->instance); + assert(!ml->global_variables); + assert(ml->global_variables_size == 0); + auto* const inst = new read_cao_Instance{}; + assert(inst->global == &read_cao_global); + ml->instance = inst; + ml->global_variables = inst->global; + ml->global_variables_size = sizeof(read_cao_Store); + } + + // Deallocate the instance structure + static void nrn_private_destructor_read_cao(NrnThread* nt, Memb_list* ml, int type) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &read_cao_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(read_cao_Store)); + delete inst; + ml->instance = nullptr; + ml->global_variables = nullptr; + ml->global_variables_size = 0; + } + + /** initialize mechanism instance variables */ + static inline void setup_instance(NrnThread* nt, Memb_list* ml) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &read_cao_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(read_cao_Store)); + int pnodecount = ml->_nodecount_padded; + Datum* indexes = ml->pdata; + inst->x = ml->data+0*pnodecount; + inst->cao = ml->data+1*pnodecount; + inst->v_unused = ml->data+2*pnodecount; + inst->ion_cao = nt->_data; + inst->ion_cai = nt->_data; + } + + + + static void nrn_alloc_read_cao(double* data, Datum* indexes, int type) { + // do nothing + } + + + void nrn_constructor_read_cao(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + void nrn_destructor_read_cao(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + /** initialize channel */ + void nrn_init_read_cao(NrnThread* nt, Memb_list* ml, int type) { + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + + setup_instance(nt, ml); + auto* const inst = static_cast(ml->instance); + + if (_nrn_skip_initmodel == 0) { + #pragma omp simd + #pragma ivdep + for (int id = 0; id < nodecount; id++) { + int node_id = node_index[id]; + double v = voltage[node_id]; + #if NRN_PRCELLSTATE + 
inst->v_unused[id] = v; + #endif + inst->cao[id] = inst->ion_cao[indexes[0*pnodecount + id]]; + inst->x[id] = inst->cao[id]; + } + } + } + + + /** register channel with the simulator */ + void _read_cao_reg() { + + int mech_type = nrn_get_mechtype("read_cao"); + read_cao_global.mech_type = mech_type; + if (mech_type == -1) { + return; + } + + _nrn_layout_reg(mech_type, 0); + register_mech(mechanism_info, nrn_alloc_read_cao, nullptr, nullptr, nullptr, nrn_init_read_cao, nrn_private_constructor_read_cao, nrn_private_destructor_read_cao, first_pointer_var_index(), 1); + read_cao_global.ca_type = nrn_get_mechtype("ca_ion"); + + hoc_register_prop_size(mech_type, float_variables_size(), int_variables_size()); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 1, "ca_ion"); + hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL); + } +} diff --git a/useion/coreneuron/read_eca.cpp b/useion/coreneuron/read_eca.cpp new file mode 100644 index 00000000..f4f01c1c --- /dev/null +++ b/useion/coreneuron/read_eca.cpp @@ -0,0 +1,259 @@ +/********************************************************* +Model Name : read_eca +Filename : read_eca.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : CoreNEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace coreneuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "read_eca", + 0, + "x_read_eca", + 0, + 0, + 0 + }; + + + /** all global variables */ + struct read_eca_Store { + int ca_type{}; + int reset{}; + int mech_type{}; + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + read_eca_Store read_eca_global; + + + /** all mechanism instance variables and global variables */ + struct read_eca_Instance { + double* x{}; + double* eca{}; + double* v_unused{}; + const double* ion_eca{}; + read_eca_Store* global{&read_eca_global}; + }; + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + static inline int first_pointer_var_index() { + return -1; + } + + + static inline int first_random_var_index() { + return -1; + } + + + static inline int float_variables_size() { + return 3; + } + + + static inline int int_variables_size() { + return 1; + } + + + static inline int get_mech_type() { + return read_eca_global.mech_type; + } + + + static inline Memb_list* get_memb_list(NrnThread* nt) { + if (!nt->_ml_list) { + return nullptr; + } + return nt->_ml_list[get_mech_type()]; + } + + + static inline void* mem_alloc(size_t num, size_t size, size_t alignment = 16) { + void* ptr; + posix_memalign(&ptr, alignment, num*size); + memset(ptr, 0, size); + return ptr; + } + + + static inline void mem_free(void* ptr) { + free(ptr); + } + + + static inline void coreneuron_abort() { + abort(); + } + + // Allocate instance structure + static void 
nrn_private_constructor_read_eca(NrnThread* nt, Memb_list* ml, int type) { + assert(!ml->instance); + assert(!ml->global_variables); + assert(ml->global_variables_size == 0); + auto* const inst = new read_eca_Instance{}; + assert(inst->global == &read_eca_global); + ml->instance = inst; + ml->global_variables = inst->global; + ml->global_variables_size = sizeof(read_eca_Store); + } + + // Deallocate the instance structure + static void nrn_private_destructor_read_eca(NrnThread* nt, Memb_list* ml, int type) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &read_eca_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(read_eca_Store)); + delete inst; + ml->instance = nullptr; + ml->global_variables = nullptr; + ml->global_variables_size = 0; + } + + /** initialize mechanism instance variables */ + static inline void setup_instance(NrnThread* nt, Memb_list* ml) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &read_eca_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(read_eca_Store)); + int pnodecount = ml->_nodecount_padded; + Datum* indexes = ml->pdata; + inst->x = ml->data+0*pnodecount; + inst->eca = ml->data+1*pnodecount; + inst->v_unused = ml->data+2*pnodecount; + inst->ion_eca = nt->_data; + } + + + + static void nrn_alloc_read_eca(double* data, Datum* indexes, int type) { + // do nothing + } + + + void nrn_constructor_read_eca(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + void nrn_destructor_read_eca(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + /** initialize channel */ + void nrn_init_read_eca(NrnThread* nt, Memb_list* ml, int type) { + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + + setup_instance(nt, ml); + auto* const inst = static_cast(ml->instance); + + if (_nrn_skip_initmodel == 0) { + #pragma omp simd + #pragma ivdep + for (int id = 0; id < nodecount; id++) { + int node_id = node_index[id]; + double v = voltage[node_id]; + #if NRN_PRCELLSTATE + inst->v_unused[id] = v; + #endif + inst->eca[id] = inst->ion_eca[indexes[0*pnodecount + id]]; + inst->x[id] = inst->eca[id]; + } + } + } + + + /** register channel with the simulator */ + void _read_eca_reg() { + + int mech_type = nrn_get_mechtype("read_eca"); + read_eca_global.mech_type = mech_type; + if (mech_type == -1) { + return; + } + + _nrn_layout_reg(mech_type, 0); + register_mech(mechanism_info, nrn_alloc_read_eca, nullptr, nullptr, nullptr, nrn_init_read_eca, nrn_private_constructor_read_eca, 
nrn_private_destructor_read_eca, first_pointer_var_index(), 1); + read_eca_global.ca_type = nrn_get_mechtype("ca_ion"); + + hoc_register_prop_size(mech_type, float_variables_size(), int_variables_size()); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL); + } +} diff --git a/useion/coreneuron/write_cai.cpp b/useion/coreneuron/write_cai.cpp new file mode 100644 index 00000000..54edc7c1 --- /dev/null +++ b/useion/coreneuron/write_cai.cpp @@ -0,0 +1,269 @@ +/********************************************************* +Model Name : write_cai +Filename : write_cai.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : CoreNEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace coreneuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "write_cai", + 0, + 0, + 0, + 0 + }; + + + /** all global variables */ + struct write_cai_Store { + int ca_type{}; + int reset{}; + int mech_type{}; + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + write_cai_Store write_cai_global; + + + /** all mechanism instance variables and global variables */ + struct write_cai_Instance { + double* cai{}; + double* v_unused{}; + const double* ion_cao{}; + double* ion_cai{}; + double* ion_ca_erev{}; + const int* style_ca{}; + write_cai_Store* global{&write_cai_global}; + }; + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + static inline int first_pointer_var_index() { + return -1; + } + + + static inline int first_random_var_index() { + return -1; + } + + + static inline int float_variables_size() { + return 2; + } + + + static inline int int_variables_size() { + return 4; + } + + + static inline int get_mech_type() { + return write_cai_global.mech_type; + } + + + static inline Memb_list* get_memb_list(NrnThread* nt) { + if (!nt->_ml_list) { + return nullptr; + } + return nt->_ml_list[get_mech_type()]; + } + + + static inline void* mem_alloc(size_t num, size_t size, size_t alignment = 16) { + void* ptr; + posix_memalign(&ptr, alignment, num*size); + memset(ptr, 0, size); + return ptr; + } + + + static inline void mem_free(void* ptr) { + free(ptr); + } + + + static inline void coreneuron_abort() { + abort(); + } + + // Allocate instance structure + static void nrn_private_constructor_write_cai(NrnThread* nt, Memb_list* ml, int type) { + assert(!ml->instance); + assert(!ml->global_variables); + assert(ml->global_variables_size == 0); + auto* const inst = new write_cai_Instance{}; + assert(inst->global == &write_cai_global); + ml->instance = inst; + ml->global_variables = inst->global; + ml->global_variables_size = sizeof(write_cai_Store); + } + + // Deallocate the instance structure + static void nrn_private_destructor_write_cai(NrnThread* nt, Memb_list* ml, int 
type) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &write_cai_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(write_cai_Store)); + delete inst; + ml->instance = nullptr; + ml->global_variables = nullptr; + ml->global_variables_size = 0; + } + + /** initialize mechanism instance variables */ + static inline void setup_instance(NrnThread* nt, Memb_list* ml) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &write_cai_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(write_cai_Store)); + int pnodecount = ml->_nodecount_padded; + Datum* indexes = ml->pdata; + inst->cai = ml->data+0*pnodecount; + inst->v_unused = ml->data+1*pnodecount; + inst->ion_cao = nt->_data; + inst->ion_cai = nt->_data; + inst->ion_ca_erev = nt->_data; + inst->style_ca = ml->pdata; + } + + + + static void nrn_alloc_write_cai(double* data, Datum* indexes, int type) { + // do nothing + } + + + void nrn_constructor_write_cai(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + void nrn_destructor_write_cai(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + /** initialize channel */ + void nrn_init_write_cai(NrnThread* nt, Memb_list* ml, int type) { + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + + setup_instance(nt, ml); + auto* const inst = static_cast(ml->instance); + + if (_nrn_skip_initmodel == 0) { + #pragma omp simd + #pragma ivdep + for (int id = 0; id < nodecount; id++) { + int node_id = node_index[id]; + double v = voltage[node_id]; + #if NRN_PRCELLSTATE + inst->v_unused[id] = v; + #endif + inst->cai[id] = inst->ion_cai[indexes[1*pnodecount + id]]; + inst->cai[id] = 1124.0; + inst->ion_cai[indexes[1*pnodecount + id]] = inst->cai[id]; + int ca_type = inst->global->ca_type; + nrn_wrote_conc(ca_type, &(inst->ion_cai[indexes[1*pnodecount + id]]), 1, inst->style_ca[3], nrn_ion_global_map, celsius, nt->_ml_list[ca_type]->_nodecount_padded); + } + } + } + + + /** register channel with the simulator */ + void _write_cai_reg() { + + int mech_type = nrn_get_mechtype("write_cai"); + write_cai_global.mech_type = mech_type; + if (mech_type == -1) { + return; + } + + _nrn_layout_reg(mech_type, 0); + register_mech(mechanism_info, nrn_alloc_write_cai, nullptr, nullptr, nullptr, nrn_init_write_cai, nrn_private_constructor_write_cai, nrn_private_destructor_write_cai, first_pointer_var_index(), 1); + write_cai_global.ca_type = nrn_get_mechtype("ca_ion"); + + hoc_register_prop_size(mech_type, float_variables_size(), 
int_variables_size()); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 1, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 2, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 3, "#ca_ion"); + nrn_writes_conc(mech_type, 0); + hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL); + } +} diff --git a/useion/coreneuron/write_cao.cpp b/useion/coreneuron/write_cao.cpp new file mode 100644 index 00000000..dcea85f5 --- /dev/null +++ b/useion/coreneuron/write_cao.cpp @@ -0,0 +1,269 @@ +/********************************************************* +Model Name : write_cao +Filename : write_cao.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : CoreNEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace coreneuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "write_cao", + 0, + 0, + 0, + 0 + }; + + + /** all global variables */ + struct write_cao_Store { + int ca_type{}; + int reset{}; + int mech_type{}; + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + write_cao_Store write_cao_global; + + + /** all mechanism instance variables and global variables */ + struct write_cao_Instance { + double* cao{}; + double* v_unused{}; + const double* ion_cai{}; + double* ion_cao{}; + double* ion_ca_erev{}; + const int* style_ca{}; + write_cao_Store* global{&write_cao_global}; + }; + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + static inline int first_pointer_var_index() { + return -1; + } + + + static inline int first_random_var_index() { + return -1; + } + + + static inline int float_variables_size() { + return 2; + } + + + static inline int int_variables_size() { + return 4; + } + + + static inline int get_mech_type() { + return write_cao_global.mech_type; + } + + + static inline Memb_list* get_memb_list(NrnThread* nt) { + if (!nt->_ml_list) { + return nullptr; + } + return nt->_ml_list[get_mech_type()]; + } + + + static inline void* mem_alloc(size_t num, size_t size, size_t alignment = 16) { + void* ptr; + posix_memalign(&ptr, alignment, num*size); + memset(ptr, 0, size); + return ptr; + } + + + static inline void mem_free(void* ptr) { + free(ptr); + } + + + static inline void coreneuron_abort() { + abort(); + } + + // Allocate instance structure + static void nrn_private_constructor_write_cao(NrnThread* nt, Memb_list* ml, int type) { + assert(!ml->instance); + assert(!ml->global_variables); + assert(ml->global_variables_size == 0); + auto* const inst = new write_cao_Instance{}; + assert(inst->global == &write_cao_global); + ml->instance = inst; + ml->global_variables = inst->global; + ml->global_variables_size = sizeof(write_cao_Store); + } + + // Deallocate the instance structure + static void nrn_private_destructor_write_cao(NrnThread* 
nt, Memb_list* ml, int type) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &write_cao_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(write_cao_Store)); + delete inst; + ml->instance = nullptr; + ml->global_variables = nullptr; + ml->global_variables_size = 0; + } + + /** initialize mechanism instance variables */ + static inline void setup_instance(NrnThread* nt, Memb_list* ml) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &write_cao_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(write_cao_Store)); + int pnodecount = ml->_nodecount_padded; + Datum* indexes = ml->pdata; + inst->cao = ml->data+0*pnodecount; + inst->v_unused = ml->data+1*pnodecount; + inst->ion_cai = nt->_data; + inst->ion_cao = nt->_data; + inst->ion_ca_erev = nt->_data; + inst->style_ca = ml->pdata; + } + + + + static void nrn_alloc_write_cao(double* data, Datum* indexes, int type) { + // do nothing + } + + + void nrn_constructor_write_cao(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + void nrn_destructor_write_cao(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + /** initialize channel */ + void nrn_init_write_cao(NrnThread* nt, Memb_list* ml, int type) { + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + + setup_instance(nt, ml); + auto* const inst = static_cast(ml->instance); + + if (_nrn_skip_initmodel == 0) { + #pragma omp simd + #pragma ivdep + for (int id = 0; id < nodecount; id++) { + int node_id = node_index[id]; + double v = voltage[node_id]; + #if NRN_PRCELLSTATE + inst->v_unused[id] = v; + #endif + inst->cao[id] = inst->ion_cao[indexes[1*pnodecount + id]]; + inst->cao[id] = 1124.0; + inst->ion_cao[indexes[1*pnodecount + id]] = inst->cao[id]; + int ca_type = inst->global->ca_type; + nrn_wrote_conc(ca_type, &(inst->ion_cao[indexes[1*pnodecount + id]]), 2, inst->style_ca[3], nrn_ion_global_map, celsius, nt->_ml_list[ca_type]->_nodecount_padded); + } + } + } + + + /** register channel with the simulator */ + void _write_cao_reg() { + + int mech_type = nrn_get_mechtype("write_cao"); + write_cao_global.mech_type = mech_type; + if (mech_type == -1) { + return; + } + + _nrn_layout_reg(mech_type, 0); + register_mech(mechanism_info, nrn_alloc_write_cao, nullptr, nullptr, nullptr, nrn_init_write_cao, nrn_private_constructor_write_cao, nrn_private_destructor_write_cao, first_pointer_var_index(), 1); + write_cao_global.ca_type = nrn_get_mechtype("ca_ion"); + + hoc_register_prop_size(mech_type, 
float_variables_size(), int_variables_size()); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 1, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 2, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 3, "#ca_ion"); + nrn_writes_conc(mech_type, 0); + hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL); + } +} diff --git a/useion/coreneuron/write_eca.cpp b/useion/coreneuron/write_eca.cpp new file mode 100644 index 00000000..143e67a5 --- /dev/null +++ b/useion/coreneuron/write_eca.cpp @@ -0,0 +1,256 @@ +/********************************************************* +Model Name : write_eca +Filename : write_eca.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : CoreNEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace coreneuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "write_eca", + 0, + 0, + 0, + 0 + }; + + + /** all global variables */ + struct write_eca_Store { + int ca_type{}; + int reset{}; + int mech_type{}; + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + write_eca_Store write_eca_global; + + + /** all mechanism instance variables and global variables */ + struct write_eca_Instance { + double* eca{}; + double* v_unused{}; + double* ion_eca{}; + write_eca_Store* global{&write_eca_global}; + }; + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + static inline int first_pointer_var_index() { + return -1; + } + + + static inline int first_random_var_index() { + return -1; + } + + + static inline int float_variables_size() { + return 2; + } + + + static inline int int_variables_size() { + return 1; + } + + + static inline int get_mech_type() { + return write_eca_global.mech_type; + } + + + static inline Memb_list* get_memb_list(NrnThread* nt) { + if (!nt->_ml_list) { + return nullptr; + } + return nt->_ml_list[get_mech_type()]; + } + + + static inline void* mem_alloc(size_t num, size_t size, size_t alignment = 16) { + void* ptr; + posix_memalign(&ptr, alignment, num*size); + memset(ptr, 0, size); + return ptr; + } + + + static inline void mem_free(void* ptr) { + free(ptr); + } + + + static inline void coreneuron_abort() { + abort(); + } + + // Allocate instance structure + static void nrn_private_constructor_write_eca(NrnThread* nt, Memb_list* ml, int type) { + assert(!ml->instance); + assert(!ml->global_variables); + assert(ml->global_variables_size == 0); + auto* const inst = new write_eca_Instance{}; + assert(inst->global == &write_eca_global); + ml->instance = inst; + ml->global_variables = inst->global; + ml->global_variables_size = sizeof(write_eca_Store); + } + + // Deallocate the instance structure + static void nrn_private_destructor_write_eca(NrnThread* nt, Memb_list* ml, int type) { + auto* const inst = 
static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &write_eca_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(write_eca_Store)); + delete inst; + ml->instance = nullptr; + ml->global_variables = nullptr; + ml->global_variables_size = 0; + } + + /** initialize mechanism instance variables */ + static inline void setup_instance(NrnThread* nt, Memb_list* ml) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &write_eca_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(write_eca_Store)); + int pnodecount = ml->_nodecount_padded; + Datum* indexes = ml->pdata; + inst->eca = ml->data+0*pnodecount; + inst->v_unused = ml->data+1*pnodecount; + inst->ion_eca = nt->_data; + } + + + + static void nrn_alloc_write_eca(double* data, Datum* indexes, int type) { + // do nothing + } + + + void nrn_constructor_write_eca(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + void nrn_destructor_write_eca(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + /** initialize channel */ + void nrn_init_write_eca(NrnThread* nt, Memb_list* ml, int type) { + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + + setup_instance(nt, ml); + auto* const inst = static_cast(ml->instance); + + if (_nrn_skip_initmodel == 0) { + #pragma omp simd + #pragma ivdep + for (int id = 0; id < nodecount; id++) { + int node_id = node_index[id]; + double v = voltage[node_id]; + #if NRN_PRCELLSTATE + inst->v_unused[id] = v; + #endif + inst->eca[id] = 1124.0; + inst->ion_eca[indexes[0*pnodecount + id]] = inst->eca[id]; + } + } + } + + + /** register channel with the simulator */ + void _write_eca_reg() { + + int mech_type = nrn_get_mechtype("write_eca"); + write_eca_global.mech_type = mech_type; + if (mech_type == -1) { + return; + } + + _nrn_layout_reg(mech_type, 0); + register_mech(mechanism_info, nrn_alloc_write_eca, nullptr, nullptr, nullptr, nrn_init_write_eca, nrn_private_constructor_write_eca, nrn_private_destructor_write_eca, first_pointer_var_index(), 1); + write_eca_global.ca_type = nrn_get_mechtype("ca_ion"); + + hoc_register_prop_size(mech_type, float_variables_size(), int_variables_size()); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL); + } +} diff --git a/useion/neuron/ionic.cpp b/useion/neuron/ionic.cpp index d8e389a5..5ec184ff 100644 --- a/useion/neuron/ionic.cpp +++ b/useion/neuron/ionic.cpp @@ -561,17 +561,17 @@ namespace neuron { static void nrn_alloc_ionic(Prop* _prop) { - Prop 
*prop_ion{}; - Datum *_ppvar{}; + Datum *_ppvar = nullptr; _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); _nrn_mechanism_access_dparam(_prop) = _ppvar; _nrn_mechanism_cache_instance _lmc{_prop}; - size_t const _iml{}; + size_t const _iml = 0; assert(_nrn_mechanism_get_num_vars(_prop) == 4); /*initialize range parameters*/ _nrn_mechanism_access_dparam(_prop) = _ppvar; Symbol * na_sym = hoc_lookup("na_ion"); Prop * na_prop = need_memb(na_sym); + nrn_promote(na_prop, 0, 3); _ppvar[0] = _nrn_mechanism_get_param_handle(na_prop, 3); _ppvar[1] = _nrn_mechanism_get_param_handle(na_prop, 0); } diff --git a/useion/neuron/read_cai.cpp b/useion/neuron/read_cai.cpp new file mode 100644 index 00000000..2af2c27e --- /dev/null +++ b/useion/neuron/read_cai.cpp @@ -0,0 +1,674 @@ +/********************************************************* +Model Name : read_cai +Filename : read_cai.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : NEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include +#include + +/** + * \dir + * \brief Solver for a system of linear equations : Crout matrix decomposition + * + * \file + * \brief Implementation of Crout matrix decomposition (LU decomposition) followed by + * Forward/Backward substitution: Implementation details : (Legacy code) nrn / scopmath / crout.c + */ + +#include +#include + +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +#include "coreneuron/utils/offload.hpp" +#endif + +namespace nmodl { +namespace crout { + +/** + * \brief Crout matrix decomposition : in-place LU Decomposition of matrix a. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: 0 if no error; -1 if matrix is singular or ill-conditioned + */ +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int Crout(int n, T* const a, int* const perm, double* const rowmax) { + // roundoff is the minimal value for a pivot element without its being considered too close to + // zero + double roundoff = 1.e-20; + int i, j, k, r, pivot, irow, save_i = 0, krow; + T sum, equil_1, equil_2; + + /* Initialize permutation and rowmax vectors */ + + for (i = 0; i < n; i++) { + perm[i] = i; + k = 0; + for (j = 1; j < n; j++) + if (std::fabs(a[i * n + j]) > std::fabs(a[i * n + k])) + k = j; + rowmax[i] = a[i * n + k]; + } + + /* Loop over rows and columns r */ + + for (r = 0; r < n; r++) { + /* + * Operate on rth column. This produces the lower triangular matrix + * of terms needed to transform the constant vector. 
+ */ + + for (i = r; i < n; i++) { + sum = 0.0; + irow = perm[i]; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[irow * n + k] * a[krow * n + r]; + } + a[irow * n + r] -= sum; + } + + /* Find row containing the pivot in the rth column */ + + pivot = perm[r]; + equil_1 = std::fabs(a[pivot * n + r] / rowmax[pivot]); + for (i = r + 1; i < n; i++) { + irow = perm[i]; + equil_2 = std::fabs(a[irow * n + r] / rowmax[irow]); + if (equil_2 > equil_1) { + /* make irow the new pivot row */ + + pivot = irow; + save_i = i; + equil_1 = equil_2; + } + } + + /* Interchange entries in permutation vector if necessary */ + + if (pivot != perm[r]) { + perm[save_i] = perm[r]; + perm[r] = pivot; + } + + /* Check that pivot element is not too small */ + + if (std::fabs(a[pivot * n + r]) < roundoff) + return -1; + + /* + * Operate on row in rth position. This produces the upper + * triangular matrix whose diagonal elements are assumed to be unity. + * This matrix is used in the back substitution algorithm. + */ + + for (j = r + 1; j < n; j++) { + sum = 0.0; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[pivot * n + k] * a[krow * n + j]; + } + a[pivot * n + j] = (a[pivot * n + j] - sum) / a[pivot * n + r]; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +/** + * \brief Crout matrix decomposition : Forward/Backward substitution. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: no return variable + */ +#define y_(arg) p[y[arg]] +#define b_(arg) b[arg] +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int solveCrout(int n, + T const* const a, + T const* const b, + T* const p, + int const* const perm, + int const* const y = nullptr) { + int i, j, pivot; + T sum; + + /* Perform forward substitution with pivoting */ + if (y) { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. + */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) -= sum; + } + } else { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. 
+ */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] -= sum; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +#undef y_ +#undef b_ + +} // namespace crout +} // namespace nmodl + +/** + * \dir + * \brief Newton solver implementations + * + * \file + * \brief Implementation of Newton method for solving system of non-linear equations + */ + +#include +#include + +namespace nmodl { +/// newton solver implementations +namespace newton { + +/** + * @defgroup solver Solver Implementation + * @brief Solver implementation details + * + * Implementation of Newton method for solving system of non-linear equations using Eigen + * - newton::newton_solver is the preferred option: requires user to provide Jacobian + * - newton::newton_numerical_diff_solver is the fallback option: Jacobian not required + * + * @{ + */ + +static constexpr int MAX_ITER = 1e3; +static constexpr double EPS = 1e-12; + +/** + * \brief Newton method with user-provided Jacobian + * + * Newton method with user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, `J(X)` where `J(X)` is the Jacobian of `F(X)`, + * solves for \f$F(X) = 0\f$, starting with initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * when \f$|F|^2 < eps^2\f$, solution has converged. + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = -1; + while (++iter < max_iter) { + // calculate F, J from X using user-supplied functor + functor(X, F, J); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + // In Eigen the default storage order is ColMajor. + // Crout's implementation requires matrices stored in RowMajor order (C-style arrays). + // Therefore, the transposeInPlace is critical such that the data() method to give the rows + // instead of the columns. 
+ if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +static constexpr double SQUARE_ROOT_ULP = 1e-7; +static constexpr double CUBIC_ROOT_ULP = 1e-5; + +/** + * \brief Newton method without user-provided Jacobian + * + * Newton method without user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, solves for \f$F(X) = 0\f$, starting with + * initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * + * where `J(X)` is the Jacobian of `F(X)`, which is approximated numerically + * using a symmetric finite difference approximation to the derivative + * when \f$|F|^2 < eps^2\f$, solution has converged/ + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_numerical_diff_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Temporary storage for F(X+dx) + Eigen::Matrix F_p; + // Temporary storage for F(X-dx) + Eigen::Matrix F_m; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = 0; + while (iter < max_iter) { + // calculate F from X using user-supplied functor + functor(X, F); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + ++iter; + // calculate approximate Jacobian + for (int i = 0; i < N; ++i) { + // symmetric finite difference approximation to derivative + // df/dx ~= ( f(x+dx) - f(x-dx) ) / (2*dx) + // choose dx to be ~(ULP)^{1/3}*X[i] + // https://aip.scitation.org/doi/pdf/10.1063/1.4822971 + // also enforce a lower bound ~sqrt(ULP) to avoid dx being too small + double dX = std::max(CUBIC_ROOT_ULP * X[i], SQUARE_ROOT_ULP); + // F(X + dX) + X[i] += dX; + functor(X, F_p); + // F(X - dX) + X[i] -= 2.0 * dX; + functor(X, F_m); + F_p -= F_m; + // J = (F(X + dX) - F(X - dX)) / (2*dX) + J.col(i) = F_p / (2.0 * dX); + // restore X + X[i] += dX; + } + if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +/** + * Newton method template specializations for \f$N <= 4\f$ Use explicit inverse + * of `F` instead of LU decomposition. This is faster, as there is no pivoting + * and therefore no branches, but it is not numerically safe for \f$N > 4\f$. + */ + +template +EIGEN_DEVICE_FUNC int newton_solver_small_N(Eigen::Matrix& X, + FUNC functor, + double eps, + int max_iter) { + bool invertible; + Eigen::Matrix F; + Eigen::Matrix J, J_inv; + int iter = -1; + while (++iter < max_iter) { + functor(X, F, J); + double error = F.norm(); + if (error < eps) { + return iter; + } + // The inverse can be called from within OpenACC regions without any issue, as opposed to + // Eigen::PartialPivLU. 
+ J.computeInverseWithCheck(J_inv, invertible); + if (invertible) + X -= J_inv * F; + else + return -1; + } + return -1; +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +/** @} */ // end of solver + +} // namespace newton +} // namespace nmodl + + +#include "mech_api.h" +#include "neuron/cache/mechanism_range.hpp" +#include "nrniv_mf.h" +#include "section_fwd.hpp" + +/* NEURON global macro definitions */ +/* VECTORIZED */ +#define NRN_VECTORIZED 1 + +static constexpr auto number_of_datum_variables = 2; +static constexpr auto number_of_floating_point_variables = 3; + +namespace { +template +using _nrn_mechanism_std_vector = std::vector; +using _nrn_model_sorted_token = neuron::model_sorted_token; +using _nrn_mechanism_cache_range = neuron::cache::MechanismRange; +using _nrn_mechanism_cache_instance = neuron::cache::MechanismInstance; +using _nrn_non_owning_id_without_container = neuron::container::non_owning_identifier_without_container; +template +using _nrn_mechanism_field = neuron::mechanism::field; +template +void _nrn_mechanism_register_data_fields(Args&&... args) { + neuron::mechanism::register_data_fields(std::forward(args)...); +} +} // namespace + +Prop* hoc_getdata_range(int type); +extern Node* nrn_alloc_node_; + + +namespace neuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "read_cai", + 0, + "x_read_cai", + 0, + 0, + 0 + }; + + + /* NEURON global variables */ + static Symbol* _ca_sym; + static int mech_type; + static Prop* _extcall_prop; + /* _prop_id kind of shadows _extcall_prop to allow validity checking. 
*/ + static _nrn_non_owning_id_without_container _prop_id{}; + static int hoc_nrnpointerindex = -1; + static _nrn_mechanism_std_vector _extcall_thread; + + + /** all global variables */ + struct read_cai_Store { + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + read_cai_Store read_cai_global; + + + /** all mechanism instance variables and global variables */ + struct read_cai_Instance { + double* x{}; + double* cai{}; + double* v_unused{}; + const double* const* ion_cai{}; + const double* const* ion_cao{}; + read_cai_Store* global{&read_cai_global}; + }; + + + struct read_cai_NodeData { + int const * nodeindices; + double const * node_voltages; + double * node_diagonal; + double * node_rhs; + int nodecount; + }; + + + static read_cai_Instance make_instance_read_cai(_nrn_mechanism_cache_range& _lmc) { + return read_cai_Instance { + _lmc.template fpfield_ptr<0>(), + _lmc.template fpfield_ptr<1>(), + _lmc.template fpfield_ptr<2>(), + _lmc.template dptr_field_ptr<0>(), + _lmc.template dptr_field_ptr<1>() + }; + } + + + static read_cai_NodeData make_node_data_read_cai(NrnThread& nt, Memb_list& _ml_arg) { + return read_cai_NodeData { + _ml_arg.nodeindices, + nt.node_voltage_storage(), + nt.node_d_storage(), + nt.node_rhs_storage(), + _ml_arg.nodecount + }; + } + + + static void nrn_alloc_read_cai(Prop* _prop) { + Datum *_ppvar = nullptr; + _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); + _nrn_mechanism_access_dparam(_prop) = _ppvar; + _nrn_mechanism_cache_instance _lmc{_prop}; + size_t const _iml = 0; + assert(_nrn_mechanism_get_num_vars(_prop) == 3); + /*initialize range parameters*/ + _nrn_mechanism_access_dparam(_prop) = _ppvar; + Symbol * ca_sym = hoc_lookup("ca_ion"); + Prop * ca_prop = need_memb(ca_sym); + nrn_promote(ca_prop, 1, 0); + _ppvar[0] = _nrn_mechanism_get_param_handle(ca_prop, 1); + _ppvar[1] = _nrn_mechanism_get_param_handle(ca_prop, 2); + } + + + /* Neuron setdata functions */ + extern void _nrn_setdata_reg(int, void(*)(Prop*)); + static void _setdata(Prop* _prop) { + _extcall_prop = _prop; + _prop_id = _nrn_get_prop_id(_prop); + } + static void _hoc_setdata() { + Prop *_prop = hoc_getdata_range(mech_type); + _setdata(_prop); + hoc_retpushx(1.); + } + /* Mechanism procedures and functions */ + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + /* declaration of user functions */ + + + /* connect user functions to hoc names */ + static VoidFunc hoc_intfunc[] = { + {"setdata_read_cai", _hoc_setdata}, + {nullptr, nullptr} + }; + static NPyDirectMechFunc npy_direct_func_proc[] = { + {nullptr, nullptr} + }; + + + void nrn_init_read_cai(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_read_cai(_lmc); + auto node_data = make_node_data_read_cai(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + auto* _thread = _ml_arg->_thread; + for (int id = 0; id < nodecount; id++) { + auto* _ppvar = _ml_arg->pdata[id]; + int node_id = node_data.nodeindices[id]; + auto v = node_data.node_voltages[node_id]; + 
inst.v_unused[id] = v; + inst.cai[id] = (*inst.ion_cai[id]); + inst.x[id] = inst.cai[id]; + } + } + + + static void nrn_jacob_read_cai(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_read_cai(_lmc); + auto node_data = make_node_data_read_cai(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + for (int id = 0; id < nodecount; id++) { + } + } + + + static void _initlists() { + } + + + /** register channel with the simulator */ + extern "C" void _read_cai_reg() { + _initlists(); + + ion_reg("ca", -10000.); + + _ca_sym = hoc_lookup("ca_ion"); + + register_mech(mechanism_info, nrn_alloc_read_cai, nullptr, nullptr, nullptr, nrn_init_read_cai, hoc_nrnpointerindex, 1); + + mech_type = nrn_get_mechtype(mechanism_info[1]); + _nrn_mechanism_register_data_fields(mech_type, + _nrn_mechanism_field{"x"} /* 0 */, + _nrn_mechanism_field{"cai"} /* 1 */, + _nrn_mechanism_field{"v_unused"} /* 2 */, + _nrn_mechanism_field{"ion_cai", "ca_ion"} /* 0 */, + _nrn_mechanism_field{"ion_cao", "ca_ion"} /* 1 */ + ); + + hoc_register_prop_size(mech_type, 3, 2); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 1, "ca_ion"); + hoc_register_var(hoc_scalar_double, hoc_vector_double, hoc_intfunc); + hoc_register_npy_direct(mech_type, npy_direct_func_proc); + } +} diff --git a/useion/neuron/read_cao.cpp b/useion/neuron/read_cao.cpp new file mode 100644 index 00000000..0f07b050 --- /dev/null +++ b/useion/neuron/read_cao.cpp @@ -0,0 +1,674 @@ +/********************************************************* +Model Name : read_cao +Filename : read_cao.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : NEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include +#include + +/** + * \dir + * \brief Solver for a system of linear equations : Crout matrix decomposition + * + * \file + * \brief Implementation of Crout matrix decomposition (LU decomposition) followed by + * Forward/Backward substitution: Implementation details : (Legacy code) nrn / scopmath / crout.c + */ + +#include +#include + +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +#include "coreneuron/utils/offload.hpp" +#endif + +namespace nmodl { +namespace crout { + +/** + * \brief Crout matrix decomposition : in-place LU Decomposition of matrix a. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: 0 if no error; -1 if matrix is singular or ill-conditioned + */ +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int Crout(int n, T* const a, int* const perm, double* const rowmax) { + // roundoff is the minimal value for a pivot element without its being considered too close to + // zero + double roundoff = 1.e-20; + int i, j, k, r, pivot, irow, save_i = 0, krow; + T sum, equil_1, equil_2; + + /* Initialize permutation and rowmax vectors */ + + for (i = 0; i < n; i++) { + perm[i] = i; + k = 0; + for (j = 1; j < n; j++) + if (std::fabs(a[i * n + j]) > std::fabs(a[i * n + k])) + k = j; + rowmax[i] = a[i * n + k]; + } + + /* Loop over rows and columns r */ + + for (r = 0; r < n; r++) { + /* + * Operate on rth column. 
This produces the lower triangular matrix + * of terms needed to transform the constant vector. + */ + + for (i = r; i < n; i++) { + sum = 0.0; + irow = perm[i]; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[irow * n + k] * a[krow * n + r]; + } + a[irow * n + r] -= sum; + } + + /* Find row containing the pivot in the rth column */ + + pivot = perm[r]; + equil_1 = std::fabs(a[pivot * n + r] / rowmax[pivot]); + for (i = r + 1; i < n; i++) { + irow = perm[i]; + equil_2 = std::fabs(a[irow * n + r] / rowmax[irow]); + if (equil_2 > equil_1) { + /* make irow the new pivot row */ + + pivot = irow; + save_i = i; + equil_1 = equil_2; + } + } + + /* Interchange entries in permutation vector if necessary */ + + if (pivot != perm[r]) { + perm[save_i] = perm[r]; + perm[r] = pivot; + } + + /* Check that pivot element is not too small */ + + if (std::fabs(a[pivot * n + r]) < roundoff) + return -1; + + /* + * Operate on row in rth position. This produces the upper + * triangular matrix whose diagonal elements are assumed to be unity. + * This matrix is used in the back substitution algorithm. + */ + + for (j = r + 1; j < n; j++) { + sum = 0.0; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[pivot * n + k] * a[krow * n + j]; + } + a[pivot * n + j] = (a[pivot * n + j] - sum) / a[pivot * n + r]; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +/** + * \brief Crout matrix decomposition : Forward/Backward substitution. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: no return variable + */ +#define y_(arg) p[y[arg]] +#define b_(arg) b[arg] +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int solveCrout(int n, + T const* const a, + T const* const b, + T* const p, + int const* const perm, + int const* const y = nullptr) { + int i, j, pivot; + T sum; + + /* Perform forward substitution with pivoting */ + if (y) { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. + */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) -= sum; + } + } else { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. 
+ */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] -= sum; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +#undef y_ +#undef b_ + +} // namespace crout +} // namespace nmodl + +/** + * \dir + * \brief Newton solver implementations + * + * \file + * \brief Implementation of Newton method for solving system of non-linear equations + */ + +#include +#include + +namespace nmodl { +/// newton solver implementations +namespace newton { + +/** + * @defgroup solver Solver Implementation + * @brief Solver implementation details + * + * Implementation of Newton method for solving system of non-linear equations using Eigen + * - newton::newton_solver is the preferred option: requires user to provide Jacobian + * - newton::newton_numerical_diff_solver is the fallback option: Jacobian not required + * + * @{ + */ + +static constexpr int MAX_ITER = 1e3; +static constexpr double EPS = 1e-12; + +/** + * \brief Newton method with user-provided Jacobian + * + * Newton method with user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, `J(X)` where `J(X)` is the Jacobian of `F(X)`, + * solves for \f$F(X) = 0\f$, starting with initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * when \f$|F|^2 < eps^2\f$, solution has converged. + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = -1; + while (++iter < max_iter) { + // calculate F, J from X using user-supplied functor + functor(X, F, J); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + // In Eigen the default storage order is ColMajor. + // Crout's implementation requires matrices stored in RowMajor order (C-style arrays). + // Therefore, the transposeInPlace is critical such that the data() method to give the rows + // instead of the columns. 
+ if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +static constexpr double SQUARE_ROOT_ULP = 1e-7; +static constexpr double CUBIC_ROOT_ULP = 1e-5; + +/** + * \brief Newton method without user-provided Jacobian + * + * Newton method without user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, solves for \f$F(X) = 0\f$, starting with + * initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * + * where `J(X)` is the Jacobian of `F(X)`, which is approximated numerically + * using a symmetric finite difference approximation to the derivative + * when \f$|F|^2 < eps^2\f$, solution has converged/ + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_numerical_diff_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Temporary storage for F(X+dx) + Eigen::Matrix F_p; + // Temporary storage for F(X-dx) + Eigen::Matrix F_m; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = 0; + while (iter < max_iter) { + // calculate F from X using user-supplied functor + functor(X, F); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + ++iter; + // calculate approximate Jacobian + for (int i = 0; i < N; ++i) { + // symmetric finite difference approximation to derivative + // df/dx ~= ( f(x+dx) - f(x-dx) ) / (2*dx) + // choose dx to be ~(ULP)^{1/3}*X[i] + // https://aip.scitation.org/doi/pdf/10.1063/1.4822971 + // also enforce a lower bound ~sqrt(ULP) to avoid dx being too small + double dX = std::max(CUBIC_ROOT_ULP * X[i], SQUARE_ROOT_ULP); + // F(X + dX) + X[i] += dX; + functor(X, F_p); + // F(X - dX) + X[i] -= 2.0 * dX; + functor(X, F_m); + F_p -= F_m; + // J = (F(X + dX) - F(X - dX)) / (2*dX) + J.col(i) = F_p / (2.0 * dX); + // restore X + X[i] += dX; + } + if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +/** + * Newton method template specializations for \f$N <= 4\f$ Use explicit inverse + * of `F` instead of LU decomposition. This is faster, as there is no pivoting + * and therefore no branches, but it is not numerically safe for \f$N > 4\f$. + */ + +template +EIGEN_DEVICE_FUNC int newton_solver_small_N(Eigen::Matrix& X, + FUNC functor, + double eps, + int max_iter) { + bool invertible; + Eigen::Matrix F; + Eigen::Matrix J, J_inv; + int iter = -1; + while (++iter < max_iter) { + functor(X, F, J); + double error = F.norm(); + if (error < eps) { + return iter; + } + // The inverse can be called from within OpenACC regions without any issue, as opposed to + // Eigen::PartialPivLU. 
+ J.computeInverseWithCheck(J_inv, invertible); + if (invertible) + X -= J_inv * F; + else + return -1; + } + return -1; +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +/** @} */ // end of solver + +} // namespace newton +} // namespace nmodl + + +#include "mech_api.h" +#include "neuron/cache/mechanism_range.hpp" +#include "nrniv_mf.h" +#include "section_fwd.hpp" + +/* NEURON global macro definitions */ +/* VECTORIZED */ +#define NRN_VECTORIZED 1 + +static constexpr auto number_of_datum_variables = 2; +static constexpr auto number_of_floating_point_variables = 3; + +namespace { +template +using _nrn_mechanism_std_vector = std::vector; +using _nrn_model_sorted_token = neuron::model_sorted_token; +using _nrn_mechanism_cache_range = neuron::cache::MechanismRange; +using _nrn_mechanism_cache_instance = neuron::cache::MechanismInstance; +using _nrn_non_owning_id_without_container = neuron::container::non_owning_identifier_without_container; +template +using _nrn_mechanism_field = neuron::mechanism::field; +template +void _nrn_mechanism_register_data_fields(Args&&... args) { + neuron::mechanism::register_data_fields(std::forward(args)...); +} +} // namespace + +Prop* hoc_getdata_range(int type); +extern Node* nrn_alloc_node_; + + +namespace neuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "read_cao", + 0, + "x_read_cao", + 0, + 0, + 0 + }; + + + /* NEURON global variables */ + static Symbol* _ca_sym; + static int mech_type; + static Prop* _extcall_prop; + /* _prop_id kind of shadows _extcall_prop to allow validity checking. 
*/ + static _nrn_non_owning_id_without_container _prop_id{}; + static int hoc_nrnpointerindex = -1; + static _nrn_mechanism_std_vector _extcall_thread; + + + /** all global variables */ + struct read_cao_Store { + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + read_cao_Store read_cao_global; + + + /** all mechanism instance variables and global variables */ + struct read_cao_Instance { + double* x{}; + double* cao{}; + double* v_unused{}; + const double* const* ion_cao{}; + const double* const* ion_cai{}; + read_cao_Store* global{&read_cao_global}; + }; + + + struct read_cao_NodeData { + int const * nodeindices; + double const * node_voltages; + double * node_diagonal; + double * node_rhs; + int nodecount; + }; + + + static read_cao_Instance make_instance_read_cao(_nrn_mechanism_cache_range& _lmc) { + return read_cao_Instance { + _lmc.template fpfield_ptr<0>(), + _lmc.template fpfield_ptr<1>(), + _lmc.template fpfield_ptr<2>(), + _lmc.template dptr_field_ptr<0>(), + _lmc.template dptr_field_ptr<1>() + }; + } + + + static read_cao_NodeData make_node_data_read_cao(NrnThread& nt, Memb_list& _ml_arg) { + return read_cao_NodeData { + _ml_arg.nodeindices, + nt.node_voltage_storage(), + nt.node_d_storage(), + nt.node_rhs_storage(), + _ml_arg.nodecount + }; + } + + + static void nrn_alloc_read_cao(Prop* _prop) { + Datum *_ppvar = nullptr; + _ppvar = nrn_prop_datum_alloc(mech_type, 2, _prop); + _nrn_mechanism_access_dparam(_prop) = _ppvar; + _nrn_mechanism_cache_instance _lmc{_prop}; + size_t const _iml = 0; + assert(_nrn_mechanism_get_num_vars(_prop) == 3); + /*initialize range parameters*/ + _nrn_mechanism_access_dparam(_prop) = _ppvar; + Symbol * ca_sym = hoc_lookup("ca_ion"); + Prop * ca_prop = need_memb(ca_sym); + nrn_promote(ca_prop, 1, 0); + _ppvar[0] = _nrn_mechanism_get_param_handle(ca_prop, 2); + _ppvar[1] = _nrn_mechanism_get_param_handle(ca_prop, 1); + } + + + /* Neuron setdata functions */ + extern void _nrn_setdata_reg(int, void(*)(Prop*)); + static void _setdata(Prop* _prop) { + _extcall_prop = _prop; + _prop_id = _nrn_get_prop_id(_prop); + } + static void _hoc_setdata() { + Prop *_prop = hoc_getdata_range(mech_type); + _setdata(_prop); + hoc_retpushx(1.); + } + /* Mechanism procedures and functions */ + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + /* declaration of user functions */ + + + /* connect user functions to hoc names */ + static VoidFunc hoc_intfunc[] = { + {"setdata_read_cao", _hoc_setdata}, + {nullptr, nullptr} + }; + static NPyDirectMechFunc npy_direct_func_proc[] = { + {nullptr, nullptr} + }; + + + void nrn_init_read_cao(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_read_cao(_lmc); + auto node_data = make_node_data_read_cao(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + auto* _thread = _ml_arg->_thread; + for (int id = 0; id < nodecount; id++) { + auto* _ppvar = _ml_arg->pdata[id]; + int node_id = node_data.nodeindices[id]; + auto v = node_data.node_voltages[node_id]; + 
inst.v_unused[id] = v; + inst.cao[id] = (*inst.ion_cao[id]); + inst.x[id] = inst.cao[id]; + } + } + + + static void nrn_jacob_read_cao(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_read_cao(_lmc); + auto node_data = make_node_data_read_cao(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + for (int id = 0; id < nodecount; id++) { + } + } + + + static void _initlists() { + } + + + /** register channel with the simulator */ + extern "C" void _read_cao_reg() { + _initlists(); + + ion_reg("ca", -10000.); + + _ca_sym = hoc_lookup("ca_ion"); + + register_mech(mechanism_info, nrn_alloc_read_cao, nullptr, nullptr, nullptr, nrn_init_read_cao, hoc_nrnpointerindex, 1); + + mech_type = nrn_get_mechtype(mechanism_info[1]); + _nrn_mechanism_register_data_fields(mech_type, + _nrn_mechanism_field{"x"} /* 0 */, + _nrn_mechanism_field{"cao"} /* 1 */, + _nrn_mechanism_field{"v_unused"} /* 2 */, + _nrn_mechanism_field{"ion_cao", "ca_ion"} /* 0 */, + _nrn_mechanism_field{"ion_cai", "ca_ion"} /* 1 */ + ); + + hoc_register_prop_size(mech_type, 3, 2); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 1, "ca_ion"); + hoc_register_var(hoc_scalar_double, hoc_vector_double, hoc_intfunc); + hoc_register_npy_direct(mech_type, npy_direct_func_proc); + } +} diff --git a/useion/neuron/read_eca.cpp b/useion/neuron/read_eca.cpp new file mode 100644 index 00000000..3fce0b19 --- /dev/null +++ b/useion/neuron/read_eca.cpp @@ -0,0 +1,669 @@ +/********************************************************* +Model Name : read_eca +Filename : read_eca.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : NEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include +#include + +/** + * \dir + * \brief Solver for a system of linear equations : Crout matrix decomposition + * + * \file + * \brief Implementation of Crout matrix decomposition (LU decomposition) followed by + * Forward/Backward substitution: Implementation details : (Legacy code) nrn / scopmath / crout.c + */ + +#include +#include + +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +#include "coreneuron/utils/offload.hpp" +#endif + +namespace nmodl { +namespace crout { + +/** + * \brief Crout matrix decomposition : in-place LU Decomposition of matrix a. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: 0 if no error; -1 if matrix is singular or ill-conditioned + */ +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int Crout(int n, T* const a, int* const perm, double* const rowmax) { + // roundoff is the minimal value for a pivot element without its being considered too close to + // zero + double roundoff = 1.e-20; + int i, j, k, r, pivot, irow, save_i = 0, krow; + T sum, equil_1, equil_2; + + /* Initialize permutation and rowmax vectors */ + + for (i = 0; i < n; i++) { + perm[i] = i; + k = 0; + for (j = 1; j < n; j++) + if (std::fabs(a[i * n + j]) > std::fabs(a[i * n + k])) + k = j; + rowmax[i] = a[i * n + k]; + } + + /* Loop over rows and columns r */ + + for (r = 0; r < n; r++) { + /* + * Operate on rth column. 
This produces the lower triangular matrix + * of terms needed to transform the constant vector. + */ + + for (i = r; i < n; i++) { + sum = 0.0; + irow = perm[i]; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[irow * n + k] * a[krow * n + r]; + } + a[irow * n + r] -= sum; + } + + /* Find row containing the pivot in the rth column */ + + pivot = perm[r]; + equil_1 = std::fabs(a[pivot * n + r] / rowmax[pivot]); + for (i = r + 1; i < n; i++) { + irow = perm[i]; + equil_2 = std::fabs(a[irow * n + r] / rowmax[irow]); + if (equil_2 > equil_1) { + /* make irow the new pivot row */ + + pivot = irow; + save_i = i; + equil_1 = equil_2; + } + } + + /* Interchange entries in permutation vector if necessary */ + + if (pivot != perm[r]) { + perm[save_i] = perm[r]; + perm[r] = pivot; + } + + /* Check that pivot element is not too small */ + + if (std::fabs(a[pivot * n + r]) < roundoff) + return -1; + + /* + * Operate on row in rth position. This produces the upper + * triangular matrix whose diagonal elements are assumed to be unity. + * This matrix is used in the back substitution algorithm. + */ + + for (j = r + 1; j < n; j++) { + sum = 0.0; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[pivot * n + k] * a[krow * n + j]; + } + a[pivot * n + j] = (a[pivot * n + j] - sum) / a[pivot * n + r]; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +/** + * \brief Crout matrix decomposition : Forward/Backward substitution. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: no return variable + */ +#define y_(arg) p[y[arg]] +#define b_(arg) b[arg] +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int solveCrout(int n, + T const* const a, + T const* const b, + T* const p, + int const* const perm, + int const* const y = nullptr) { + int i, j, pivot; + T sum; + + /* Perform forward substitution with pivoting */ + if (y) { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. + */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) -= sum; + } + } else { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. 
+ */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] -= sum; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +#undef y_ +#undef b_ + +} // namespace crout +} // namespace nmodl + +/** + * \dir + * \brief Newton solver implementations + * + * \file + * \brief Implementation of Newton method for solving system of non-linear equations + */ + +#include +#include + +namespace nmodl { +/// newton solver implementations +namespace newton { + +/** + * @defgroup solver Solver Implementation + * @brief Solver implementation details + * + * Implementation of Newton method for solving system of non-linear equations using Eigen + * - newton::newton_solver is the preferred option: requires user to provide Jacobian + * - newton::newton_numerical_diff_solver is the fallback option: Jacobian not required + * + * @{ + */ + +static constexpr int MAX_ITER = 1e3; +static constexpr double EPS = 1e-12; + +/** + * \brief Newton method with user-provided Jacobian + * + * Newton method with user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, `J(X)` where `J(X)` is the Jacobian of `F(X)`, + * solves for \f$F(X) = 0\f$, starting with initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * when \f$|F|^2 < eps^2\f$, solution has converged. + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = -1; + while (++iter < max_iter) { + // calculate F, J from X using user-supplied functor + functor(X, F, J); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + // In Eigen the default storage order is ColMajor. + // Crout's implementation requires matrices stored in RowMajor order (C-style arrays). + // Therefore, the transposeInPlace is critical such that the data() method to give the rows + // instead of the columns. 
+ if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +static constexpr double SQUARE_ROOT_ULP = 1e-7; +static constexpr double CUBIC_ROOT_ULP = 1e-5; + +/** + * \brief Newton method without user-provided Jacobian + * + * Newton method without user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, solves for \f$F(X) = 0\f$, starting with + * initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * + * where `J(X)` is the Jacobian of `F(X)`, which is approximated numerically + * using a symmetric finite difference approximation to the derivative + * when \f$|F|^2 < eps^2\f$, solution has converged/ + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_numerical_diff_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Temporary storage for F(X+dx) + Eigen::Matrix F_p; + // Temporary storage for F(X-dx) + Eigen::Matrix F_m; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = 0; + while (iter < max_iter) { + // calculate F from X using user-supplied functor + functor(X, F); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + ++iter; + // calculate approximate Jacobian + for (int i = 0; i < N; ++i) { + // symmetric finite difference approximation to derivative + // df/dx ~= ( f(x+dx) - f(x-dx) ) / (2*dx) + // choose dx to be ~(ULP)^{1/3}*X[i] + // https://aip.scitation.org/doi/pdf/10.1063/1.4822971 + // also enforce a lower bound ~sqrt(ULP) to avoid dx being too small + double dX = std::max(CUBIC_ROOT_ULP * X[i], SQUARE_ROOT_ULP); + // F(X + dX) + X[i] += dX; + functor(X, F_p); + // F(X - dX) + X[i] -= 2.0 * dX; + functor(X, F_m); + F_p -= F_m; + // J = (F(X + dX) - F(X - dX)) / (2*dX) + J.col(i) = F_p / (2.0 * dX); + // restore X + X[i] += dX; + } + if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +/** + * Newton method template specializations for \f$N <= 4\f$ Use explicit inverse + * of `F` instead of LU decomposition. This is faster, as there is no pivoting + * and therefore no branches, but it is not numerically safe for \f$N > 4\f$. + */ + +template +EIGEN_DEVICE_FUNC int newton_solver_small_N(Eigen::Matrix& X, + FUNC functor, + double eps, + int max_iter) { + bool invertible; + Eigen::Matrix F; + Eigen::Matrix J, J_inv; + int iter = -1; + while (++iter < max_iter) { + functor(X, F, J); + double error = F.norm(); + if (error < eps) { + return iter; + } + // The inverse can be called from within OpenACC regions without any issue, as opposed to + // Eigen::PartialPivLU. 
+ J.computeInverseWithCheck(J_inv, invertible); + if (invertible) + X -= J_inv * F; + else + return -1; + } + return -1; +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +/** @} */ // end of solver + +} // namespace newton +} // namespace nmodl + + +#include "mech_api.h" +#include "neuron/cache/mechanism_range.hpp" +#include "nrniv_mf.h" +#include "section_fwd.hpp" + +/* NEURON global macro definitions */ +/* VECTORIZED */ +#define NRN_VECTORIZED 1 + +static constexpr auto number_of_datum_variables = 1; +static constexpr auto number_of_floating_point_variables = 3; + +namespace { +template +using _nrn_mechanism_std_vector = std::vector; +using _nrn_model_sorted_token = neuron::model_sorted_token; +using _nrn_mechanism_cache_range = neuron::cache::MechanismRange; +using _nrn_mechanism_cache_instance = neuron::cache::MechanismInstance; +using _nrn_non_owning_id_without_container = neuron::container::non_owning_identifier_without_container; +template +using _nrn_mechanism_field = neuron::mechanism::field; +template +void _nrn_mechanism_register_data_fields(Args&&... args) { + neuron::mechanism::register_data_fields(std::forward(args)...); +} +} // namespace + +Prop* hoc_getdata_range(int type); +extern Node* nrn_alloc_node_; + + +namespace neuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "read_eca", + 0, + "x_read_eca", + 0, + 0, + 0 + }; + + + /* NEURON global variables */ + static Symbol* _ca_sym; + static int mech_type; + static Prop* _extcall_prop; + /* _prop_id kind of shadows _extcall_prop to allow validity checking. 
*/ + static _nrn_non_owning_id_without_container _prop_id{}; + static int hoc_nrnpointerindex = -1; + static _nrn_mechanism_std_vector _extcall_thread; + + + /** all global variables */ + struct read_eca_Store { + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + read_eca_Store read_eca_global; + + + /** all mechanism instance variables and global variables */ + struct read_eca_Instance { + double* x{}; + double* eca{}; + double* v_unused{}; + const double* const* ion_eca{}; + read_eca_Store* global{&read_eca_global}; + }; + + + struct read_eca_NodeData { + int const * nodeindices; + double const * node_voltages; + double * node_diagonal; + double * node_rhs; + int nodecount; + }; + + + static read_eca_Instance make_instance_read_eca(_nrn_mechanism_cache_range& _lmc) { + return read_eca_Instance { + _lmc.template fpfield_ptr<0>(), + _lmc.template fpfield_ptr<1>(), + _lmc.template fpfield_ptr<2>(), + _lmc.template dptr_field_ptr<0>() + }; + } + + + static read_eca_NodeData make_node_data_read_eca(NrnThread& nt, Memb_list& _ml_arg) { + return read_eca_NodeData { + _ml_arg.nodeindices, + nt.node_voltage_storage(), + nt.node_d_storage(), + nt.node_rhs_storage(), + _ml_arg.nodecount + }; + } + + + static void nrn_alloc_read_eca(Prop* _prop) { + Datum *_ppvar = nullptr; + _ppvar = nrn_prop_datum_alloc(mech_type, 1, _prop); + _nrn_mechanism_access_dparam(_prop) = _ppvar; + _nrn_mechanism_cache_instance _lmc{_prop}; + size_t const _iml = 0; + assert(_nrn_mechanism_get_num_vars(_prop) == 3); + /*initialize range parameters*/ + _nrn_mechanism_access_dparam(_prop) = _ppvar; + Symbol * ca_sym = hoc_lookup("ca_ion"); + Prop * ca_prop = need_memb(ca_sym); + nrn_promote(ca_prop, 0, 1); + _ppvar[0] = _nrn_mechanism_get_param_handle(ca_prop, 0); + } + + + /* Neuron setdata functions */ + extern void _nrn_setdata_reg(int, void(*)(Prop*)); + static void _setdata(Prop* _prop) { + _extcall_prop = _prop; + _prop_id = _nrn_get_prop_id(_prop); + } + static void _hoc_setdata() { + Prop *_prop = hoc_getdata_range(mech_type); + _setdata(_prop); + hoc_retpushx(1.); + } + /* Mechanism procedures and functions */ + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + /* declaration of user functions */ + + + /* connect user functions to hoc names */ + static VoidFunc hoc_intfunc[] = { + {"setdata_read_eca", _hoc_setdata}, + {nullptr, nullptr} + }; + static NPyDirectMechFunc npy_direct_func_proc[] = { + {nullptr, nullptr} + }; + + + void nrn_init_read_eca(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_read_eca(_lmc); + auto node_data = make_node_data_read_eca(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + auto* _thread = _ml_arg->_thread; + for (int id = 0; id < nodecount; id++) { + auto* _ppvar = _ml_arg->pdata[id]; + int node_id = node_data.nodeindices[id]; + auto v = node_data.node_voltages[node_id]; + inst.v_unused[id] = v; + inst.eca[id] = (*inst.ion_eca[id]); + inst.x[id] = inst.eca[id]; + } + } + + + static void nrn_jacob_read_eca(const 
_nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_read_eca(_lmc); + auto node_data = make_node_data_read_eca(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + for (int id = 0; id < nodecount; id++) { + } + } + + + static void _initlists() { + } + + + /** register channel with the simulator */ + extern "C" void _read_eca_reg() { + _initlists(); + + ion_reg("ca", -10000.); + + _ca_sym = hoc_lookup("ca_ion"); + + register_mech(mechanism_info, nrn_alloc_read_eca, nullptr, nullptr, nullptr, nrn_init_read_eca, hoc_nrnpointerindex, 1); + + mech_type = nrn_get_mechtype(mechanism_info[1]); + _nrn_mechanism_register_data_fields(mech_type, + _nrn_mechanism_field{"x"} /* 0 */, + _nrn_mechanism_field{"eca"} /* 1 */, + _nrn_mechanism_field{"v_unused"} /* 2 */, + _nrn_mechanism_field{"ion_eca", "ca_ion"} /* 0 */ + ); + + hoc_register_prop_size(mech_type, 3, 1); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_var(hoc_scalar_double, hoc_vector_double, hoc_intfunc); + hoc_register_npy_direct(mech_type, npy_direct_func_proc); + } +} diff --git a/useion/neuron/write_cai.cpp b/useion/neuron/write_cai.cpp new file mode 100644 index 00000000..1e4c544d --- /dev/null +++ b/useion/neuron/write_cai.cpp @@ -0,0 +1,684 @@ +/********************************************************* +Model Name : write_cai +Filename : write_cai.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : NEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include +#include + +/** + * \dir + * \brief Solver for a system of linear equations : Crout matrix decomposition + * + * \file + * \brief Implementation of Crout matrix decomposition (LU decomposition) followed by + * Forward/Backward substitution: Implementation details : (Legacy code) nrn / scopmath / crout.c + */ + +#include +#include + +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +#include "coreneuron/utils/offload.hpp" +#endif + +namespace nmodl { +namespace crout { + +/** + * \brief Crout matrix decomposition : in-place LU Decomposition of matrix a. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: 0 if no error; -1 if matrix is singular or ill-conditioned + */ +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int Crout(int n, T* const a, int* const perm, double* const rowmax) { + // roundoff is the minimal value for a pivot element without its being considered too close to + // zero + double roundoff = 1.e-20; + int i, j, k, r, pivot, irow, save_i = 0, krow; + T sum, equil_1, equil_2; + + /* Initialize permutation and rowmax vectors */ + + for (i = 0; i < n; i++) { + perm[i] = i; + k = 0; + for (j = 1; j < n; j++) + if (std::fabs(a[i * n + j]) > std::fabs(a[i * n + k])) + k = j; + rowmax[i] = a[i * n + k]; + } + + /* Loop over rows and columns r */ + + for (r = 0; r < n; r++) { + /* + * Operate on rth column. This produces the lower triangular matrix + * of terms needed to transform the constant vector. 
+ */ + + for (i = r; i < n; i++) { + sum = 0.0; + irow = perm[i]; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[irow * n + k] * a[krow * n + r]; + } + a[irow * n + r] -= sum; + } + + /* Find row containing the pivot in the rth column */ + + pivot = perm[r]; + equil_1 = std::fabs(a[pivot * n + r] / rowmax[pivot]); + for (i = r + 1; i < n; i++) { + irow = perm[i]; + equil_2 = std::fabs(a[irow * n + r] / rowmax[irow]); + if (equil_2 > equil_1) { + /* make irow the new pivot row */ + + pivot = irow; + save_i = i; + equil_1 = equil_2; + } + } + + /* Interchange entries in permutation vector if necessary */ + + if (pivot != perm[r]) { + perm[save_i] = perm[r]; + perm[r] = pivot; + } + + /* Check that pivot element is not too small */ + + if (std::fabs(a[pivot * n + r]) < roundoff) + return -1; + + /* + * Operate on row in rth position. This produces the upper + * triangular matrix whose diagonal elements are assumed to be unity. + * This matrix is used in the back substitution algorithm. + */ + + for (j = r + 1; j < n; j++) { + sum = 0.0; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[pivot * n + k] * a[krow * n + j]; + } + a[pivot * n + j] = (a[pivot * n + j] - sum) / a[pivot * n + r]; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +/** + * \brief Crout matrix decomposition : Forward/Backward substitution. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: no return variable + */ +#define y_(arg) p[y[arg]] +#define b_(arg) b[arg] +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int solveCrout(int n, + T const* const a, + T const* const b, + T* const p, + int const* const perm, + int const* const y = nullptr) { + int i, j, pivot; + T sum; + + /* Perform forward substitution with pivoting */ + if (y) { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. + */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) -= sum; + } + } else { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. 
+ */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] -= sum; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +#undef y_ +#undef b_ + +} // namespace crout +} // namespace nmodl + +/** + * \dir + * \brief Newton solver implementations + * + * \file + * \brief Implementation of Newton method for solving system of non-linear equations + */ + +#include +#include + +namespace nmodl { +/// newton solver implementations +namespace newton { + +/** + * @defgroup solver Solver Implementation + * @brief Solver implementation details + * + * Implementation of Newton method for solving system of non-linear equations using Eigen + * - newton::newton_solver is the preferred option: requires user to provide Jacobian + * - newton::newton_numerical_diff_solver is the fallback option: Jacobian not required + * + * @{ + */ + +static constexpr int MAX_ITER = 1e3; +static constexpr double EPS = 1e-12; + +/** + * \brief Newton method with user-provided Jacobian + * + * Newton method with user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, `J(X)` where `J(X)` is the Jacobian of `F(X)`, + * solves for \f$F(X) = 0\f$, starting with initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * when \f$|F|^2 < eps^2\f$, solution has converged. + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = -1; + while (++iter < max_iter) { + // calculate F, J from X using user-supplied functor + functor(X, F, J); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + // In Eigen the default storage order is ColMajor. + // Crout's implementation requires matrices stored in RowMajor order (C-style arrays). + // Therefore, the transposeInPlace is critical such that the data() method to give the rows + // instead of the columns. 
+ if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +static constexpr double SQUARE_ROOT_ULP = 1e-7; +static constexpr double CUBIC_ROOT_ULP = 1e-5; + +/** + * \brief Newton method without user-provided Jacobian + * + * Newton method without user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, solves for \f$F(X) = 0\f$, starting with + * initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * + * where `J(X)` is the Jacobian of `F(X)`, which is approximated numerically + * using a symmetric finite difference approximation to the derivative + * when \f$|F|^2 < eps^2\f$, solution has converged/ + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_numerical_diff_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Temporary storage for F(X+dx) + Eigen::Matrix F_p; + // Temporary storage for F(X-dx) + Eigen::Matrix F_m; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = 0; + while (iter < max_iter) { + // calculate F from X using user-supplied functor + functor(X, F); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + ++iter; + // calculate approximate Jacobian + for (int i = 0; i < N; ++i) { + // symmetric finite difference approximation to derivative + // df/dx ~= ( f(x+dx) - f(x-dx) ) / (2*dx) + // choose dx to be ~(ULP)^{1/3}*X[i] + // https://aip.scitation.org/doi/pdf/10.1063/1.4822971 + // also enforce a lower bound ~sqrt(ULP) to avoid dx being too small + double dX = std::max(CUBIC_ROOT_ULP * X[i], SQUARE_ROOT_ULP); + // F(X + dX) + X[i] += dX; + functor(X, F_p); + // F(X - dX) + X[i] -= 2.0 * dX; + functor(X, F_m); + F_p -= F_m; + // J = (F(X + dX) - F(X - dX)) / (2*dX) + J.col(i) = F_p / (2.0 * dX); + // restore X + X[i] += dX; + } + if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +/** + * Newton method template specializations for \f$N <= 4\f$ Use explicit inverse + * of `F` instead of LU decomposition. This is faster, as there is no pivoting + * and therefore no branches, but it is not numerically safe for \f$N > 4\f$. + */ + +template +EIGEN_DEVICE_FUNC int newton_solver_small_N(Eigen::Matrix& X, + FUNC functor, + double eps, + int max_iter) { + bool invertible; + Eigen::Matrix F; + Eigen::Matrix J, J_inv; + int iter = -1; + while (++iter < max_iter) { + functor(X, F, J); + double error = F.norm(); + if (error < eps) { + return iter; + } + // The inverse can be called from within OpenACC regions without any issue, as opposed to + // Eigen::PartialPivLU. 
+ J.computeInverseWithCheck(J_inv, invertible); + if (invertible) + X -= J_inv * F; + else + return -1; + } + return -1; +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +/** @} */ // end of solver + +} // namespace newton +} // namespace nmodl + + +#include "mech_api.h" +#include "neuron/cache/mechanism_range.hpp" +#include "nrniv_mf.h" +#include "section_fwd.hpp" + +/* NEURON global macro definitions */ +/* VECTORIZED */ +#define NRN_VECTORIZED 1 + +static constexpr auto number_of_datum_variables = 4; +static constexpr auto number_of_floating_point_variables = 2; + +namespace { +template +using _nrn_mechanism_std_vector = std::vector; +using _nrn_model_sorted_token = neuron::model_sorted_token; +using _nrn_mechanism_cache_range = neuron::cache::MechanismRange; +using _nrn_mechanism_cache_instance = neuron::cache::MechanismInstance; +using _nrn_non_owning_id_without_container = neuron::container::non_owning_identifier_without_container; +template +using _nrn_mechanism_field = neuron::mechanism::field; +template +void _nrn_mechanism_register_data_fields(Args&&... args) { + neuron::mechanism::register_data_fields(std::forward(args)...); +} +} // namespace + +Prop* hoc_getdata_range(int type); +extern Node* nrn_alloc_node_; + + +namespace neuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "write_cai", + 0, + 0, + 0, + 0 + }; + + + /* NEURON global variables */ + static Symbol* _ca_sym; + static int mech_type; + static Prop* _extcall_prop; + /* _prop_id kind of shadows _extcall_prop to allow validity checking. 
*/ + static _nrn_non_owning_id_without_container _prop_id{}; + static int hoc_nrnpointerindex = -1; + static _nrn_mechanism_std_vector _extcall_thread; + + + /** all global variables */ + struct write_cai_Store { + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + write_cai_Store write_cai_global; + + + /** all mechanism instance variables and global variables */ + struct write_cai_Instance { + double* cai{}; + double* v_unused{}; + const double* const* ion_cao{}; + double* const* ion_cai{}; + double* const* ion_ca_erev{}; + const int* const* style_ca{}; + write_cai_Store* global{&write_cai_global}; + }; + + + struct write_cai_NodeData { + int const * nodeindices; + double const * node_voltages; + double * node_diagonal; + double * node_rhs; + int nodecount; + }; + + + static write_cai_Instance make_instance_write_cai(_nrn_mechanism_cache_range& _lmc) { + return write_cai_Instance { + _lmc.template fpfield_ptr<0>(), + _lmc.template fpfield_ptr<1>(), + _lmc.template dptr_field_ptr<0>(), + _lmc.template dptr_field_ptr<1>(), + _lmc.template dptr_field_ptr<2>() + }; + } + + + static write_cai_NodeData make_node_data_write_cai(NrnThread& nt, Memb_list& _ml_arg) { + return write_cai_NodeData { + _ml_arg.nodeindices, + nt.node_voltage_storage(), + nt.node_d_storage(), + nt.node_rhs_storage(), + _ml_arg.nodecount + }; + } + + + static void nrn_alloc_write_cai(Prop* _prop) { + Datum *_ppvar = nullptr; + _ppvar = nrn_prop_datum_alloc(mech_type, 4, _prop); + _nrn_mechanism_access_dparam(_prop) = _ppvar; + _nrn_mechanism_cache_instance _lmc{_prop}; + size_t const _iml = 0; + assert(_nrn_mechanism_get_num_vars(_prop) == 2); + /*initialize range parameters*/ + _nrn_mechanism_access_dparam(_prop) = _ppvar; + Symbol * ca_sym = hoc_lookup("ca_ion"); + Prop * ca_prop = need_memb(ca_sym); + nrn_check_conc_write(_prop, ca_prop, 1); + nrn_promote(ca_prop, 3, 0); + _ppvar[0] = _nrn_mechanism_get_param_handle(ca_prop, 2); + _ppvar[1] = _nrn_mechanism_get_param_handle(ca_prop, 1); + _ppvar[2] = _nrn_mechanism_get_param_handle(ca_prop, 0); + _ppvar[3] = {neuron::container::do_not_search, &(_nrn_mechanism_access_dparam(ca_prop)[0].literal_value())}; + } + + + /* Neuron setdata functions */ + extern void _nrn_setdata_reg(int, void(*)(Prop*)); + static void _setdata(Prop* _prop) { + _extcall_prop = _prop; + _prop_id = _nrn_get_prop_id(_prop); + } + static void _hoc_setdata() { + Prop *_prop = hoc_getdata_range(mech_type); + _setdata(_prop); + hoc_retpushx(1.); + } + /* Mechanism procedures and functions */ + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + /* declaration of user functions */ + + + /* connect user functions to hoc names */ + static VoidFunc hoc_intfunc[] = { + {"setdata_write_cai", _hoc_setdata}, + {nullptr, nullptr} + }; + static NPyDirectMechFunc npy_direct_func_proc[] = { + {nullptr, nullptr} + }; + + + void nrn_init_write_cai(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_write_cai(_lmc); + auto node_data = 
make_node_data_write_cai(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + auto* _thread = _ml_arg->_thread; + for (int id = 0; id < nodecount; id++) { + auto* _ppvar = _ml_arg->pdata[id]; + int node_id = node_data.nodeindices[id]; + auto v = node_data.node_voltages[node_id]; + inst.v_unused[id] = v; + inst.cai[id] = (*inst.ion_cai[id]); + inst.cai[id] = 1124.0; + (*inst.ion_cai[id]) = inst.cai[id]; + int _style_ca = *(_ppvar[3].get()); + nrn_wrote_conc(_ca_sym, (*inst.ion_ca_erev[id]), (*inst.ion_cai[id]), (*inst.ion_cao[id]), _style_ca); + } + } + + + static void nrn_jacob_write_cai(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_write_cai(_lmc); + auto node_data = make_node_data_write_cai(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + for (int id = 0; id < nodecount; id++) { + } + } + + + static void _initlists() { + } + + + /** register channel with the simulator */ + extern "C" void _write_cai_reg() { + _initlists(); + + ion_reg("ca", -10000.); + + _ca_sym = hoc_lookup("ca_ion"); + + register_mech(mechanism_info, nrn_alloc_write_cai, nullptr, nullptr, nullptr, nrn_init_write_cai, hoc_nrnpointerindex, 1); + + mech_type = nrn_get_mechtype(mechanism_info[1]); + _nrn_mechanism_register_data_fields(mech_type, + _nrn_mechanism_field{"cai"} /* 0 */, + _nrn_mechanism_field{"v_unused"} /* 1 */, + _nrn_mechanism_field{"ion_cao", "ca_ion"} /* 0 */, + _nrn_mechanism_field{"ion_cai", "ca_ion"} /* 1 */, + _nrn_mechanism_field{"ion_ca_erev", "ca_ion"} /* 2 */, + _nrn_mechanism_field{"style_ca", "#ca_ion"} /* 3 */ + ); + + hoc_register_prop_size(mech_type, 2, 4); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 1, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 2, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 3, "#ca_ion"); + nrn_writes_conc(mech_type, 0); + hoc_register_var(hoc_scalar_double, hoc_vector_double, hoc_intfunc); + hoc_register_npy_direct(mech_type, npy_direct_func_proc); + } +} diff --git a/useion/neuron/write_cao.cpp b/useion/neuron/write_cao.cpp new file mode 100644 index 00000000..8ec60110 --- /dev/null +++ b/useion/neuron/write_cao.cpp @@ -0,0 +1,684 @@ +/********************************************************* +Model Name : write_cao +Filename : write_cao.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : NEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include +#include + +/** + * \dir + * \brief Solver for a system of linear equations : Crout matrix decomposition + * + * \file + * \brief Implementation of Crout matrix decomposition (LU decomposition) followed by + * Forward/Backward substitution: Implementation details : (Legacy code) nrn / scopmath / crout.c + */ + +#include +#include + +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +#include "coreneuron/utils/offload.hpp" +#endif + +namespace nmodl { +namespace crout { + +/** + * \brief Crout matrix decomposition : in-place LU Decomposition of matrix a. 
+ * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: 0 if no error; -1 if matrix is singular or ill-conditioned + */ +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int Crout(int n, T* const a, int* const perm, double* const rowmax) { + // roundoff is the minimal value for a pivot element without its being considered too close to + // zero + double roundoff = 1.e-20; + int i, j, k, r, pivot, irow, save_i = 0, krow; + T sum, equil_1, equil_2; + + /* Initialize permutation and rowmax vectors */ + + for (i = 0; i < n; i++) { + perm[i] = i; + k = 0; + for (j = 1; j < n; j++) + if (std::fabs(a[i * n + j]) > std::fabs(a[i * n + k])) + k = j; + rowmax[i] = a[i * n + k]; + } + + /* Loop over rows and columns r */ + + for (r = 0; r < n; r++) { + /* + * Operate on rth column. This produces the lower triangular matrix + * of terms needed to transform the constant vector. + */ + + for (i = r; i < n; i++) { + sum = 0.0; + irow = perm[i]; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[irow * n + k] * a[krow * n + r]; + } + a[irow * n + r] -= sum; + } + + /* Find row containing the pivot in the rth column */ + + pivot = perm[r]; + equil_1 = std::fabs(a[pivot * n + r] / rowmax[pivot]); + for (i = r + 1; i < n; i++) { + irow = perm[i]; + equil_2 = std::fabs(a[irow * n + r] / rowmax[irow]); + if (equil_2 > equil_1) { + /* make irow the new pivot row */ + + pivot = irow; + save_i = i; + equil_1 = equil_2; + } + } + + /* Interchange entries in permutation vector if necessary */ + + if (pivot != perm[r]) { + perm[save_i] = perm[r]; + perm[r] = pivot; + } + + /* Check that pivot element is not too small */ + + if (std::fabs(a[pivot * n + r]) < roundoff) + return -1; + + /* + * Operate on row in rth position. This produces the upper + * triangular matrix whose diagonal elements are assumed to be unity. + * This matrix is used in the back substitution algorithm. + */ + + for (j = r + 1; j < n; j++) { + sum = 0.0; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[pivot * n + k] * a[krow * n + j]; + } + a[pivot * n + j] = (a[pivot * n + j] - sum) / a[pivot * n + r]; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +/** + * \brief Crout matrix decomposition : Forward/Backward substitution. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: no return variable + */ +#define y_(arg) p[y[arg]] +#define b_(arg) b[arg] +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int solveCrout(int n, + T const* const a, + T const* const b, + T* const p, + int const* const perm, + int const* const y = nullptr) { + int i, j, pivot; + T sum; + + /* Perform forward substitution with pivoting */ + if (y) { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. 
+ */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) -= sum; + } + } else { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. + */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] -= sum; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +#undef y_ +#undef b_ + +} // namespace crout +} // namespace nmodl + +/** + * \dir + * \brief Newton solver implementations + * + * \file + * \brief Implementation of Newton method for solving system of non-linear equations + */ + +#include +#include + +namespace nmodl { +/// newton solver implementations +namespace newton { + +/** + * @defgroup solver Solver Implementation + * @brief Solver implementation details + * + * Implementation of Newton method for solving system of non-linear equations using Eigen + * - newton::newton_solver is the preferred option: requires user to provide Jacobian + * - newton::newton_numerical_diff_solver is the fallback option: Jacobian not required + * + * @{ + */ + +static constexpr int MAX_ITER = 1e3; +static constexpr double EPS = 1e-12; + +/** + * \brief Newton method with user-provided Jacobian + * + * Newton method with user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, `J(X)` where `J(X)` is the Jacobian of `F(X)`, + * solves for \f$F(X) = 0\f$, starting with initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * when \f$|F|^2 < eps^2\f$, solution has converged. + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = -1; + while (++iter < max_iter) { + // calculate F, J from X using user-supplied functor + functor(X, F, J); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + // In Eigen the default storage order is ColMajor. + // Crout's implementation requires matrices stored in RowMajor order (C-style arrays). + // Therefore, the transposeInPlace is critical such that the data() method to give the rows + // instead of the columns. 
+ if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +static constexpr double SQUARE_ROOT_ULP = 1e-7; +static constexpr double CUBIC_ROOT_ULP = 1e-5; + +/** + * \brief Newton method without user-provided Jacobian + * + * Newton method without user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, solves for \f$F(X) = 0\f$, starting with + * initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * + * where `J(X)` is the Jacobian of `F(X)`, which is approximated numerically + * using a symmetric finite difference approximation to the derivative + * when \f$|F|^2 < eps^2\f$, solution has converged/ + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_numerical_diff_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Temporary storage for F(X+dx) + Eigen::Matrix F_p; + // Temporary storage for F(X-dx) + Eigen::Matrix F_m; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = 0; + while (iter < max_iter) { + // calculate F from X using user-supplied functor + functor(X, F); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + ++iter; + // calculate approximate Jacobian + for (int i = 0; i < N; ++i) { + // symmetric finite difference approximation to derivative + // df/dx ~= ( f(x+dx) - f(x-dx) ) / (2*dx) + // choose dx to be ~(ULP)^{1/3}*X[i] + // https://aip.scitation.org/doi/pdf/10.1063/1.4822971 + // also enforce a lower bound ~sqrt(ULP) to avoid dx being too small + double dX = std::max(CUBIC_ROOT_ULP * X[i], SQUARE_ROOT_ULP); + // F(X + dX) + X[i] += dX; + functor(X, F_p); + // F(X - dX) + X[i] -= 2.0 * dX; + functor(X, F_m); + F_p -= F_m; + // J = (F(X + dX) - F(X - dX)) / (2*dX) + J.col(i) = F_p / (2.0 * dX); + // restore X + X[i] += dX; + } + if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +/** + * Newton method template specializations for \f$N <= 4\f$ Use explicit inverse + * of `F` instead of LU decomposition. This is faster, as there is no pivoting + * and therefore no branches, but it is not numerically safe for \f$N > 4\f$. + */ + +template +EIGEN_DEVICE_FUNC int newton_solver_small_N(Eigen::Matrix& X, + FUNC functor, + double eps, + int max_iter) { + bool invertible; + Eigen::Matrix F; + Eigen::Matrix J, J_inv; + int iter = -1; + while (++iter < max_iter) { + functor(X, F, J); + double error = F.norm(); + if (error < eps) { + return iter; + } + // The inverse can be called from within OpenACC regions without any issue, as opposed to + // Eigen::PartialPivLU. 
+ J.computeInverseWithCheck(J_inv, invertible); + if (invertible) + X -= J_inv * F; + else + return -1; + } + return -1; +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +/** @} */ // end of solver + +} // namespace newton +} // namespace nmodl + + +#include "mech_api.h" +#include "neuron/cache/mechanism_range.hpp" +#include "nrniv_mf.h" +#include "section_fwd.hpp" + +/* NEURON global macro definitions */ +/* VECTORIZED */ +#define NRN_VECTORIZED 1 + +static constexpr auto number_of_datum_variables = 4; +static constexpr auto number_of_floating_point_variables = 2; + +namespace { +template +using _nrn_mechanism_std_vector = std::vector; +using _nrn_model_sorted_token = neuron::model_sorted_token; +using _nrn_mechanism_cache_range = neuron::cache::MechanismRange; +using _nrn_mechanism_cache_instance = neuron::cache::MechanismInstance; +using _nrn_non_owning_id_without_container = neuron::container::non_owning_identifier_without_container; +template +using _nrn_mechanism_field = neuron::mechanism::field; +template +void _nrn_mechanism_register_data_fields(Args&&... args) { + neuron::mechanism::register_data_fields(std::forward(args)...); +} +} // namespace + +Prop* hoc_getdata_range(int type); +extern Node* nrn_alloc_node_; + + +namespace neuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "write_cao", + 0, + 0, + 0, + 0 + }; + + + /* NEURON global variables */ + static Symbol* _ca_sym; + static int mech_type; + static Prop* _extcall_prop; + /* _prop_id kind of shadows _extcall_prop to allow validity checking. 
*/ + static _nrn_non_owning_id_without_container _prop_id{}; + static int hoc_nrnpointerindex = -1; + static _nrn_mechanism_std_vector _extcall_thread; + + + /** all global variables */ + struct write_cao_Store { + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + write_cao_Store write_cao_global; + + + /** all mechanism instance variables and global variables */ + struct write_cao_Instance { + double* cao{}; + double* v_unused{}; + const double* const* ion_cai{}; + double* const* ion_cao{}; + double* const* ion_ca_erev{}; + const int* const* style_ca{}; + write_cao_Store* global{&write_cao_global}; + }; + + + struct write_cao_NodeData { + int const * nodeindices; + double const * node_voltages; + double * node_diagonal; + double * node_rhs; + int nodecount; + }; + + + static write_cao_Instance make_instance_write_cao(_nrn_mechanism_cache_range& _lmc) { + return write_cao_Instance { + _lmc.template fpfield_ptr<0>(), + _lmc.template fpfield_ptr<1>(), + _lmc.template dptr_field_ptr<0>(), + _lmc.template dptr_field_ptr<1>(), + _lmc.template dptr_field_ptr<2>() + }; + } + + + static write_cao_NodeData make_node_data_write_cao(NrnThread& nt, Memb_list& _ml_arg) { + return write_cao_NodeData { + _ml_arg.nodeindices, + nt.node_voltage_storage(), + nt.node_d_storage(), + nt.node_rhs_storage(), + _ml_arg.nodecount + }; + } + + + static void nrn_alloc_write_cao(Prop* _prop) { + Datum *_ppvar = nullptr; + _ppvar = nrn_prop_datum_alloc(mech_type, 4, _prop); + _nrn_mechanism_access_dparam(_prop) = _ppvar; + _nrn_mechanism_cache_instance _lmc{_prop}; + size_t const _iml = 0; + assert(_nrn_mechanism_get_num_vars(_prop) == 2); + /*initialize range parameters*/ + _nrn_mechanism_access_dparam(_prop) = _ppvar; + Symbol * ca_sym = hoc_lookup("ca_ion"); + Prop * ca_prop = need_memb(ca_sym); + nrn_check_conc_write(_prop, ca_prop, 0); + nrn_promote(ca_prop, 3, 0); + _ppvar[0] = _nrn_mechanism_get_param_handle(ca_prop, 1); + _ppvar[1] = _nrn_mechanism_get_param_handle(ca_prop, 2); + _ppvar[2] = _nrn_mechanism_get_param_handle(ca_prop, 0); + _ppvar[3] = {neuron::container::do_not_search, &(_nrn_mechanism_access_dparam(ca_prop)[0].literal_value())}; + } + + + /* Neuron setdata functions */ + extern void _nrn_setdata_reg(int, void(*)(Prop*)); + static void _setdata(Prop* _prop) { + _extcall_prop = _prop; + _prop_id = _nrn_get_prop_id(_prop); + } + static void _hoc_setdata() { + Prop *_prop = hoc_getdata_range(mech_type); + _setdata(_prop); + hoc_retpushx(1.); + } + /* Mechanism procedures and functions */ + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + /* declaration of user functions */ + + + /* connect user functions to hoc names */ + static VoidFunc hoc_intfunc[] = { + {"setdata_write_cao", _hoc_setdata}, + {nullptr, nullptr} + }; + static NPyDirectMechFunc npy_direct_func_proc[] = { + {nullptr, nullptr} + }; + + + void nrn_init_write_cao(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_write_cao(_lmc); + auto node_data = 
make_node_data_write_cao(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + auto* _thread = _ml_arg->_thread; + for (int id = 0; id < nodecount; id++) { + auto* _ppvar = _ml_arg->pdata[id]; + int node_id = node_data.nodeindices[id]; + auto v = node_data.node_voltages[node_id]; + inst.v_unused[id] = v; + inst.cao[id] = (*inst.ion_cao[id]); + inst.cao[id] = 1124.0; + (*inst.ion_cao[id]) = inst.cao[id]; + int _style_ca = *(_ppvar[3].get()); + nrn_wrote_conc(_ca_sym, (*inst.ion_ca_erev[id]), (*inst.ion_cai[id]), (*inst.ion_cao[id]), _style_ca); + } + } + + + static void nrn_jacob_write_cao(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_write_cao(_lmc); + auto node_data = make_node_data_write_cao(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + for (int id = 0; id < nodecount; id++) { + } + } + + + static void _initlists() { + } + + + /** register channel with the simulator */ + extern "C" void _write_cao_reg() { + _initlists(); + + ion_reg("ca", -10000.); + + _ca_sym = hoc_lookup("ca_ion"); + + register_mech(mechanism_info, nrn_alloc_write_cao, nullptr, nullptr, nullptr, nrn_init_write_cao, hoc_nrnpointerindex, 1); + + mech_type = nrn_get_mechtype(mechanism_info[1]); + _nrn_mechanism_register_data_fields(mech_type, + _nrn_mechanism_field{"cao"} /* 0 */, + _nrn_mechanism_field{"v_unused"} /* 1 */, + _nrn_mechanism_field{"ion_cai", "ca_ion"} /* 0 */, + _nrn_mechanism_field{"ion_cao", "ca_ion"} /* 1 */, + _nrn_mechanism_field{"ion_ca_erev", "ca_ion"} /* 2 */, + _nrn_mechanism_field{"style_ca", "#ca_ion"} /* 3 */ + ); + + hoc_register_prop_size(mech_type, 2, 4); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 1, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 2, "ca_ion"); + hoc_register_dparam_semantics(mech_type, 3, "#ca_ion"); + nrn_writes_conc(mech_type, 0); + hoc_register_var(hoc_scalar_double, hoc_vector_double, hoc_intfunc); + hoc_register_npy_direct(mech_type, npy_direct_func_proc); + } +} diff --git a/useion/neuron/write_eca.cpp b/useion/neuron/write_eca.cpp new file mode 100644 index 00000000..691742a3 --- /dev/null +++ b/useion/neuron/write_eca.cpp @@ -0,0 +1,665 @@ +/********************************************************* +Model Name : write_eca +Filename : write_eca.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : NEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include +#include + +/** + * \dir + * \brief Solver for a system of linear equations : Crout matrix decomposition + * + * \file + * \brief Implementation of Crout matrix decomposition (LU decomposition) followed by + * Forward/Backward substitution: Implementation details : (Legacy code) nrn / scopmath / crout.c + */ + +#include +#include + +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +#include "coreneuron/utils/offload.hpp" +#endif + +namespace nmodl { +namespace crout { + +/** + * \brief Crout matrix decomposition : in-place LU Decomposition of matrix a. 
+ * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: 0 if no error; -1 if matrix is singular or ill-conditioned + */ +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int Crout(int n, T* const a, int* const perm, double* const rowmax) { + // roundoff is the minimal value for a pivot element without its being considered too close to + // zero + double roundoff = 1.e-20; + int i, j, k, r, pivot, irow, save_i = 0, krow; + T sum, equil_1, equil_2; + + /* Initialize permutation and rowmax vectors */ + + for (i = 0; i < n; i++) { + perm[i] = i; + k = 0; + for (j = 1; j < n; j++) + if (std::fabs(a[i * n + j]) > std::fabs(a[i * n + k])) + k = j; + rowmax[i] = a[i * n + k]; + } + + /* Loop over rows and columns r */ + + for (r = 0; r < n; r++) { + /* + * Operate on rth column. This produces the lower triangular matrix + * of terms needed to transform the constant vector. + */ + + for (i = r; i < n; i++) { + sum = 0.0; + irow = perm[i]; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[irow * n + k] * a[krow * n + r]; + } + a[irow * n + r] -= sum; + } + + /* Find row containing the pivot in the rth column */ + + pivot = perm[r]; + equil_1 = std::fabs(a[pivot * n + r] / rowmax[pivot]); + for (i = r + 1; i < n; i++) { + irow = perm[i]; + equil_2 = std::fabs(a[irow * n + r] / rowmax[irow]); + if (equil_2 > equil_1) { + /* make irow the new pivot row */ + + pivot = irow; + save_i = i; + equil_1 = equil_2; + } + } + + /* Interchange entries in permutation vector if necessary */ + + if (pivot != perm[r]) { + perm[save_i] = perm[r]; + perm[r] = pivot; + } + + /* Check that pivot element is not too small */ + + if (std::fabs(a[pivot * n + r]) < roundoff) + return -1; + + /* + * Operate on row in rth position. This produces the upper + * triangular matrix whose diagonal elements are assumed to be unity. + * This matrix is used in the back substitution algorithm. + */ + + for (j = r + 1; j < n; j++) { + sum = 0.0; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[pivot * n + k] * a[krow * n + j]; + } + a[pivot * n + j] = (a[pivot * n + j] - sum) / a[pivot * n + r]; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +/** + * \brief Crout matrix decomposition : Forward/Backward substitution. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: no return variable + */ +#define y_(arg) p[y[arg]] +#define b_(arg) b[arg] +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int solveCrout(int n, + T const* const a, + T const* const b, + T* const p, + int const* const perm, + int const* const y = nullptr) { + int i, j, pivot; + T sum; + + /* Perform forward substitution with pivoting */ + if (y) { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. 
+ */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) -= sum; + } + } else { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. + */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] -= sum; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +#undef y_ +#undef b_ + +} // namespace crout +} // namespace nmodl + +/** + * \dir + * \brief Newton solver implementations + * + * \file + * \brief Implementation of Newton method for solving system of non-linear equations + */ + +#include +#include + +namespace nmodl { +/// newton solver implementations +namespace newton { + +/** + * @defgroup solver Solver Implementation + * @brief Solver implementation details + * + * Implementation of Newton method for solving system of non-linear equations using Eigen + * - newton::newton_solver is the preferred option: requires user to provide Jacobian + * - newton::newton_numerical_diff_solver is the fallback option: Jacobian not required + * + * @{ + */ + +static constexpr int MAX_ITER = 1e3; +static constexpr double EPS = 1e-12; + +/** + * \brief Newton method with user-provided Jacobian + * + * Newton method with user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, `J(X)` where `J(X)` is the Jacobian of `F(X)`, + * solves for \f$F(X) = 0\f$, starting with initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * when \f$|F|^2 < eps^2\f$, solution has converged. + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = -1; + while (++iter < max_iter) { + // calculate F, J from X using user-supplied functor + functor(X, F, J); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + // In Eigen the default storage order is ColMajor. + // Crout's implementation requires matrices stored in RowMajor order (C-style arrays). + // Therefore, the transposeInPlace is critical such that the data() method to give the rows + // instead of the columns. 
+ if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +static constexpr double SQUARE_ROOT_ULP = 1e-7; +static constexpr double CUBIC_ROOT_ULP = 1e-5; + +/** + * \brief Newton method without user-provided Jacobian + * + * Newton method without user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, solves for \f$F(X) = 0\f$, starting with + * initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * + * where `J(X)` is the Jacobian of `F(X)`, which is approximated numerically + * using a symmetric finite difference approximation to the derivative + * when \f$|F|^2 < eps^2\f$, solution has converged/ + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_numerical_diff_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Temporary storage for F(X+dx) + Eigen::Matrix F_p; + // Temporary storage for F(X-dx) + Eigen::Matrix F_m; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = 0; + while (iter < max_iter) { + // calculate F from X using user-supplied functor + functor(X, F); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + ++iter; + // calculate approximate Jacobian + for (int i = 0; i < N; ++i) { + // symmetric finite difference approximation to derivative + // df/dx ~= ( f(x+dx) - f(x-dx) ) / (2*dx) + // choose dx to be ~(ULP)^{1/3}*X[i] + // https://aip.scitation.org/doi/pdf/10.1063/1.4822971 + // also enforce a lower bound ~sqrt(ULP) to avoid dx being too small + double dX = std::max(CUBIC_ROOT_ULP * X[i], SQUARE_ROOT_ULP); + // F(X + dX) + X[i] += dX; + functor(X, F_p); + // F(X - dX) + X[i] -= 2.0 * dX; + functor(X, F_m); + F_p -= F_m; + // J = (F(X + dX) - F(X - dX)) / (2*dX) + J.col(i) = F_p / (2.0 * dX); + // restore X + X[i] += dX; + } + if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +/** + * Newton method template specializations for \f$N <= 4\f$ Use explicit inverse + * of `F` instead of LU decomposition. This is faster, as there is no pivoting + * and therefore no branches, but it is not numerically safe for \f$N > 4\f$. + */ + +template +EIGEN_DEVICE_FUNC int newton_solver_small_N(Eigen::Matrix& X, + FUNC functor, + double eps, + int max_iter) { + bool invertible; + Eigen::Matrix F; + Eigen::Matrix J, J_inv; + int iter = -1; + while (++iter < max_iter) { + functor(X, F, J); + double error = F.norm(); + if (error < eps) { + return iter; + } + // The inverse can be called from within OpenACC regions without any issue, as opposed to + // Eigen::PartialPivLU. 
+ J.computeInverseWithCheck(J_inv, invertible); + if (invertible) + X -= J_inv * F; + else + return -1; + } + return -1; +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +/** @} */ // end of solver + +} // namespace newton +} // namespace nmodl + + +#include "mech_api.h" +#include "neuron/cache/mechanism_range.hpp" +#include "nrniv_mf.h" +#include "section_fwd.hpp" + +/* NEURON global macro definitions */ +/* VECTORIZED */ +#define NRN_VECTORIZED 1 + +static constexpr auto number_of_datum_variables = 1; +static constexpr auto number_of_floating_point_variables = 2; + +namespace { +template +using _nrn_mechanism_std_vector = std::vector; +using _nrn_model_sorted_token = neuron::model_sorted_token; +using _nrn_mechanism_cache_range = neuron::cache::MechanismRange; +using _nrn_mechanism_cache_instance = neuron::cache::MechanismInstance; +using _nrn_non_owning_id_without_container = neuron::container::non_owning_identifier_without_container; +template +using _nrn_mechanism_field = neuron::mechanism::field; +template +void _nrn_mechanism_register_data_fields(Args&&... args) { + neuron::mechanism::register_data_fields(std::forward(args)...); +} +} // namespace + +Prop* hoc_getdata_range(int type); +extern Node* nrn_alloc_node_; + + +namespace neuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "write_eca", + 0, + 0, + 0, + 0 + }; + + + /* NEURON global variables */ + static Symbol* _ca_sym; + static int mech_type; + static Prop* _extcall_prop; + /* _prop_id kind of shadows _extcall_prop to allow validity checking. 
*/ + static _nrn_non_owning_id_without_container _prop_id{}; + static int hoc_nrnpointerindex = -1; + static _nrn_mechanism_std_vector _extcall_thread; + + + /** all global variables */ + struct write_eca_Store { + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + write_eca_Store write_eca_global; + + + /** all mechanism instance variables and global variables */ + struct write_eca_Instance { + double* eca{}; + double* v_unused{}; + double* const* ion_eca{}; + write_eca_Store* global{&write_eca_global}; + }; + + + struct write_eca_NodeData { + int const * nodeindices; + double const * node_voltages; + double * node_diagonal; + double * node_rhs; + int nodecount; + }; + + + static write_eca_Instance make_instance_write_eca(_nrn_mechanism_cache_range& _lmc) { + return write_eca_Instance { + _lmc.template fpfield_ptr<0>(), + _lmc.template fpfield_ptr<1>(), + _lmc.template dptr_field_ptr<0>() + }; + } + + + static write_eca_NodeData make_node_data_write_eca(NrnThread& nt, Memb_list& _ml_arg) { + return write_eca_NodeData { + _ml_arg.nodeindices, + nt.node_voltage_storage(), + nt.node_d_storage(), + nt.node_rhs_storage(), + _ml_arg.nodecount + }; + } + + + static void nrn_alloc_write_eca(Prop* _prop) { + Datum *_ppvar = nullptr; + _ppvar = nrn_prop_datum_alloc(mech_type, 1, _prop); + _nrn_mechanism_access_dparam(_prop) = _ppvar; + _nrn_mechanism_cache_instance _lmc{_prop}; + size_t const _iml = 0; + assert(_nrn_mechanism_get_num_vars(_prop) == 2); + /*initialize range parameters*/ + _nrn_mechanism_access_dparam(_prop) = _ppvar; + Symbol * ca_sym = hoc_lookup("ca_ion"); + Prop * ca_prop = need_memb(ca_sym); + nrn_promote(ca_prop, 0, 3); + _ppvar[0] = _nrn_mechanism_get_param_handle(ca_prop, 0); + } + + + /* Neuron setdata functions */ + extern void _nrn_setdata_reg(int, void(*)(Prop*)); + static void _setdata(Prop* _prop) { + _extcall_prop = _prop; + _prop_id = _nrn_get_prop_id(_prop); + } + static void _hoc_setdata() { + Prop *_prop = hoc_getdata_range(mech_type); + _setdata(_prop); + hoc_retpushx(1.); + } + /* Mechanism procedures and functions */ + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + /* declaration of user functions */ + + + /* connect user functions to hoc names */ + static VoidFunc hoc_intfunc[] = { + {"setdata_write_eca", _hoc_setdata}, + {nullptr, nullptr} + }; + static NPyDirectMechFunc npy_direct_func_proc[] = { + {nullptr, nullptr} + }; + + + void nrn_init_write_eca(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_write_eca(_lmc); + auto node_data = make_node_data_write_eca(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + auto* _thread = _ml_arg->_thread; + for (int id = 0; id < nodecount; id++) { + auto* _ppvar = _ml_arg->pdata[id]; + int node_id = node_data.nodeindices[id]; + auto v = node_data.node_voltages[node_id]; + inst.v_unused[id] = v; + inst.eca[id] = 1124.0; + (*inst.ion_eca[id]) = inst.eca[id]; + } + } + + + static void nrn_jacob_write_eca(const _nrn_model_sorted_token& _sorted_token, 
NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_write_eca(_lmc); + auto node_data = make_node_data_write_eca(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + for (int id = 0; id < nodecount; id++) { + } + } + + + static void _initlists() { + } + + + /** register channel with the simulator */ + extern "C" void _write_eca_reg() { + _initlists(); + + ion_reg("ca", -10000.); + + _ca_sym = hoc_lookup("ca_ion"); + + register_mech(mechanism_info, nrn_alloc_write_eca, nullptr, nullptr, nullptr, nrn_init_write_eca, hoc_nrnpointerindex, 1); + + mech_type = nrn_get_mechtype(mechanism_info[1]); + _nrn_mechanism_register_data_fields(mech_type, + _nrn_mechanism_field<double>{"eca"} /* 0 */, + _nrn_mechanism_field<double>{"v_unused"} /* 1 */, + _nrn_mechanism_field<double*>{"ion_eca", "ca_ion"} /* 0 */ + ); + + hoc_register_prop_size(mech_type, 2, 1); + hoc_register_dparam_semantics(mech_type, 0, "ca_ion"); + hoc_register_var(hoc_scalar_double, hoc_vector_double, hoc_intfunc); + hoc_register_npy_direct(mech_type, npy_direct_func_proc); + } +}
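
Usage note on the bundled solvers: the Newton iteration documented above, X_{n+1} = X_n - J(X_n)^{-1} F(X_n) with convergence once |F| < eps, can be exercised on its own. The sketch below is only illustrative; the diff has dropped the template parameter lists, so it assumes the usual `template <int N, typename FUNC>` form with `Eigen::Matrix<double, N, 1>` vectors and an `Eigen::Matrix<double, N, N>` Jacobian, which is what the calls `functor(X, F, J)`, `F.norm()` and `Crout(N, J.data(), ...)` in the body imply. `QuadraticSystem` and the example equations are invented for the demo. For two unknowns, overload resolution picks the small-N overload that uses the explicit inverse rather than Crout LU; when no analytic Jacobian is available, `newton_numerical_diff_solver` accepts a functor that only fills `F`.

// Minimal sketch under the assumptions stated above; not part of the generated mechanisms.
#include <Eigen/Dense>
#include <iostream>
// The nmodl::newton functions shown above are assumed to be visible in this translation unit
// (in the generated files they are pasted ahead of the mechanism code).

// F(x, y) = (x^2 + y - 3, x - y + 1); roots at (1, 2) and (-2, -1).
struct QuadraticSystem {
    void operator()(const Eigen::Matrix<double, 2, 1>& X,
                    Eigen::Matrix<double, 2, 1>& F,
                    Eigen::Matrix<double, 2, 2>& J) const {
        F[0] = X[0] * X[0] + X[1] - 3.0;
        F[1] = X[0] - X[1] + 1.0;
        J(0, 0) = 2.0 * X[0];  J(0, 1) = 1.0;   // dF0/dx, dF0/dy
        J(1, 0) = 1.0;         J(1, 1) = -1.0;  // dF1/dx, dF1/dy
    }
};

int main() {
    Eigen::Matrix<double, 2, 1> X;
    X << 1.5, 1.5;  // initial guess
    int iters = nmodl::newton::newton_solver(X, QuadraticSystem{});  // default eps and max_iter
    std::cout << "iterations: " << iters << ", X = (" << X[0] << ", " << X[1] << ")\n";
    return iters < 0;  // -1 means the solver did not converge
}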
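
The Crout routines can likewise be used stand-alone: `Crout` factorises a row-major C array in place (returning -1 for a singular or ill-conditioned matrix, as noted above) and `solveCrout` then runs the forward/backward substitution against a right-hand side, leaving the solution in natural order. A minimal sketch, assuming the stripped template parameter is simply `typename T` and that the `nmodl::crout` definitions above are visible in the same translation unit:

#include <array>
#include <cstdio>

int main() {
    // Row-major 3x3 system A * x = b, built so that the exact solution is x = (1, 2, 3).
    std::array<double, 9> A = {4, 1, 0,
                               1, 3, 1,
                               0, 1, 2};
    std::array<double, 3> b = {6, 10, 8};   // A * (1, 2, 3)
    std::array<int, 3> perm{};
    std::array<double, 3> rowmax{};
    std::array<double, 3> x{};

    // In-place LU factorisation with implicit row pivoting recorded in perm.
    if (nmodl::crout::Crout(3, A.data(), perm.data(), rowmax.data()) < 0) {
        std::puts("matrix is singular or ill-conditioned");
        return 1;
    }
    // Forward/backward substitution; with y == nullptr the solution is written to x.
    nmodl::crout::solveCrout(3, A.data(), b.data(), x.data(), perm.data());
    std::printf("x = (%g, %g, %g)\n", x[0], x[1], x[2]);  // expect approximately (1, 2, 3)
    return 0;
}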