From f1f98152cd9459d3e2dde859175b55c498c6221f Mon Sep 17 00:00:00 2001 From: GitHub Actions Bot <> Date: Fri, 26 Jul 2024 09:55:54 +0000 Subject: [PATCH] References for 'https://github.com/BlueBrain/nmodl/pull/1366'. --- {cnexp => solve}/coreneuron/cnexp_array.cpp | 0 {cnexp => solve}/coreneuron/cnexp_scalar.cpp | 0 solve/coreneuron/derivimplicit_array.cpp | 382 ++++++++++ solve/coreneuron/derivimplicit_scalar.cpp | 366 +++++++++ {cnexp => solve}/neuron/cnexp_array.cpp | 0 {cnexp => solve}/neuron/cnexp_scalar.cpp | 0 solve/neuron/derivimplicit_array.cpp | 740 +++++++++++++++++++ solve/neuron/derivimplicit_scalar.cpp | 723 ++++++++++++++++++ 8 files changed, 2211 insertions(+) rename {cnexp => solve}/coreneuron/cnexp_array.cpp (100%) rename {cnexp => solve}/coreneuron/cnexp_scalar.cpp (100%) create mode 100644 solve/coreneuron/derivimplicit_array.cpp create mode 100644 solve/coreneuron/derivimplicit_scalar.cpp rename {cnexp => solve}/neuron/cnexp_array.cpp (100%) rename {cnexp => solve}/neuron/cnexp_scalar.cpp (100%) create mode 100644 solve/neuron/derivimplicit_array.cpp create mode 100644 solve/neuron/derivimplicit_scalar.cpp diff --git a/cnexp/coreneuron/cnexp_array.cpp b/solve/coreneuron/cnexp_array.cpp similarity index 100% rename from cnexp/coreneuron/cnexp_array.cpp rename to solve/coreneuron/cnexp_array.cpp diff --git a/cnexp/coreneuron/cnexp_scalar.cpp b/solve/coreneuron/cnexp_scalar.cpp similarity index 100% rename from cnexp/coreneuron/cnexp_scalar.cpp rename to solve/coreneuron/cnexp_scalar.cpp diff --git a/solve/coreneuron/derivimplicit_array.cpp b/solve/coreneuron/derivimplicit_array.cpp new file mode 100644 index 00000000..fdc35180 --- /dev/null +++ b/solve/coreneuron/derivimplicit_array.cpp @@ -0,0 +1,382 @@ +/********************************************************* +Model Name : derivimplicit_array +Filename : derivimplicit_array.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : CoreNEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace coreneuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "derivimplicit_array", + 0, + "z_derivimplicit_array[3]", + 0, + "x_derivimplicit_array", + "s_derivimplicit_array[2]", + 0, + 0 + }; + + + /** all global variables */ + struct derivimplicit_array_Store { + double x0{}; + double s0{}; + int reset{}; + int mech_type{}; + int slist1[1]{3}; + int dlist1[1]{6}; + int slist2[1]{3}; + ThreadDatum ext_call_thread[3]{}; + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + derivimplicit_array_Store derivimplicit_array_global; + + + /** all mechanism instance variables and global variables */ + struct derivimplicit_array_Instance { + double* z{}; + double* x{}; + double* s{}; + double* Dx{}; + double* Ds{}; + double* v_unused{}; + double* g_unused{}; + derivimplicit_array_Store* global{&derivimplicit_array_global}; + }; + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + 
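+
+ // Note: this table and hoc_vector_double below stay empty; the mechanism
+ // appears to expose no GLOBAL variables to the interpreter.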
+ /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + static inline int first_pointer_var_index() { + return -1; + } + + + static inline int first_random_var_index() { + return -1; + } + + + /** thread specific helper routines for derivimplicit */ + + static inline int* deriv1_advance(ThreadDatum* thread) { + return &(thread[0].i); + } + + static inline int dith1() { + return 1; + } + + static inline void** newtonspace1(ThreadDatum* thread) { + return &(thread[2]._pvoid); + } + + + static inline int float_variables_size() { + return 11; + } + + + static inline int int_variables_size() { + return 0; + } + + + static inline int get_mech_type() { + return derivimplicit_array_global.mech_type; + } + + + static inline Memb_list* get_memb_list(NrnThread* nt) { + if (!nt->_ml_list) { + return nullptr; + } + return nt->_ml_list[get_mech_type()]; + } + + + static inline void* mem_alloc(size_t num, size_t size, size_t alignment = 16) { + void* ptr; + posix_memalign(&ptr, alignment, num*size); + memset(ptr, 0, size); + return ptr; + } + + + static inline void mem_free(void* ptr) { + free(ptr); + } + + + static inline void coreneuron_abort() { + abort(); + } + + + /** thread memory allocation callback */ + static void thread_mem_init(ThreadDatum* thread) { + thread[dith1()].pval = nullptr; + } + + + /** thread memory cleanup callback */ + static void thread_mem_cleanup(ThreadDatum* thread) { + free(thread[dith1()].pval); + nrn_destroy_newtonspace(static_cast(*newtonspace1(thread))); + } + + // Allocate instance structure + static void nrn_private_constructor_derivimplicit_array(NrnThread* nt, Memb_list* ml, int type) { + assert(!ml->instance); + assert(!ml->global_variables); + assert(ml->global_variables_size == 0); + auto* const inst = new derivimplicit_array_Instance{}; + assert(inst->global == &derivimplicit_array_global); + ml->instance = inst; + ml->global_variables = inst->global; + ml->global_variables_size = sizeof(derivimplicit_array_Store); + } + + // Deallocate the instance structure + static void nrn_private_destructor_derivimplicit_array(NrnThread* nt, Memb_list* ml, int type) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &derivimplicit_array_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(derivimplicit_array_Store)); + delete inst; + ml->instance = nullptr; + ml->global_variables = nullptr; + ml->global_variables_size = 0; + } + + /** initialize mechanism instance variables */ + static inline void setup_instance(NrnThread* nt, Memb_list* ml) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &derivimplicit_array_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(derivimplicit_array_Store)); + int pnodecount = ml->_nodecount_padded; + Datum* indexes = ml->pdata; + inst->z = ml->data+0*pnodecount; + inst->x = ml->data+3*pnodecount; + inst->s = ml->data+4*pnodecount; + inst->Dx = ml->data+6*pnodecount; + inst->Ds = ml->data+7*pnodecount; + inst->v_unused = ml->data+9*pnodecount; + inst->g_unused = ml->data+10*pnodecount; + } + + + + static void nrn_alloc_derivimplicit_array(double* data, Datum* indexes, int type) { + // do nothing + } + + + void nrn_constructor_derivimplicit_array(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + 
int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + void nrn_destructor_derivimplicit_array(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + namespace { + struct _newton_dX_derivimplicit_array { + int operator()(int id, int pnodecount, double* data, Datum* indexes, ThreadDatum* thread, NrnThread* nt, Memb_list* ml, double v) const { + auto* const inst = static_cast(ml->instance); + double* savstate1 = static_cast(thread[dith1()].pval); + auto const& slist1 = inst->global->slist1; + auto const& dlist1 = inst->global->dlist1; + double* dlist2 = static_cast(thread[dith1()].pval) + (1*pnodecount); + inst->Dx[id] = ((inst->s+id*2)[static_cast(0)] + (inst->s+id*2)[static_cast(1)]) * ((inst->z+id*3)[static_cast(0)] * (inst->z+id*3)[static_cast(1)] * (inst->z+id*3)[static_cast(2)]) * inst->x[id]; + int counter = -1; + for (int i=0; i<1; i++) { + if (*deriv1_advance(thread)) { + dlist2[(++counter)*pnodecount+id] = data[dlist1[i]*pnodecount+id]-(data[slist1[i]*pnodecount+id]-savstate1[i*pnodecount+id])/nt->_dt; + } else { + dlist2[(++counter)*pnodecount+id] = data[slist1[i]*pnodecount+id]-savstate1[i*pnodecount+id]; + } + } + return 0; + } + }; + } + + int dX_derivimplicit_array(int id, int pnodecount, double* data, Datum* indexes, ThreadDatum* thread, NrnThread* nt, Memb_list* ml, double v) { + auto* const inst = static_cast(ml->instance); + double* savstate1 = (double*) thread[dith1()].pval; + auto const& slist1 = inst->global->slist1; + auto& slist2 = inst->global->slist2; + double* dlist2 = static_cast(thread[dith1()].pval) + (1*pnodecount); + for (int i=0; i<1; i++) { + savstate1[i*pnodecount+id] = data[slist1[i]*pnodecount+id]; + } + int reset = nrn_newton_thread(static_cast(*newtonspace1(thread)), 1, slist2, _newton_dX_derivimplicit_array{}, dlist2, id, pnodecount, data, indexes, thread, nt, ml, v); + return reset; + } + + + + + /** initialize channel */ + void nrn_init_derivimplicit_array(NrnThread* nt, Memb_list* ml, int type) { + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + + setup_instance(nt, ml); + auto* const inst = static_cast(ml->instance); + + + int& deriv_advance_flag = *deriv1_advance(thread); + deriv_advance_flag = 0; + auto ns = newtonspace1(thread); + auto& th = thread[dith1()]; + if (*ns == nullptr) { + int vec_size = 2*1*pnodecount*sizeof(double); + double* vec = makevector(vec_size); + th.pval = vec; + *ns = nrn_cons_newtonspace(1, pnodecount); + } + if (_nrn_skip_initmodel == 0) { + #pragma omp simd + #pragma ivdep + for (int id = 0; id < nodecount; id++) { + int node_id = node_index[id]; + double v = voltage[node_id]; + #if NRN_PRCELLSTATE + inst->v_unused[id] = v; + #endif + inst->x[id] = inst->global->x0; + (inst->s+id*2)[0] = inst->global->s0; + (inst->s+id*2)[1] = inst->global->s0; + inst->x[id] = 
42.0; + (inst->s+id*2)[static_cast(0)] = 0.1; + (inst->s+id*2)[static_cast(1)] = -1.0; + (inst->z+id*3)[static_cast(0)] = 0.7; + (inst->z+id*3)[static_cast(1)] = 0.8; + (inst->z+id*3)[static_cast(2)] = 0.9; + } + } + deriv_advance_flag = 1; + } + + + /** update state */ + void nrn_state_derivimplicit_array(NrnThread* nt, Memb_list* ml, int type) { + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #pragma omp simd + #pragma ivdep + for (int id = 0; id < nodecount; id++) { + int node_id = node_index[id]; + double v = voltage[node_id]; + #if NRN_PRCELLSTATE + inst->v_unused[id] = v; + #endif + dX_derivimplicit_array(id, pnodecount, data, indexes, thread, nt, ml, v); + } + } + + + /** register channel with the simulator */ + void _derivimplicit_array_reg() { + + int mech_type = nrn_get_mechtype("derivimplicit_array"); + derivimplicit_array_global.mech_type = mech_type; + if (mech_type == -1) { + return; + } + + _nrn_layout_reg(mech_type, 0); + register_mech(mechanism_info, nrn_alloc_derivimplicit_array, nullptr, nullptr, nrn_state_derivimplicit_array, nrn_init_derivimplicit_array, nrn_private_constructor_derivimplicit_array, nrn_private_destructor_derivimplicit_array, first_pointer_var_index(), 4); + + thread_mem_init(derivimplicit_array_global.ext_call_thread); + _nrn_thread_reg0(mech_type, thread_mem_cleanup); + _nrn_thread_reg1(mech_type, thread_mem_init); + hoc_register_prop_size(mech_type, float_variables_size(), int_variables_size()); + hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL); + } +} diff --git a/solve/coreneuron/derivimplicit_scalar.cpp b/solve/coreneuron/derivimplicit_scalar.cpp new file mode 100644 index 00000000..4346878b --- /dev/null +++ b/solve/coreneuron/derivimplicit_scalar.cpp @@ -0,0 +1,366 @@ +/********************************************************* +Model Name : derivimplicit_scalar +Filename : derivimplicit_scalar.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : CoreNEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace coreneuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "derivimplicit_scalar", + 0, + 0, + "x_derivimplicit_scalar", + 0, + 0 + }; + + + /** all global variables */ + struct derivimplicit_scalar_Store { + double x0{}; + int reset{}; + int mech_type{}; + int slist1[1]{0}; + int dlist1[1]{1}; + int slist2[1]{0}; + ThreadDatum ext_call_thread[3]{}; + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + derivimplicit_scalar_Store derivimplicit_scalar_global; + + + /** all mechanism instance variables and global variables */ + struct derivimplicit_scalar_Instance { + double* x{}; + double* Dx{}; + double* v_unused{}; + double* g_unused{}; + derivimplicit_scalar_Store* 
global{&derivimplicit_scalar_global}; + }; + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + static inline int first_pointer_var_index() { + return -1; + } + + + static inline int first_random_var_index() { + return -1; + } + + + /** thread specific helper routines for derivimplicit */ + + static inline int* deriv1_advance(ThreadDatum* thread) { + return &(thread[0].i); + } + + static inline int dith1() { + return 1; + } + + static inline void** newtonspace1(ThreadDatum* thread) { + return &(thread[2]._pvoid); + } + + + static inline int float_variables_size() { + return 4; + } + + + static inline int int_variables_size() { + return 0; + } + + + static inline int get_mech_type() { + return derivimplicit_scalar_global.mech_type; + } + + + static inline Memb_list* get_memb_list(NrnThread* nt) { + if (!nt->_ml_list) { + return nullptr; + } + return nt->_ml_list[get_mech_type()]; + } + + + static inline void* mem_alloc(size_t num, size_t size, size_t alignment = 16) { + void* ptr; + posix_memalign(&ptr, alignment, num*size); + memset(ptr, 0, size); + return ptr; + } + + + static inline void mem_free(void* ptr) { + free(ptr); + } + + + static inline void coreneuron_abort() { + abort(); + } + + + /** thread memory allocation callback */ + static void thread_mem_init(ThreadDatum* thread) { + thread[dith1()].pval = nullptr; + } + + + /** thread memory cleanup callback */ + static void thread_mem_cleanup(ThreadDatum* thread) { + free(thread[dith1()].pval); + nrn_destroy_newtonspace(static_cast(*newtonspace1(thread))); + } + + // Allocate instance structure + static void nrn_private_constructor_derivimplicit_scalar(NrnThread* nt, Memb_list* ml, int type) { + assert(!ml->instance); + assert(!ml->global_variables); + assert(ml->global_variables_size == 0); + auto* const inst = new derivimplicit_scalar_Instance{}; + assert(inst->global == &derivimplicit_scalar_global); + ml->instance = inst; + ml->global_variables = inst->global; + ml->global_variables_size = sizeof(derivimplicit_scalar_Store); + } + + // Deallocate the instance structure + static void nrn_private_destructor_derivimplicit_scalar(NrnThread* nt, Memb_list* ml, int type) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &derivimplicit_scalar_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(derivimplicit_scalar_Store)); + delete inst; + ml->instance = nullptr; + ml->global_variables = nullptr; + ml->global_variables_size = 0; + } + + /** initialize mechanism instance variables */ + static inline void setup_instance(NrnThread* nt, Memb_list* ml) { + auto* const inst = static_cast(ml->instance); + assert(inst); + assert(inst->global); + assert(inst->global == &derivimplicit_scalar_global); + assert(inst->global == ml->global_variables); + assert(ml->global_variables_size == sizeof(derivimplicit_scalar_Store)); + int pnodecount = ml->_nodecount_padded; + Datum* indexes = ml->pdata; + inst->x = ml->data+0*pnodecount; + inst->Dx = ml->data+1*pnodecount; + inst->v_unused = ml->data+2*pnodecount; + inst->g_unused = ml->data+3*pnodecount; + } + + + + static void nrn_alloc_derivimplicit_scalar(double* data, Datum* indexes, int type) { + // do nothing + } + + + void nrn_constructor_derivimplicit_scalar(NrnThread* nt, Memb_list* 
ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + void nrn_destructor_derivimplicit_scalar(NrnThread* nt, Memb_list* ml, int type) { + #ifndef CORENEURON_BUILD + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #endif + } + + + namespace { + struct _newton_dX_derivimplicit_scalar { + int operator()(int id, int pnodecount, double* data, Datum* indexes, ThreadDatum* thread, NrnThread* nt, Memb_list* ml, double v) const { + auto* const inst = static_cast(ml->instance); + double* savstate1 = static_cast(thread[dith1()].pval); + auto const& slist1 = inst->global->slist1; + auto const& dlist1 = inst->global->dlist1; + double* dlist2 = static_cast(thread[dith1()].pval) + (1*pnodecount); + inst->Dx[id] = -inst->x[id]; + int counter = -1; + for (int i=0; i<1; i++) { + if (*deriv1_advance(thread)) { + dlist2[(++counter)*pnodecount+id] = data[dlist1[i]*pnodecount+id]-(data[slist1[i]*pnodecount+id]-savstate1[i*pnodecount+id])/nt->_dt; + } else { + dlist2[(++counter)*pnodecount+id] = data[slist1[i]*pnodecount+id]-savstate1[i*pnodecount+id]; + } + } + return 0; + } + }; + } + + int dX_derivimplicit_scalar(int id, int pnodecount, double* data, Datum* indexes, ThreadDatum* thread, NrnThread* nt, Memb_list* ml, double v) { + auto* const inst = static_cast(ml->instance); + double* savstate1 = (double*) thread[dith1()].pval; + auto const& slist1 = inst->global->slist1; + auto& slist2 = inst->global->slist2; + double* dlist2 = static_cast(thread[dith1()].pval) + (1*pnodecount); + for (int i=0; i<1; i++) { + savstate1[i*pnodecount+id] = data[slist1[i]*pnodecount+id]; + } + int reset = nrn_newton_thread(static_cast(*newtonspace1(thread)), 1, slist2, _newton_dX_derivimplicit_scalar{}, dlist2, id, pnodecount, data, indexes, thread, nt, ml, v); + return reset; + } + + + + + /** initialize channel */ + void nrn_init_derivimplicit_scalar(NrnThread* nt, Memb_list* ml, int type) { + int nodecount = ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + + setup_instance(nt, ml); + auto* const inst = static_cast(ml->instance); + + + int& deriv_advance_flag = *deriv1_advance(thread); + deriv_advance_flag = 0; + auto ns = newtonspace1(thread); + auto& th = thread[dith1()]; + if (*ns == nullptr) { + int vec_size = 2*1*pnodecount*sizeof(double); + double* vec = makevector(vec_size); + th.pval = vec; + *ns = nrn_cons_newtonspace(1, pnodecount); + } + if (_nrn_skip_initmodel == 0) { + #pragma omp simd + #pragma ivdep + for (int id = 0; id < nodecount; id++) { + int node_id = node_index[id]; + double v = voltage[node_id]; + #if NRN_PRCELLSTATE + inst->v_unused[id] = v; + #endif + inst->x[id] = inst->global->x0; + inst->x[id] = 42.0; + } + } + deriv_advance_flag = 1; + } + + + /** update state */ + void nrn_state_derivimplicit_scalar(NrnThread* nt, Memb_list* ml, int type) { + int nodecount 
= ml->nodecount; + int pnodecount = ml->_nodecount_padded; + const int* node_index = ml->nodeindices; + double* data = ml->data; + const double* voltage = nt->_actual_v; + Datum* indexes = ml->pdata; + ThreadDatum* thread = ml->_thread; + auto* const inst = static_cast(ml->instance); + + #pragma omp simd + #pragma ivdep + for (int id = 0; id < nodecount; id++) { + int node_id = node_index[id]; + double v = voltage[node_id]; + #if NRN_PRCELLSTATE + inst->v_unused[id] = v; + #endif + dX_derivimplicit_scalar(id, pnodecount, data, indexes, thread, nt, ml, v); + } + } + + + /** register channel with the simulator */ + void _derivimplicit_scalar_reg() { + + int mech_type = nrn_get_mechtype("derivimplicit_scalar"); + derivimplicit_scalar_global.mech_type = mech_type; + if (mech_type == -1) { + return; + } + + _nrn_layout_reg(mech_type, 0); + register_mech(mechanism_info, nrn_alloc_derivimplicit_scalar, nullptr, nullptr, nrn_state_derivimplicit_scalar, nrn_init_derivimplicit_scalar, nrn_private_constructor_derivimplicit_scalar, nrn_private_destructor_derivimplicit_scalar, first_pointer_var_index(), 4); + + thread_mem_init(derivimplicit_scalar_global.ext_call_thread); + _nrn_thread_reg0(mech_type, thread_mem_cleanup); + _nrn_thread_reg1(mech_type, thread_mem_init); + hoc_register_prop_size(mech_type, float_variables_size(), int_variables_size()); + hoc_register_var(hoc_scalar_double, hoc_vector_double, NULL); + } +} diff --git a/cnexp/neuron/cnexp_array.cpp b/solve/neuron/cnexp_array.cpp similarity index 100% rename from cnexp/neuron/cnexp_array.cpp rename to solve/neuron/cnexp_array.cpp diff --git a/cnexp/neuron/cnexp_scalar.cpp b/solve/neuron/cnexp_scalar.cpp similarity index 100% rename from cnexp/neuron/cnexp_scalar.cpp rename to solve/neuron/cnexp_scalar.cpp diff --git a/solve/neuron/derivimplicit_array.cpp b/solve/neuron/derivimplicit_array.cpp new file mode 100644 index 00000000..4b46897d --- /dev/null +++ b/solve/neuron/derivimplicit_array.cpp @@ -0,0 +1,740 @@ +/********************************************************* +Model Name : derivimplicit_array +Filename : derivimplicit_array.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : NEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include +#include +#include + +/** + * \dir + * \brief Solver for a system of linear equations : Crout matrix decomposition + * + * \file + * \brief Implementation of Crout matrix decomposition (LU decomposition) followed by + * Forward/Backward substitution: Implementation details : (Legacy code) nrn / scopmath / crout.c + */ + +#include +#include + +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +#include "coreneuron/utils/offload.hpp" +#endif + +namespace nmodl { +namespace crout { + +/** + * \brief Crout matrix decomposition : in-place LU Decomposition of matrix a. 
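+ *
+ * A minimal usage sketch (an editorial illustration, not generated output);
+ * the caller owns the row-major matrix and the perm/rowmax scratch arrays:
+ * \code
+ * double a[4] = {4.0, 3.0, 6.0, 3.0};                // 2x2 system, row-major
+ * int perm[2];
+ * double rowmax[2];
+ * if (nmodl::crout::Crout(2, a, perm, rowmax) == 0) {  // factorize in place
+ *     double b[2] = {10.0, 12.0};
+ *     double x[2];
+ *     nmodl::crout::solveCrout(2, a, b, x, perm);      // x == {1.0, 2.0}
+ * }
+ * \endcode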
+ * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: 0 if no error; -1 if matrix is singular or ill-conditioned + */ +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int Crout(int n, T* const a, int* const perm, double* const rowmax) { + // roundoff is the minimal value for a pivot element without its being considered too close to + // zero + double roundoff = 1.e-20; + int i, j, k, r, pivot, irow, save_i = 0, krow; + T sum, equil_1, equil_2; + + /* Initialize permutation and rowmax vectors */ + + for (i = 0; i < n; i++) { + perm[i] = i; + k = 0; + for (j = 1; j < n; j++) + if (std::fabs(a[i * n + j]) > std::fabs(a[i * n + k])) + k = j; + rowmax[i] = a[i * n + k]; + } + + /* Loop over rows and columns r */ + + for (r = 0; r < n; r++) { + /* + * Operate on rth column. This produces the lower triangular matrix + * of terms needed to transform the constant vector. + */ + + for (i = r; i < n; i++) { + sum = 0.0; + irow = perm[i]; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[irow * n + k] * a[krow * n + r]; + } + a[irow * n + r] -= sum; + } + + /* Find row containing the pivot in the rth column */ + + pivot = perm[r]; + equil_1 = std::fabs(a[pivot * n + r] / rowmax[pivot]); + for (i = r + 1; i < n; i++) { + irow = perm[i]; + equil_2 = std::fabs(a[irow * n + r] / rowmax[irow]); + if (equil_2 > equil_1) { + /* make irow the new pivot row */ + + pivot = irow; + save_i = i; + equil_1 = equil_2; + } + } + + /* Interchange entries in permutation vector if necessary */ + + if (pivot != perm[r]) { + perm[save_i] = perm[r]; + perm[r] = pivot; + } + + /* Check that pivot element is not too small */ + + if (std::fabs(a[pivot * n + r]) < roundoff) + return -1; + + /* + * Operate on row in rth position. This produces the upper + * triangular matrix whose diagonal elements are assumed to be unity. + * This matrix is used in the back substitution algorithm. + */ + + for (j = r + 1; j < n; j++) { + sum = 0.0; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[pivot * n + k] * a[krow * n + j]; + } + a[pivot * n + j] = (a[pivot * n + j] - sum) / a[pivot * n + r]; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +/** + * \brief Crout matrix decomposition : Forward/Backward substitution. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: no return variable + */ +#define y_(arg) p[y[arg]] +#define b_(arg) b[arg] +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int solveCrout(int n, + T const* const a, + T const* const b, + T* const p, + int const* const perm, + int const* const y = nullptr) { + int i, j, pivot; + T sum; + + /* Perform forward substitution with pivoting */ + if (y) { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. 
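+ * (Crout's scheme keeps the pivots in the lower factor, so the upper
+ * factor's diagonal is implicitly 1 and no division is needed here.)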
+ */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) -= sum; + } + } else { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. + */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] -= sum; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +#undef y_ +#undef b_ + +} // namespace crout +} // namespace nmodl + +/** + * \dir + * \brief Newton solver implementations + * + * \file + * \brief Implementation of Newton method for solving system of non-linear equations + */ + +#include +#include + +namespace nmodl { +/// newton solver implementations +namespace newton { + +/** + * @defgroup solver Solver Implementation + * @brief Solver implementation details + * + * Implementation of Newton method for solving system of non-linear equations using Eigen + * - newton::newton_solver is the preferred option: requires user to provide Jacobian + * - newton::newton_numerical_diff_solver is the fallback option: Jacobian not required + * + * @{ + */ + +static constexpr int MAX_ITER = 1e3; +static constexpr double EPS = 1e-12; + +/** + * \brief Newton method with user-provided Jacobian + * + * Newton method with user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, `J(X)` where `J(X)` is the Jacobian of `F(X)`, + * solves for \f$F(X) = 0\f$, starting with initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * when \f$|F|^2 < eps^2\f$, solution has converged. + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = -1; + while (++iter < max_iter) { + // calculate F, J from X using user-supplied functor + functor(X, F, J); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + // In Eigen the default storage order is ColMajor. + // Crout's implementation requires matrices stored in RowMajor order (C-style arrays). + // Therefore, the transposeInPlace is critical such that the data() method to give the rows + // instead of the columns. 
+ if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +static constexpr double SQUARE_ROOT_ULP = 1e-7; +static constexpr double CUBIC_ROOT_ULP = 1e-5; + +/** + * \brief Newton method without user-provided Jacobian + * + * Newton method without user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, solves for \f$F(X) = 0\f$, starting with + * initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * + * where `J(X)` is the Jacobian of `F(X)`, which is approximated numerically + * using a symmetric finite difference approximation to the derivative + * when \f$|F|^2 < eps^2\f$, solution has converged/ + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_numerical_diff_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Temporary storage for F(X+dx) + Eigen::Matrix F_p; + // Temporary storage for F(X-dx) + Eigen::Matrix F_m; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = 0; + while (iter < max_iter) { + // calculate F from X using user-supplied functor + functor(X, F); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + ++iter; + // calculate approximate Jacobian + for (int i = 0; i < N; ++i) { + // symmetric finite difference approximation to derivative + // df/dx ~= ( f(x+dx) - f(x-dx) ) / (2*dx) + // choose dx to be ~(ULP)^{1/3}*X[i] + // https://aip.scitation.org/doi/pdf/10.1063/1.4822971 + // also enforce a lower bound ~sqrt(ULP) to avoid dx being too small + double dX = std::max(CUBIC_ROOT_ULP * X[i], SQUARE_ROOT_ULP); + // F(X + dX) + X[i] += dX; + functor(X, F_p); + // F(X - dX) + X[i] -= 2.0 * dX; + functor(X, F_m); + F_p -= F_m; + // J = (F(X + dX) - F(X - dX)) / (2*dX) + J.col(i) = F_p / (2.0 * dX); + // restore X + X[i] += dX; + } + if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +/** + * Newton method template specializations for \f$N <= 4\f$ Use explicit inverse + * of `F` instead of LU decomposition. This is faster, as there is no pivoting + * and therefore no branches, but it is not numerically safe for \f$N > 4\f$. + */ + +template +EIGEN_DEVICE_FUNC int newton_solver_small_N(Eigen::Matrix& X, + FUNC functor, + double eps, + int max_iter) { + bool invertible; + Eigen::Matrix F; + Eigen::Matrix J, J_inv; + int iter = -1; + while (++iter < max_iter) { + functor(X, F, J); + double error = F.norm(); + if (error < eps) { + return iter; + } + // The inverse can be called from within OpenACC regions without any issue, as opposed to + // Eigen::PartialPivLU. 
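+ // Eigen documents computeInverseWithCheck (closed-form, cofactor-based)
+ // only for fixed sizes up to 4x4, which is why these specializations
+ // stop at N = 4.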
+ J.computeInverseWithCheck(J_inv, invertible); + if (invertible) + X -= J_inv * F; + else + return -1; + } + return -1; +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +/** @} */ // end of solver + +} // namespace newton +} // namespace nmodl + + +#include "mech_api.h" +#include "neuron/cache/mechanism_range.hpp" +#include "nrniv_mf.h" +#include "section_fwd.hpp" + +/* NEURON global macro definitions */ +/* VECTORIZED */ +#define NRN_VECTORIZED 1 + +static constexpr auto number_of_datum_variables = 0; +static constexpr auto number_of_floating_point_variables = 7; + +namespace { +template +using _nrn_mechanism_std_vector = std::vector; +using _nrn_model_sorted_token = neuron::model_sorted_token; +using _nrn_mechanism_cache_range = neuron::cache::MechanismRange; +using _nrn_mechanism_cache_instance = neuron::cache::MechanismInstance; +using _nrn_non_owning_id_without_container = neuron::container::non_owning_identifier_without_container; +template +using _nrn_mechanism_field = neuron::mechanism::field; +template +void _nrn_mechanism_register_data_fields(Args&&... args) { + neuron::mechanism::register_data_fields(std::forward(args)...); +} +} // namespace + +Prop* hoc_getdata_range(int type); +extern Node* nrn_alloc_node_; + + +namespace neuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "derivimplicit_array", + 0, + "z_derivimplicit_array[3]", + 0, + "x_derivimplicit_array", + "s_derivimplicit_array[2]", + 0, + 0 + }; + + + /* NEURON global variables */ + static neuron::container::field_index _slist1[1], _dlist1[1]; + static int mech_type; + static Prop* _extcall_prop; + /* _prop_id kind of shadows _extcall_prop to allow validity checking. 
*/ + static _nrn_non_owning_id_without_container _prop_id{}; + static int hoc_nrnpointerindex = -1; + static _nrn_mechanism_std_vector _extcall_thread; + + + /** all global variables */ + struct derivimplicit_array_Store { + double x0{}; + double s0{}; + }; + static_assert(std::is_trivially_copy_constructible_v); + static_assert(std::is_trivially_move_constructible_v); + static_assert(std::is_trivially_copy_assignable_v); + static_assert(std::is_trivially_move_assignable_v); + static_assert(std::is_trivially_destructible_v); + derivimplicit_array_Store derivimplicit_array_global; + static std::vector _parameter_defaults = { + }; + + + /** all mechanism instance variables and global variables */ + struct derivimplicit_array_Instance { + double* z{}; + double* x{}; + double* s{}; + double* Dx{}; + double* Ds{}; + double* v_unused{}; + double* g_unused{}; + derivimplicit_array_Store* global{&derivimplicit_array_global}; + }; + + + struct derivimplicit_array_NodeData { + int const * nodeindices; + double const * node_voltages; + double * node_diagonal; + double * node_rhs; + int nodecount; + }; + + + static derivimplicit_array_Instance make_instance_derivimplicit_array(_nrn_mechanism_cache_range& _lmc) { + return derivimplicit_array_Instance { + _lmc.template data_array_ptr<0, 3>(), + _lmc.template fpfield_ptr<1>(), + _lmc.template data_array_ptr<2, 2>(), + _lmc.template fpfield_ptr<3>(), + _lmc.template data_array_ptr<4, 2>(), + _lmc.template fpfield_ptr<5>(), + _lmc.template fpfield_ptr<6>() + }; + } + + + static derivimplicit_array_NodeData make_node_data_derivimplicit_array(NrnThread& nt, Memb_list& _ml_arg) { + return derivimplicit_array_NodeData { + _ml_arg.nodeindices, + nt.node_voltage_storage(), + nt.node_d_storage(), + nt.node_rhs_storage(), + _ml_arg.nodecount + }; + } + + + static void nrn_alloc_derivimplicit_array(Prop* _prop) { + Datum *_ppvar = nullptr; + _nrn_mechanism_cache_instance _lmc{_prop}; + size_t const _iml = 0; + assert(_nrn_mechanism_get_num_vars(_prop) == 7); + /*initialize range parameters*/ + } + + + /* Neuron setdata functions */ + extern void _nrn_setdata_reg(int, void(*)(Prop*)); + static void _setdata(Prop* _prop) { + _extcall_prop = _prop; + _prop_id = _nrn_get_prop_id(_prop); + } + static void _hoc_setdata() { + Prop *_prop = hoc_getdata_range(mech_type); + _setdata(_prop); + hoc_retpushx(1.); + } + /* Mechanism procedures and functions */ + + + struct functor_derivimplicit_array_0 { + _nrn_mechanism_cache_range& _lmc; + derivimplicit_array_Instance& inst; + size_t id; + Datum* _ppvar; + Datum* _thread; + NrnThread* nt; + double v; + double old_x; + + void initialize() { + old_x = inst.x[id]; + } + + functor_derivimplicit_array_0(_nrn_mechanism_cache_range& _lmc, derivimplicit_array_Instance& inst, size_t id, Datum* _ppvar, Datum* _thread, NrnThread* nt, double v) + : _lmc(_lmc), inst(inst), id(id), _ppvar(_ppvar), _thread(_thread), nt(nt), v(v) + {} + void operator()(const Eigen::Matrix& nmodl_eigen_xm, Eigen::Matrix& nmodl_eigen_fm, Eigen::Matrix& nmodl_eigen_jm) const { + const double* nmodl_eigen_x = nmodl_eigen_xm.data(); + double* nmodl_eigen_j = nmodl_eigen_jm.data(); + double* nmodl_eigen_f = nmodl_eigen_fm.data(); + nmodl_eigen_f[static_cast(0)] = nmodl_eigen_x[static_cast(0)] * nt->_dt * (inst.s+id*2)[static_cast(0)] * (inst.z+id*3)[static_cast(0)] * (inst.z+id*3)[static_cast(1)] * (inst.z+id*3)[static_cast(2)] + nmodl_eigen_x[static_cast(0)] * nt->_dt * (inst.s+id*2)[static_cast(1)] * (inst.z+id*3)[static_cast(0)] * (inst.z+id*3)[static_cast(1)] * 
(inst.z+id*3)[static_cast(2)] - nmodl_eigen_x[static_cast(0)] + old_x; + nmodl_eigen_j[static_cast(0)] = nt->_dt * (inst.s+id*2)[static_cast(0)] * (inst.z+id*3)[static_cast(0)] * (inst.z+id*3)[static_cast(1)] * (inst.z+id*3)[static_cast(2)] + nt->_dt * (inst.s+id*2)[static_cast(1)] * (inst.z+id*3)[static_cast(0)] * (inst.z+id*3)[static_cast(1)] * (inst.z+id*3)[static_cast(2)] - 1.0; + } + + void finalize() { + } + }; + + + /** connect global (scalar) variables to hoc -- */ + static DoubScal hoc_scalar_double[] = { + {nullptr, nullptr} + }; + + + /** connect global (array) variables to hoc -- */ + static DoubVec hoc_vector_double[] = { + {nullptr, nullptr, 0} + }; + + + /* declaration of user functions */ + + + /* connect user functions to hoc names */ + static VoidFunc hoc_intfunc[] = { + {"setdata_derivimplicit_array", _hoc_setdata}, + {nullptr, nullptr} + }; + static NPyDirectMechFunc npy_direct_func_proc[] = { + {nullptr, nullptr} + }; + + + void nrn_init_derivimplicit_array(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_derivimplicit_array(_lmc); + auto node_data = make_node_data_derivimplicit_array(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + auto* _thread = _ml_arg->_thread; + for (int id = 0; id < nodecount; id++) { + auto* _ppvar = _ml_arg->pdata[id]; + int node_id = node_data.nodeindices[id]; + auto v = node_data.node_voltages[node_id]; + inst.v_unused[id] = v; + inst.x[id] = 42.0; + (inst.s+id*2)[static_cast(0)] = 0.1; + (inst.s+id*2)[static_cast(1)] = -1.0; + (inst.z+id*3)[static_cast(0)] = 0.7; + (inst.z+id*3)[static_cast(1)] = 0.8; + (inst.z+id*3)[static_cast(2)] = 0.9; + } + } + + + void nrn_state_derivimplicit_array(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_derivimplicit_array(_lmc); + auto node_data = make_node_data_derivimplicit_array(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + auto* _thread = _ml_arg->_thread; + for (int id = 0; id < nodecount; id++) { + int node_id = node_data.nodeindices[id]; + auto* _ppvar = _ml_arg->pdata[id]; + auto v = node_data.node_voltages[node_id]; + + Eigen::Matrix nmodl_eigen_xm; + double* nmodl_eigen_x = nmodl_eigen_xm.data(); + nmodl_eigen_x[static_cast(0)] = inst.x[id]; + // call newton solver + functor_derivimplicit_array_0 newton_functor(_lmc, inst, id, _ppvar, _thread, nt, v); + newton_functor.initialize(); + int newton_iterations = nmodl::newton::newton_solver(nmodl_eigen_xm, newton_functor); + if (newton_iterations < 0) assert(false && "Newton solver did not converge!"); + inst.x[id] = nmodl_eigen_x[static_cast(0)]; + newton_functor.finalize(); + + } + } + + + static void nrn_jacob_derivimplicit_array(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) { + _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type}; + auto inst = make_instance_derivimplicit_array(_lmc); + auto node_data = make_node_data_derivimplicit_array(*nt, *_ml_arg); + auto nodecount = _ml_arg->nodecount; + for (int id = 0; id < nodecount; id++) { + int node_id = node_data.nodeindices[id]; + node_data.node_diagonal[node_id] += inst.g_unused[id]; + } + } + + + static void _initlists() { + /* x */ + _slist1[0] = {1, 0}; + /* Dx */ + _dlist1[0] = {3, 0}; + } + + + /** register channel with the 
simulator */ + extern "C" void _derivimplicit_array_reg() { + _initlists(); + + register_mech(mechanism_info, nrn_alloc_derivimplicit_array, nullptr, nrn_jacob_derivimplicit_array, nrn_state_derivimplicit_array, nrn_init_derivimplicit_array, hoc_nrnpointerindex, 1); + + mech_type = nrn_get_mechtype(mechanism_info[1]); + hoc_register_parm_default(mech_type, &_parameter_defaults); + _nrn_mechanism_register_data_fields(mech_type, + _nrn_mechanism_field{"z", 3} /* 0 */, + _nrn_mechanism_field{"x"} /* 1 */, + _nrn_mechanism_field{"s", 2} /* 2 */, + _nrn_mechanism_field{"Dx"} /* 3 */, + _nrn_mechanism_field{"Ds", 2} /* 4 */, + _nrn_mechanism_field{"v_unused"} /* 5 */, + _nrn_mechanism_field{"g_unused"} /* 6 */ + ); + + hoc_register_prop_size(mech_type, 11, 0); + hoc_register_var(hoc_scalar_double, hoc_vector_double, hoc_intfunc); + hoc_register_npy_direct(mech_type, npy_direct_func_proc); + } +} diff --git a/solve/neuron/derivimplicit_scalar.cpp b/solve/neuron/derivimplicit_scalar.cpp new file mode 100644 index 00000000..de89dd56 --- /dev/null +++ b/solve/neuron/derivimplicit_scalar.cpp @@ -0,0 +1,723 @@ +/********************************************************* +Model Name : derivimplicit_scalar +Filename : derivimplicit_scalar.mod +NMODL Version : 7.7.0 +Vectorized : true +Threadsafe : true +Created : DATE +Simulator : NEURON +Backend : C++ (api-compatibility) +NMODL Compiler : VERSION +*********************************************************/ + +#include +#include +#include +#include +#include +#include + +/** + * \dir + * \brief Solver for a system of linear equations : Crout matrix decomposition + * + * \file + * \brief Implementation of Crout matrix decomposition (LU decomposition) followed by + * Forward/Backward substitution: Implementation details : (Legacy code) nrn / scopmath / crout.c + */ + +#include +#include + +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +#include "coreneuron/utils/offload.hpp" +#endif + +namespace nmodl { +namespace crout { + +/** + * \brief Crout matrix decomposition : in-place LU Decomposition of matrix a. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: 0 if no error; -1 if matrix is singular or ill-conditioned + */ +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int Crout(int n, T* const a, int* const perm, double* const rowmax) { + // roundoff is the minimal value for a pivot element without its being considered too close to + // zero + double roundoff = 1.e-20; + int i, j, k, r, pivot, irow, save_i = 0, krow; + T sum, equil_1, equil_2; + + /* Initialize permutation and rowmax vectors */ + + for (i = 0; i < n; i++) { + perm[i] = i; + k = 0; + for (j = 1; j < n; j++) + if (std::fabs(a[i * n + j]) > std::fabs(a[i * n + k])) + k = j; + rowmax[i] = a[i * n + k]; + } + + /* Loop over rows and columns r */ + + for (r = 0; r < n; r++) { + /* + * Operate on rth column. This produces the lower triangular matrix + * of terms needed to transform the constant vector. 
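+ * Concretely: a[irow*n + r] -= sum over k < r of
+ * a[irow*n + k] * a[perm[k]*n + r], completing column r of the lower
+ * factor from the rows already processed.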
+ */ + + for (i = r; i < n; i++) { + sum = 0.0; + irow = perm[i]; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[irow * n + k] * a[krow * n + r]; + } + a[irow * n + r] -= sum; + } + + /* Find row containing the pivot in the rth column */ + + pivot = perm[r]; + equil_1 = std::fabs(a[pivot * n + r] / rowmax[pivot]); + for (i = r + 1; i < n; i++) { + irow = perm[i]; + equil_2 = std::fabs(a[irow * n + r] / rowmax[irow]); + if (equil_2 > equil_1) { + /* make irow the new pivot row */ + + pivot = irow; + save_i = i; + equil_1 = equil_2; + } + } + + /* Interchange entries in permutation vector if necessary */ + + if (pivot != perm[r]) { + perm[save_i] = perm[r]; + perm[r] = pivot; + } + + /* Check that pivot element is not too small */ + + if (std::fabs(a[pivot * n + r]) < roundoff) + return -1; + + /* + * Operate on row in rth position. This produces the upper + * triangular matrix whose diagonal elements are assumed to be unity. + * This matrix is used in the back substitution algorithm. + */ + + for (j = r + 1; j < n; j++) { + sum = 0.0; + for (k = 0; k < r; k++) { + krow = perm[k]; + sum += a[pivot * n + k] * a[krow * n + j]; + } + a[pivot * n + j] = (a[pivot * n + j] - sum) / a[pivot * n + r]; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +/** + * \brief Crout matrix decomposition : Forward/Backward substitution. + * + * Implementation details : (Legacy code) nrn / scopmath / crout.c + * + * Returns: no return variable + */ +#define y_(arg) p[y[arg]] +#define b_(arg) b[arg] +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_acc(routine seq) +nrn_pragma_omp(declare target) +#endif +template +EIGEN_DEVICE_FUNC inline int solveCrout(int n, + T const* const a, + T const* const b, + T* const p, + int const* const perm, + int const* const y = nullptr) { + int i, j, pivot; + T sum; + + /* Perform forward substitution with pivoting */ + if (y) { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. + */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (y_(j)); + y_(i) -= sum; + } + } else { + for (i = 0; i < n; i++) { + pivot = perm[i]; + sum = 0.0; + for (j = 0; j < i; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] = (b_(pivot) - sum) / a[pivot * n + i]; + } + + /* + * Note that the y vector is already in the correct order for back + * substitution. Perform back substitution, pivoting the matrix but not + * the y vector. There is no need to divide by the diagonal element as + * this is assumed to be unity. 
+ */ + + for (i = n - 1; i >= 0; i--) { + pivot = perm[i]; + sum = 0.0; + for (j = i + 1; j < n; j++) + sum += a[pivot * n + j] * (p[j]); + p[i] -= sum; + } + } + return 0; +} +#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +nrn_pragma_omp(end declare target) +#endif + +#undef y_ +#undef b_ + +} // namespace crout +} // namespace nmodl + +/** + * \dir + * \brief Newton solver implementations + * + * \file + * \brief Implementation of Newton method for solving system of non-linear equations + */ + +#include +#include + +namespace nmodl { +/// newton solver implementations +namespace newton { + +/** + * @defgroup solver Solver Implementation + * @brief Solver implementation details + * + * Implementation of Newton method for solving system of non-linear equations using Eigen + * - newton::newton_solver is the preferred option: requires user to provide Jacobian + * - newton::newton_numerical_diff_solver is the fallback option: Jacobian not required + * + * @{ + */ + +static constexpr int MAX_ITER = 1e3; +static constexpr double EPS = 1e-12; + +/** + * \brief Newton method with user-provided Jacobian + * + * Newton method with user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, `J(X)` where `J(X)` is the Jacobian of `F(X)`, + * solves for \f$F(X) = 0\f$, starting with initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * when \f$|F|^2 < eps^2\f$, solution has converged. + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = -1; + while (++iter < max_iter) { + // calculate F, J from X using user-supplied functor + functor(X, F, J); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + // In Eigen the default storage order is ColMajor. + // Crout's implementation requires matrices stored in RowMajor order (C-style arrays). + // Therefore, the transposeInPlace is critical such that the data() method to give the rows + // instead of the columns. 
+ if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +static constexpr double SQUARE_ROOT_ULP = 1e-7; +static constexpr double CUBIC_ROOT_ULP = 1e-5; + +/** + * \brief Newton method without user-provided Jacobian + * + * Newton method without user-provided Jacobian: given initial vector X and a + * functor that calculates `F(X)`, solves for \f$F(X) = 0\f$, starting with + * initial value of `X` by iterating: + * + * \f[ + * X_{n+1} = X_n - J(X_n)^{-1} F(X_n) + * \f] + * + * where `J(X)` is the Jacobian of `F(X)`, which is approximated numerically + * using a symmetric finite difference approximation to the derivative + * when \f$|F|^2 < eps^2\f$, solution has converged/ + * + * @return number of iterations (-1 if failed to converge) + */ +template +EIGEN_DEVICE_FUNC int newton_numerical_diff_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + // Vector to store result of function F(X): + Eigen::Matrix F; + // Temporary storage for F(X+dx) + Eigen::Matrix F_p; + // Temporary storage for F(X-dx) + Eigen::Matrix F_m; + // Matrix to store jacobian of F(X): + Eigen::Matrix J; + // Solver iteration count: + int iter = 0; + while (iter < max_iter) { + // calculate F from X using user-supplied functor + functor(X, F); + // get error norm: here we use sqrt(|F|^2) + double error = F.norm(); + if (error < eps) { + // we have converged: return iteration count + return iter; + } + ++iter; + // calculate approximate Jacobian + for (int i = 0; i < N; ++i) { + // symmetric finite difference approximation to derivative + // df/dx ~= ( f(x+dx) - f(x-dx) ) / (2*dx) + // choose dx to be ~(ULP)^{1/3}*X[i] + // https://aip.scitation.org/doi/pdf/10.1063/1.4822971 + // also enforce a lower bound ~sqrt(ULP) to avoid dx being too small + double dX = std::max(CUBIC_ROOT_ULP * X[i], SQUARE_ROOT_ULP); + // F(X + dX) + X[i] += dX; + functor(X, F_p); + // F(X - dX) + X[i] -= 2.0 * dX; + functor(X, F_m); + F_p -= F_m; + // J = (F(X + dX) - F(X - dX)) / (2*dX) + J.col(i) = F_p / (2.0 * dX); + // restore X + X[i] += dX; + } + if (!J.IsRowMajor) + J.transposeInPlace(); + Eigen::Matrix pivot; + Eigen::Matrix rowmax; + // Check if J is singular + if (nmodl::crout::Crout(N, J.data(), pivot.data(), rowmax.data()) < 0) + return -1; + Eigen::Matrix X_solve; + nmodl::crout::solveCrout(N, J.data(), F.data(), X_solve.data(), pivot.data()); + X -= X_solve; + } + // If we fail to converge after max_iter iterations, return -1 + return -1; +} + +/** + * Newton method template specializations for \f$N <= 4\f$ Use explicit inverse + * of `F` instead of LU decomposition. This is faster, as there is no pivoting + * and therefore no branches, but it is not numerically safe for \f$N > 4\f$. + */ + +template +EIGEN_DEVICE_FUNC int newton_solver_small_N(Eigen::Matrix& X, + FUNC functor, + double eps, + int max_iter) { + bool invertible; + Eigen::Matrix F; + Eigen::Matrix J, J_inv; + int iter = -1; + while (++iter < max_iter) { + functor(X, F, J); + double error = F.norm(); + if (error < eps) { + return iter; + } + // The inverse can be called from within OpenACC regions without any issue, as opposed to + // Eigen::PartialPivLU. 
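+ // Eigen documents computeInverseWithCheck (closed-form, cofactor-based)
+ // only for fixed sizes up to 4x4, which is why these specializations
+ // stop at N = 4.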
+ J.computeInverseWithCheck(J_inv, invertible); + if (invertible) + X -= J_inv * F; + else + return -1; + } + return -1; +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +template +EIGEN_DEVICE_FUNC int newton_solver(Eigen::Matrix& X, + FUNC functor, + double eps = EPS, + int max_iter = MAX_ITER) { + return newton_solver_small_N(X, functor, eps, max_iter); +} + +/** @} */ // end of solver + +} // namespace newton +} // namespace nmodl + + +#include "mech_api.h" +#include "neuron/cache/mechanism_range.hpp" +#include "nrniv_mf.h" +#include "section_fwd.hpp" + +/* NEURON global macro definitions */ +/* VECTORIZED */ +#define NRN_VECTORIZED 1 + +static constexpr auto number_of_datum_variables = 0; +static constexpr auto number_of_floating_point_variables = 4; + +namespace { +template +using _nrn_mechanism_std_vector = std::vector; +using _nrn_model_sorted_token = neuron::model_sorted_token; +using _nrn_mechanism_cache_range = neuron::cache::MechanismRange; +using _nrn_mechanism_cache_instance = neuron::cache::MechanismInstance; +using _nrn_non_owning_id_without_container = neuron::container::non_owning_identifier_without_container; +template +using _nrn_mechanism_field = neuron::mechanism::field; +template +void _nrn_mechanism_register_data_fields(Args&&... args) { + neuron::mechanism::register_data_fields(std::forward(args)...); +} +} // namespace + +Prop* hoc_getdata_range(int type); +extern Node* nrn_alloc_node_; + + +namespace neuron { + #ifndef NRN_PRCELLSTATE + #define NRN_PRCELLSTATE 0 + #endif + + + /** channel information */ + static const char *mechanism_info[] = { + "7.7.0", + "derivimplicit_scalar", + 0, + 0, + "x_derivimplicit_scalar", + 0, + 0 + }; + + + /* NEURON global variables */ + static neuron::container::field_index _slist1[1], _dlist1[1]; + static int mech_type; + static Prop* _extcall_prop; + /* _prop_id kind of shadows _extcall_prop to allow validity checking. 
+
+#include "mech_api.h"
+#include "neuron/cache/mechanism_range.hpp"
+#include "nrniv_mf.h"
+#include "section_fwd.hpp"
+
+/* NEURON global macro definitions */
+/* VECTORIZED */
+#define NRN_VECTORIZED 1
+
+static constexpr auto number_of_datum_variables = 0;
+static constexpr auto number_of_floating_point_variables = 4;
+
+namespace {
+template <typename T>
+using _nrn_mechanism_std_vector = std::vector<T>;
+using _nrn_model_sorted_token = neuron::model_sorted_token;
+using _nrn_mechanism_cache_range = neuron::cache::MechanismRange<number_of_floating_point_variables, number_of_datum_variables>;
+using _nrn_mechanism_cache_instance = neuron::cache::MechanismInstance<number_of_floating_point_variables, number_of_datum_variables>;
+using _nrn_non_owning_id_without_container = neuron::container::non_owning_identifier_without_container;
+template <typename T>
+using _nrn_mechanism_field = neuron::mechanism::field<T>;
+template <typename... Args>
+void _nrn_mechanism_register_data_fields(Args&&... args) {
+    neuron::mechanism::register_data_fields(std::forward<Args>(args)...);
+}
+}  // namespace
+
+Prop* hoc_getdata_range(int type);
+extern Node* nrn_alloc_node_;
+
+
+namespace neuron {
+    #ifndef NRN_PRCELLSTATE
+    #define NRN_PRCELLSTATE 0
+    #endif
+
+
+    /** channel information */
+    static const char *mechanism_info[] = {
+        "7.7.0",
+        "derivimplicit_scalar",
+        0,
+        0,
+        "x_derivimplicit_scalar",
+        0,
+        0
+    };
+
+
+    /* NEURON global variables */
+    static neuron::container::field_index _slist1[1], _dlist1[1];
+    static int mech_type;
+    static Prop* _extcall_prop;
+    /* _prop_id kind of shadows _extcall_prop to allow validity checking. */
+    static _nrn_non_owning_id_without_container _prop_id{};
+    static int hoc_nrnpointerindex = -1;
+    static _nrn_mechanism_std_vector<Datum> _extcall_thread;
+
+
+    /** all global variables */
+    struct derivimplicit_scalar_Store {
+        double x0{};
+    };
+    static_assert(std::is_trivially_copy_constructible_v<derivimplicit_scalar_Store>);
+    static_assert(std::is_trivially_move_constructible_v<derivimplicit_scalar_Store>);
+    static_assert(std::is_trivially_copy_assignable_v<derivimplicit_scalar_Store>);
+    static_assert(std::is_trivially_move_assignable_v<derivimplicit_scalar_Store>);
+    static_assert(std::is_trivially_destructible_v<derivimplicit_scalar_Store>);
+    derivimplicit_scalar_Store derivimplicit_scalar_global;
+    static std::vector<double> _parameter_defaults = {
+    };
+
+
+    /** all mechanism instance variables and global variables */
+    struct derivimplicit_scalar_Instance {
+        double* x{};
+        double* Dx{};
+        double* v_unused{};
+        double* g_unused{};
+        derivimplicit_scalar_Store* global{&derivimplicit_scalar_global};
+    };
+
+
+    struct derivimplicit_scalar_NodeData {
+        int const * nodeindices;
+        double const * node_voltages;
+        double * node_diagonal;
+        double * node_rhs;
+        int nodecount;
+    };
+
+
+    static derivimplicit_scalar_Instance make_instance_derivimplicit_scalar(_nrn_mechanism_cache_range& _lmc) {
+        return derivimplicit_scalar_Instance {
+            _lmc.template fpfield_ptr<0>(),
+            _lmc.template fpfield_ptr<1>(),
+            _lmc.template fpfield_ptr<2>(),
+            _lmc.template fpfield_ptr<3>()
+        };
+    }
+
+
+    static derivimplicit_scalar_NodeData make_node_data_derivimplicit_scalar(NrnThread& nt, Memb_list& _ml_arg) {
+        return derivimplicit_scalar_NodeData {
+            _ml_arg.nodeindices,
+            nt.node_voltage_storage(),
+            nt.node_d_storage(),
+            nt.node_rhs_storage(),
+            _ml_arg.nodecount
+        };
+    }
+
+
+    static void nrn_alloc_derivimplicit_scalar(Prop* _prop) {
+        Datum *_ppvar = nullptr;
+        _nrn_mechanism_cache_instance _lmc{_prop};
+        size_t const _iml = 0;
+        assert(_nrn_mechanism_get_num_vars(_prop) == 4);
+        /*initialize range parameters*/
+    }
+
+
+    /* Neuron setdata functions */
+    extern void _nrn_setdata_reg(int, void(*)(Prop*));
+    static void _setdata(Prop* _prop) {
+        _extcall_prop = _prop;
+        _prop_id = _nrn_get_prop_id(_prop);
+    }
+    static void _hoc_setdata() {
+        Prop *_prop = hoc_getdata_range(mech_type);
+        _setdata(_prop);
+        hoc_retpushx(1.);
+    }
+    /* Mechanism procedures and functions */
+
+
+    struct functor_derivimplicit_scalar_0 {
+        _nrn_mechanism_cache_range& _lmc;
+        derivimplicit_scalar_Instance& inst;
+        size_t id;
+        Datum* _ppvar;
+        Datum* _thread;
+        NrnThread* nt;
+        double v;
+        double old_x;
+
+        void initialize() {
+            old_x = inst.x[id];
+        }
+
+        functor_derivimplicit_scalar_0(_nrn_mechanism_cache_range& _lmc, derivimplicit_scalar_Instance& inst, size_t id, Datum* _ppvar, Datum* _thread, NrnThread* nt, double v)
+            : _lmc(_lmc), inst(inst), id(id), _ppvar(_ppvar), _thread(_thread), nt(nt), v(v)
+        {}
+        void operator()(const Eigen::Matrix<double, 1, 1>& nmodl_eigen_xm, Eigen::Matrix<double, 1, 1>& nmodl_eigen_fm, Eigen::Matrix<double, 1, 1>& nmodl_eigen_jm) const {
+            const double* nmodl_eigen_x = nmodl_eigen_xm.data();
+            double* nmodl_eigen_j = nmodl_eigen_jm.data();
+            double* nmodl_eigen_f = nmodl_eigen_fm.data();
+            nmodl_eigen_f[static_cast<int>(0)] = -nmodl_eigen_x[static_cast<int>(0)] * nt->_dt - nmodl_eigen_x[static_cast<int>(0)] + old_x;
+            nmodl_eigen_j[static_cast<int>(0)] = -nt->_dt - 1.0;
+        }
+
+        void finalize() {
+        }
+    };
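+
+    // The residual above is the implicit Euler discretization of the state
+    // equation x' = -x: F(x_new) = old_x - x_new - dt*x_new, with constant
+    // Jacobian dF/dx_new = -1 - dt. Because F is linear in x_new, the Newton
+    // iteration in nrn_state below converges after a single update, to
+    // x_new = old_x / (1 + dt).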
+
+
+    /** connect global (scalar) variables to hoc -- */
+    static DoubScal hoc_scalar_double[] = {
+        {nullptr, nullptr}
+    };
+
+
+    /** connect global (array) variables to hoc -- */
+    static DoubVec hoc_vector_double[] = {
+        {nullptr, nullptr, 0}
+    };
+
+
+    /* declaration of user functions */
+
+
+    /* connect user functions to hoc names */
+    static VoidFunc hoc_intfunc[] = {
+        {"setdata_derivimplicit_scalar", _hoc_setdata},
+        {nullptr, nullptr}
+    };
+    static NPyDirectMechFunc npy_direct_func_proc[] = {
+        {nullptr, nullptr}
+    };
+
+
+    void nrn_init_derivimplicit_scalar(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) {
+        _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type};
+        auto inst = make_instance_derivimplicit_scalar(_lmc);
+        auto node_data = make_node_data_derivimplicit_scalar(*nt, *_ml_arg);
+        auto nodecount = _ml_arg->nodecount;
+        auto* _thread = _ml_arg->_thread;
+        for (int id = 0; id < nodecount; id++) {
+            auto* _ppvar = _ml_arg->pdata[id];
+            int node_id = node_data.nodeindices[id];
+            auto v = node_data.node_voltages[node_id];
+            inst.v_unused[id] = v;
+            inst.x[id] = 42.0;
+        }
+    }
+
+
+    void nrn_state_derivimplicit_scalar(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) {
+        _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type};
+        auto inst = make_instance_derivimplicit_scalar(_lmc);
+        auto node_data = make_node_data_derivimplicit_scalar(*nt, *_ml_arg);
+        auto nodecount = _ml_arg->nodecount;
+        auto* _thread = _ml_arg->_thread;
+        for (int id = 0; id < nodecount; id++) {
+            int node_id = node_data.nodeindices[id];
+            auto* _ppvar = _ml_arg->pdata[id];
+            auto v = node_data.node_voltages[node_id];
+
+            Eigen::Matrix<double, 1, 1> nmodl_eigen_xm;
+            double* nmodl_eigen_x = nmodl_eigen_xm.data();
+            nmodl_eigen_x[static_cast<int>(0)] = inst.x[id];
+            // call newton solver
+            functor_derivimplicit_scalar_0 newton_functor(_lmc, inst, id, _ppvar, _thread, nt, v);
+            newton_functor.initialize();
+            int newton_iterations = nmodl::newton::newton_solver(nmodl_eigen_xm, newton_functor);
+            if (newton_iterations < 0) assert(false && "Newton solver did not converge!");
+            inst.x[id] = nmodl_eigen_x[static_cast<int>(0)];
+            newton_functor.finalize();
+
+        }
+    }
+
+
+    static void nrn_jacob_derivimplicit_scalar(const _nrn_model_sorted_token& _sorted_token, NrnThread* nt, Memb_list* _ml_arg, int _type) {
+        _nrn_mechanism_cache_range _lmc{_sorted_token, *nt, *_ml_arg, _type};
+        auto inst = make_instance_derivimplicit_scalar(_lmc);
+        auto node_data = make_node_data_derivimplicit_scalar(*nt, *_ml_arg);
+        auto nodecount = _ml_arg->nodecount;
+        for (int id = 0; id < nodecount; id++) {
+            int node_id = node_data.nodeindices[id];
+            node_data.node_diagonal[node_id] += inst.g_unused[id];
+        }
+    }
+
+
+    static void _initlists() {
+        /* x */
+        _slist1[0] = {0, 0};
+        /* Dx */
+        _dlist1[0] = {1, 0};
+    }
+
+
+    /** register channel with the simulator */
+    extern "C" void _derivimplicit_scalar_reg() {
+        _initlists();
+
+        register_mech(mechanism_info, nrn_alloc_derivimplicit_scalar, nullptr, nrn_jacob_derivimplicit_scalar, nrn_state_derivimplicit_scalar, nrn_init_derivimplicit_scalar, hoc_nrnpointerindex, 1);
+
+        mech_type = nrn_get_mechtype(mechanism_info[1]);
+        hoc_register_parm_default(mech_type, &_parameter_defaults);
+        _nrn_mechanism_register_data_fields(mech_type,
+            _nrn_mechanism_field<double>{"x"} /* 0 */,
+            _nrn_mechanism_field<double>{"Dx"} /* 1 */,
+            _nrn_mechanism_field<double>{"v_unused"} /* 2 */,
+            _nrn_mechanism_field<double>{"g_unused"} /* 3 */
+        );
+
+        hoc_register_prop_size(mech_type, 4, 0);
+        hoc_register_var(hoc_scalar_double, hoc_vector_double, hoc_intfunc);
+        hoc_register_npy_direct(mech_type, npy_direct_func_proc);
+    }
+}
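+
+// Worked step, assuming NEURON's default dt = 0.025 ms: nrn_init sets
+// x = 42, and the first nrn_state call solves 42 - x - 0.025*x = 0,
+// leaving x = 42 / 1.025 ~ 40.9756.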