Ensure compatibility with NESTML custom as well as NEST built-in synaptic plasticity models #997

Open · wants to merge 5 commits into base: master
2 changes: 1 addition & 1 deletion pynestml/codegeneration/nest_code_generator.py
@@ -91,7 +91,7 @@ class NESTCodeGenerator(CodeGenerator):

Options:

- - **neuron_parent_class**: The C++ class from which the generated NESTML neuron class inherits. Examples: ``"ArchivingNode"``, ``"StructuralPlasticityNode"``. Default: ``"ArchivingNode"``.
+ - **neuron_parent_class**: The C++ class from which the generated NESTML neuron class inherits. Examples: ``"ArchivingNode"``, ``"StructuralPlasticityNode"``. To generate a model with the smallest memory footprint, use ``"StructuralPlasticityNode"``. To ensure compatibility with the NEST built-in plastic synapses (such as ``stdp_synapse``), choose ``"ArchivingNode"``. Default: ``"ArchivingNode"``.
- **neuron_parent_class_include**: The C++ header filename to include that contains **neuron_parent_class**. Default: ``"archiving_node.h"``.
- **neuron_synapse_pairs**: List of pairs of (neuron, synapse) model names.
- **preserve_expressions**: Set to True, or a list of strings corresponding to individual variable names, to disable internal rewriting of expressions, and return same output as input expression where possible. Only applies to variables specified as first-order differential equations. (This parameter is passed to ODE-toolbox.)
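As a usage note: these options are passed to the code generator through the `codegen_opts` argument of `generate_nest_target()`. A minimal sketch (file paths, module name, and the port name below are placeholders, not taken from this PR):

```python
# Minimal sketch: generate NEST code for a neuron/synapse pair while
# selecting the neuron parent class. Paths and names are placeholders.
from pynestml.frontend.pynestml_frontend import generate_nest_target

generate_nest_target(
    input_path=["models/neurons/iaf_psc_exp.nestml",
                "models/synapses/stdp_synapse.nestml"],
    target_path="/tmp/nestml-target",
    module_name="nestmlmodule",
    codegen_opts={
        # "ArchivingNode" keeps compatibility with NEST built-in plastic
        # synapses; "StructuralPlasticityNode" minimizes the memory footprint.
        "neuron_parent_class": "ArchivingNode",
        "neuron_parent_class_include": "archiving_node.h",
        "neuron_synapse_pairs": [{"neuron": "iaf_psc_exp",
                                  "synapse": "stdp_synapse",
                                  "post_ports": ["post_spikes"]}]})
```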
@@ -265,7 +265,7 @@ std::vector< std::tuple< int, int > > {{neuronName}}::rport_to_nestml_buffer_idx
{%- if paired_synapse is defined %}
n_incoming_ = __n.n_incoming_;
max_delay_ = __n.max_delay_;
- last_spike_ = __n.last_spike_;
+ last_spike_nestml_ = __n.last_spike_nestml_;

// cache initial values
{%- for var_name in transferred_variables %}
@@ -373,7 +373,7 @@ void {{neuronName}}::init_state_internal_()
// state variables for archiving state for paired synapse
n_incoming_ = 0;
max_delay_ = 0;
- last_spike_ = -1.;
+ last_spike_nestml_ = -1.;

// cache initial values
{%- for var_name in transferred_variables %}
@@ -873,20 +873,24 @@ void {{neuronName}}::handle(nest::CurrentEvent& e)
inline double
{{neuronName}}::get_spiketime_ms() const
{
- return last_spike_;
+ return last_spike_nestml_;
}


void
{{neuronName}}::register_stdp_connection( double t_first_read, double delay )
{
+ {%- if neuron_parent_class in ["ArchivingNode", "Archiving_Node"] %}
+ {{ neuron_parent_class }}::register_stdp_connection(t_first_read, delay);
+ {%- endif %}

// Mark all entries in the deque which we will not read in the future as read by
// this input, so that we can safely increment the number of incoming
// connections afterwards without leaving spikes in the history.
// For details see bug #218. MH 08-04-22

- for ( std::deque< histentry__{{neuronName}} >::iterator runner = history_.begin();
-       runner != history_.end() and ( t_first_read - runner->t_ > -1.0 * nest::kernel().connection_manager.get_stdp_eps() );
+ for ( std::deque< histentry__{{neuronName}} >::iterator runner = history_nestml_.begin();
+       runner != history_nestml_.end() and ( t_first_read - runner->t_ > -1.0 * nest::kernel().connection_manager.get_stdp_eps() );
++runner )
{
( runner->access_counter_ )++;
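The `{%- if neuron_parent_class ... %}` guard added above is the heart of the compatibility fix: when the generated neuron inherits from `ArchivingNode`, an incoming STDP connection is now registered with the NEST built-in archive *and* with the NESTML-side one. An illustrative Python rendering of that dual bookkeeping (these classes are not part of the NESTML or NEST API):

```python
# Illustrative sketch of the dual registration performed by the generated C++.
class ArchivingNode:
    def register_stdp_connection(self, t_first_read, delay):
        pass  # NEST built-in archive: seen by stdp_synapse and friends


class GeneratedNeuron(ArchivingNode):
    def __init__(self):
        self.history_nestml = []  # NESTML-side spike archive
        self.n_incoming = 0

    def register_stdp_connection(self, t_first_read, delay, eps=1e-6):
        # The template emits this delegation only when neuron_parent_class
        # is ArchivingNode; StructuralPlasticityNode keeps no archive.
        super().register_stdp_connection(t_first_read, delay)

        # Mark entries this new connection will never read as already read,
        # so n_incoming can grow without stranding old history entries.
        for entry in self.history_nestml:
            if t_first_read - entry["t"] > -eps:
                entry["access_counter"] += 1
        self.n_incoming += 1
```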
@@ -899,26 +903,26 @@


void
- {{neuronName}}::get_history__( double t1,
+ {{neuronName}}::get_history_nestml_( double t1,
double t2,
std::deque< histentry__{{neuronName}} >::iterator* start,
std::deque< histentry__{{neuronName}} >::iterator* finish )
{
- *finish = history_.end();
- if ( history_.empty() )
+ *finish = history_nestml_.end();
+ if ( history_nestml_.empty() )
{
*start = *finish;
return;
}
- std::deque< histentry__{{neuronName}} >::reverse_iterator runner = history_.rbegin();
+ std::deque< histentry__{{neuronName}} >::reverse_iterator runner = history_nestml_.rbegin();
const double t2_lim = t2 + nest::kernel().connection_manager.get_stdp_eps();
const double t1_lim = t1 + nest::kernel().connection_manager.get_stdp_eps();
- while ( runner != history_.rend() and runner->t_ >= t2_lim )
+ while ( runner != history_nestml_.rend() and runner->t_ >= t2_lim )
{
++runner;
}
*finish = runner.base();
- while ( runner != history_.rend() and runner->t_ >= t1_lim )
+ while ( runner != history_nestml_.rend() and runner->t_ >= t1_lim )
{
runner->access_counter_++;
++runner;
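For reviewers new to this archive: the function walks the deque backwards and returns the iterator range of post-synaptic spikes in the half-open interval (t1, t2], with the kernel's STDP epsilon absorbing floating-point jitter at both bounds. A rough Python equivalent (assuming `history` is a list of dicts sorted by spike time):

```python
def get_history_nestml(history, t1, t2, eps):
    """Rough Python equivalent of the generated get_history_nestml_().

    history is a list of dicts sorted by entry["t"]; eps stands in for
    nest::kernel().connection_manager.get_stdp_eps(). Returns (start,
    finish) indices bounding the spikes with t1 < t <= t2.
    """
    finish = len(history)
    # walk backwards past entries later than t2 (within eps tolerance)
    while finish > 0 and history[finish - 1]["t"] >= t2 + eps:
        finish -= 1
    start = finish
    # extend the window down to t1, marking each returned entry as read
    while start > 0 and history[start - 1]["t"] >= t1 + eps:
        history[start - 1]["access_counter"] += 1
        start -= 1
    return start, finish
```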
@@ -946,28 +950,28 @@
// STDP synapses, and
// - there is another, later spike, that is strictly more than
// (min_global_delay + max_delay_ + eps) away from the new spike (at t_sp_ms)
- while ( history_.size() > 1 )
+ while ( history_nestml_.size() > 1 )
{
- const double next_t_sp = history_[ 1 ].t_;
- if ( history_.front().access_counter_ >= n_incoming_ * num_transferred_variables
+ const double next_t_sp = history_nestml_[ 1 ].t_;
+ if ( history_nestml_.front().access_counter_ >= n_incoming_ * num_transferred_variables
and t_sp_ms - next_t_sp > max_delay_ + nest::Time::delay_steps_to_ms(nest::kernel().connection_manager.get_min_delay()) + nest::kernel().connection_manager.get_stdp_eps() )
{
- history_.pop_front();
+ history_nestml_.pop_front();
}
else
{
break;
}
}

- if (history_.size() > 0) {
- assert(history_.back().t_ == last_spike_);
+ if (history_nestml_.size() > 0) {
+ assert(history_nestml_.back().t_ == last_spike_nestml_);

{%- for var in purely_numeric_state_variables_moved|sort %}
- {{ printer.print(utils.get_state_variable_by_name(astnode, var)) }} = history_.back().{{var}}_;
+ {{ printer.print(utils.get_state_variable_by_name(astnode, var)) }} = history_nestml_.back().{{var}}_;
{%- endfor %}
{%- for var in analytic_state_variables_moved|sort %}
- {{ printer.print(utils.get_state_variable_by_name(astnode, var)) }} = history_.back().{{var}}_;
+ {{ printer.print(utils.get_state_variable_by_name(astnode, var)) }} = history_nestml_.back().{{var}}_;
{%- endfor %}
}
else {
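The pruning loop above can be read as: the front entry may be dropped once every incoming connection has read it (once per transferred variable), and a strictly later spike already lies outside the maximal delay window. A hedged sketch of the same rule (dict-based history as in the sketch above; not an actual NESTML API):

```python
def prune_history(history, t_sp_ms, n_incoming, num_transferred_variables,
                  max_delay, min_delay, eps):
    """Sketch of the history pruning rule in the generated C++."""
    while len(history) > 1:
        fully_read = (history[0]["access_counter"]
                      >= n_incoming * num_transferred_variables)
        # the *next* spike must be further in the past than the largest
        # interval any synapse could still ask about
        out_of_reach = t_sp_ms - history[1]["t"] > max_delay + min_delay + eps
        if fully_read and out_of_reach:
            history.pop(0)  # pop_front()
        else:
            break
```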
@@ -981,11 +985,11 @@


/**
- * update state variables transferred from synapse from `last_spike_` to `t_sp_ms`
+ * update state variables transferred from synapse from `last_spike_nestml_` to `t_sp_ms`
**/

const double old___h = V_.__h;
- V_.__h = t_sp_ms - last_spike_;
+ V_.__h = t_sp_ms - last_spike_nestml_;
if (V_.__h > 1E-12) {
recompute_internal_variables(true);
{#
@@ -1041,8 +1045,8 @@ S_.ode_state[State_::{{variable_name}}] = ode_state_bak[State_::{{variable_name}
{{ printer.print(utils.get_variable_by_name(astnode, spike_update.get_variable().get_complete_name())) }} += 1.;
{%- endfor %}

- last_spike_ = t_sp_ms;
- history_.push_back( histentry__{{neuronName}}( last_spike_
+ last_spike_nestml_ = t_sp_ms;
+ history_nestml_.push_back( histentry__{{neuronName}}( last_spike_nestml_
{%- for var in purely_numeric_state_variables_moved|sort %}
, get_{{var}}()
{%- endfor %}
@@ -1054,16 +1058,20 @@ S_.ode_state[State_::{{variable_name}}] = ode_state_bak[State_::{{variable_name}
}
else
{
- last_spike_ = t_sp_ms;
+ last_spike_nestml_ = t_sp_ms;
}
}
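Summarizing the order of operations in the two hunks above: on each somatic spike the variables transferred from the synapse are first propagated forward over the interval since the last archived spike (by temporarily setting the timestep `V_.__h` and recomputing the propagators), then the spike updates (`+= 1.`) are applied, and only then is a new `histentry` appended. A condensed sketch, with a single exponentially decaying trace standing in for the transferred variables (an assumption for illustration only):

```python
import math

def archive_spike(history, state, t_sp_ms, last_spike, tau):
    """Condensed sketch of the archiving step: propagate, update, append."""
    h = t_sp_ms - last_spike
    if h > 1e-12:
        state["trace"] *= math.exp(-h / tau)  # propagate over the gap
        state["trace"] += 1.0                 # spike update from the synapse
        history.append({"t": t_sp_ms,
                        "trace": state["trace"],
                        "access_counter": 0})
    return t_sp_ms  # becomes the new last_spike_nestml_
```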


void
{{neuronName}}::clear_history()
{
- last_spike_ = -1.0;
- history_.clear();
+ {%- if neuron_parent_class in ["ArchivingNode", "Archiving_Node"] %}
+ {{ neuron_parent_class }}::clear_history();
+ {%- endif %}
+
+ last_spike_nestml_ = -1.0;
+ history_nestml_.clear();
}


@@ -1086,7 +1094,7 @@ double
#endif

// case when the neuron has not yet spiked
- if ( history_.empty() )
+ if ( history_nestml_.empty() )
{
#ifdef DEBUG
std::cout << "{{neuronName}}::get_{{var}}: \thistory empty, returning initial value = " << {{var}}__iv << std::endl;
@@ -1096,34 +1104,34 @@ double
}

// search for the latest post spike in the history buffer that came strictly before `t`
- int i = history_.size() - 1;
+ int i = history_nestml_.size() - 1;
double eps = 0.;
if ( before_increment ) {
eps = nest::kernel().connection_manager.get_stdp_eps();
}
while ( i >= 0 )
{
- if ( t - history_[ i ].t_ >= eps )
+ if ( t - history_nestml_[ i ].t_ >= eps )
{
#ifdef DEBUG
- std::cout<<"{{neuronName}}::get_{{var}}: \tspike occurred at history[i].t_ = " << history_[i].t_ << std::endl;
+ std::cout<<"{{neuronName}}::get_{{var}}: \tspike occurred at history[i].t_ = " << history_nestml_[i].t_ << std::endl;
#endif

{%- for var_ in purely_numeric_state_variables_moved %}
- {{ printer.print(utils.get_variable_by_name(astnode, var_)) }} = history_[ i ].{{var_}}_;
+ {{ printer.print(utils.get_variable_by_name(astnode, var_)) }} = history_nestml_[ i ].{{var_}}_;
{%- endfor %}
{%- for var_ in analytic_state_variables_moved %}
- {{ printer.print(utils.get_variable_by_name(astnode, var_)) }} = history_[ i ].{{var_}}_;
+ {{ printer.print(utils.get_variable_by_name(astnode, var_)) }} = history_nestml_[ i ].{{var_}}_;
{%- endfor %}

/**
* update state variables transferred from synapse from `history[i].t_` to `t`
**/

- if ( t - history_[ i ].t_ >= nest::kernel().connection_manager.get_stdp_eps() )
+ if ( t - history_nestml_[ i ].t_ >= nest::kernel().connection_manager.get_stdp_eps() )
{
const double old___h = V_.__h;
- V_.__h = t - history_[i].t_;
+ V_.__h = t - history_nestml_[i].t_;
assert(V_.__h > 0);
recompute_internal_variables(true);
{#
@@ -1171,13 +1179,13 @@ S_.ode_state[State_::{{variable_name}}] = ode_state_tmp[State_::{{variable_name}
}

// this case occurs when the trace was requested at a time precisely at that of the first spike in the history
- if ( (!before_increment) and t == history_[ 0 ].t_)
+ if ( (!before_increment) and t == history_nestml_[ 0 ].t_)
{
{%- for var_ in purely_numeric_state_variables_moved %}
- {{ printer.print(utils.get_state_variable_by_name(astnode, var_)) }} = history_[ 0 ].{{var_}}_;
+ {{ printer.print(utils.get_state_variable_by_name(astnode, var_)) }} = history_nestml_[ 0 ].{{var_}}_;
{%- endfor %}
{%- for var_ in analytic_state_variables_moved %}
- {{ printer.print(utils.get_state_variable_by_name(astnode, var_)) }} = history_[ 0 ].{{var_}}_;
+ {{ printer.print(utils.get_state_variable_by_name(astnode, var_)) }} = history_nestml_[ 0 ].{{var_}}_;
{%- endfor %}

#ifdef DEBUG
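The getter hunks above implement trace lookup at an arbitrary time `t`: search backwards for the newest archived spike at or before `t` (strictly before, when `before_increment` is set, via the eps offset), restore the archived values, and propagate them forward to `t`. A compact sketch under the same single-exponential-trace assumption as above (illustrative only):

```python
import math

def get_trace(history, t, tau, initial_value=0.0,
              before_increment=True, eps=1e-6):
    """Compact sketch of the generated trace getter.

    With before_increment=True the spike at exactly time t is excluded,
    i.e. the value just before the post-synaptic update is returned.
    """
    if not history:
        return initial_value  # the neuron has not spiked yet

    margin = eps if before_increment else 0.0
    for entry in reversed(history):
        if t - entry["t"] >= margin:
            # propagate the archived value from entry["t"] forward to t
            return entry["trace"] * math.exp(-(t - entry["t"]) / tau)

    # t precedes every archived spike: fall back to the initial value
    return initial_value
```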
@@ -299,14 +299,9 @@ public:
// support for spike archiving

/**
- * \fn void get_history(long t1, long t2,
- * std::deque<Archiver::histentry__>::iterator* start,
- * std::deque<Archiver::histentry__>::iterator* finish)
- * return the spike times (in steps) of spikes which occurred in the range
- * (t1,t2].
- * XXX: two underscores to differentiate it from nest::Node::get_history()
+ * Return the spike times (in steps) of spikes which occurred in the range (t1,t2].
*/
- void get_history__( double t1,
+ void get_history_nestml_( double t1,
double t2,
std::deque< histentry__{{neuronName}} >::iterator* start,
std::deque< histentry__{{neuronName}} >::iterator* finish );
@@ -413,10 +408,10 @@ private:

double max_delay_;

- double last_spike_;
+ double last_spike_nestml_;

// spiking history needed by stdp synapses
- std::deque< histentry__{{neuronName}} > history_;
+ std::deque< histentry__{{neuronName}} > history_nestml_;

// cache for initial values
{%- for var in transferred_variables %}
@@ -713,7 +713,7 @@ public:
// history[0, ..., t_last_spike - dendritic_delay] have been
// incremented by Archiving_Node::register_stdp_connection(). See bug #218 for
// details.
- __target->get_history__( t_lastspike_ - __dendritic_delay,
+ __target->get_history_nestml_( t_lastspike_ - __dendritic_delay,
__t_spike - __dendritic_delay,
&start,
&finish );
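On the synapse side, the iterator range returned here drives the usual STDP loop: every post-synaptic spike between the previous and the current pre-synaptic spike (both shifted by the dendritic delay) triggers one facilitation step. A schematic sketch of pair-based facilitation (an exponential pre-trace `Kplus` is assumed; this is not the literal generated code):

```python
import math

def facilitate_on_post_spikes(post_spike_times, t_lastspike, t_spike,
                              dendritic_delay, w, Kplus, tau_plus, lambda_):
    """Schematic pair-based STDP facilitation over the post-spike history
    returned by get_history_nestml_()."""
    start = t_lastspike - dendritic_delay
    finish = t_spike - dendritic_delay
    for t_post in post_spike_times:
        if not (start < t_post <= finish):
            continue  # outside (t_lastspike, t_spike] shifted by the delay
        # pre-trace evaluated at the (delay-corrected) post-spike time
        minus_dt = t_lastspike - (t_post + dendritic_delay)
        w += lambda_ * Kplus * math.exp(minus_dt / tau_plus)
    return w
```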
@@ -1262,7 +1262,7 @@ inline void
// get spike history in relevant range (t_last_update, t_trig] from postsyn. neuron
std::deque< histentry__{{paired_neuron}} >::iterator start;
std::deque< histentry__{{paired_neuron}} >::iterator finish;
- static_cast<{{paired_neuron}}*>(get_target(t))->get_history__( t_last_update_ - dendritic_delay, t_trig - dendritic_delay, &start, &finish );
+ static_cast<{{paired_neuron}}*>(get_target(t))->get_history_nestml_( t_last_update_ - dendritic_delay, t_trig - dendritic_delay, &start, &finish );

// facilitation due to postsyn. spikes since last update
double t0 = t_last_update_;
@@ -0,0 +1,101 @@
neuron iaf_psc_exp_nonlineardendrite:

    state:
        V_m mV = 0 mV                    # membrane potential
        z pA = 0 pA                      # dAP trace
        active_dendrite boolean = false
        dAP_counts integer = 0
        ref_counts integer = 0

    equations:
        # exponentially shaped postsynaptic current kernel
        kernel I_kernel1 = exp(-1 / tau_syn1 * t)

        # alpha shaped postsynaptic current kernel
        kernel I_kernel2 = (e / tau_syn2) * t * exp(-t / tau_syn2)

        # exponentially shaped postsynaptic current kernel
        kernel I_kernel3 = exp(-1 / tau_syn3 * t)

        # exponentially shaped postsynaptic current kernel
        kernel I_kernel4 = exp(-1 / tau_syn4 * t)

        # differential equation for the membrane potential
        recordable inline I_dend pA = convolve(I_kernel2, I_2) * pA
        inline I_syn pA = convolve(I_kernel1, I_1) * pA + I_dend - convolve(I_kernel3, I_3) * pA + convolve(I_kernel4, I_4) * pA + I_e
        V_m' = -(V_m - E_L) / tau_m + I_syn / C_m

        # differential equation for the dAP trace
        z' = -z / tau_h

    parameters:
        C_m pF = 250 pF        # capacitance of the membrane
        tau_m ms = 20 ms       # membrane time constant
        tau_syn1 ms = 10 ms    # time constant of synaptic current, port 1
        tau_syn2 ms = 10 ms    # time constant of synaptic current, port 2
        tau_syn3 ms = 10 ms    # time constant of synaptic current, port 3
        tau_syn4 ms = 10 ms    # time constant of synaptic current, port 4
        tau_h ms = 400 ms      # time constant of the dAP trace
        V_th mV = 25 mV        # spike threshold
        V_reset mV = 0 mV      # reset voltage
        I_e pA = 0 pA          # external current
        E_L mV = 0 mV          # resting potential

        # dendritic action potential
        theta_dAP pA = 60 pA   # current threshold for a dendritic action potential
        I_p pA = 250 pA        # current clamp value for I_dend during a dendritic action potential
        tau_dAP ms = 60 ms     # time window over which the dendritic current clamp is active

        # refractory parameters
        t_ref ms = 10 ms       # refractory period

    internals:
        dAP_timeout_ticks integer = steps(tau_dAP)
        ref_timeout_ticks integer = steps(t_ref)

    input:
        I_1 <- spike
        I_2 <- spike
        I_3 <- spike
        I_4 <- spike

    output:
        spike

    update:
        # solve the ODEs
        integrate_odes()

        # if the dendritic current exceeds the threshold, start a dendritic action potential
        if I_dend > theta_dAP or active_dendrite:
            if dAP_counts == 0:
                if active_dendrite == false:
                    # start a new dendritic action potential
                    z += 1 pA
                    active_dendrite = true
                    I_dend = I_p
                    dAP_counts = dAP_timeout_ticks
                else:
                    # the dendritic action potential has timed out
                    I_dend = 0 pA
                    active_dendrite = false
            else:
                dAP_counts -= 1
                I_dend = I_p

        # somatic threshold crossing and refractoriness
        if ref_counts == 0:
            if V_m > V_th:
                emit_spike()
                ref_counts = ref_timeout_ticks
                V_m = V_reset
                dAP_counts = 0
                I_dend = 0 pA
                active_dendrite = false
        else:
            ref_counts -= 1
            V_m = V_reset
            active_dendrite = false
            dAP_counts = 0
            I_dend = 0 pA
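For completeness, a hedged end-to-end sketch of how a custom model like this one could be generated, built, and used alongside a NEST built-in plastic synapse (file, path, and module names are placeholders; `receptor_type` 1 is assumed to address the first spike port, `I_1`):

```python
# Sketch: generate, build, and load the custom neuron model in NEST.
import nest
from pynestml.frontend.pynestml_frontend import generate_nest_target

generate_nest_target(
    input_path="iaf_psc_exp_nonlineardendrite.nestml",
    target_path="/tmp/nestml-target",
    module_name="nestml_dend_module",
    codegen_opts={"neuron_parent_class": "ArchivingNode",
                  "neuron_parent_class_include": "archiving_node.h"})

nest.Install("nestml_dend_module")
neuron = nest.Create("iaf_psc_exp_nonlineardendrite")

# With ArchivingNode as parent, the neuron can also be the post-synaptic
# partner of a NEST built-in plastic synapse such as stdp_synapse.
pre = nest.Create("poisson_generator", params={"rate": 10.0})
parrot = nest.Create("parrot_neuron")  # devices cannot project via plastic synapses
nest.Connect(pre, parrot)
nest.Connect(parrot, neuron,
             syn_spec={"synapse_model": "stdp_synapse",
                       "receptor_type": 1})  # assumed rport for I_1
nest.Simulate(1000.0)
```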