From b6dab81e618c730dba67f9e5a354b1146deb0999 Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Thu, 31 Oct 2019 14:35:40 +0000 Subject: [PATCH 001/198] resolving weight scaling issues for purkinje cells --- .../connectors/from_list_connector.py | 6 +++--- .../neuron/input_types/input_type_conductance.py | 2 +- .../neuron/synapse_io/synapse_io_row_based.py | 3 ++- spynnaker/pyNN/models/neuron/synaptic_manager.py | 16 ++++++++++++---- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index cc6c9fa53b..c80bb0b63d 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -186,7 +186,7 @@ def get_n_connections_to_post_vertex_maximum(self): @overrides(AbstractConnector.get_weight_mean) def get_weight_mean(self, weights): if self.__weights is None: - return numpy.mean(weights) + return super(FromListConnector, self).get_weight_mean(weights) else: return numpy.mean(numpy.abs(self.__weights)) @@ -194,7 +194,7 @@ def get_weight_mean(self, weights): def get_weight_maximum(self, weights): # pylint: disable=too-many-arguments if self.__weights is None: - return numpy.amax(weights) + return self._get_weight_maximum(weights, len(self.__conn_list)) else: return numpy.amax(numpy.abs(self.__weights)) @@ -202,7 +202,7 @@ def get_weight_maximum(self, weights): def get_weight_variance(self, weights): # pylint: disable=too-many-arguments if self.__weights is None: - return numpy.var(weights) + return super(FromListConnector, self).get_weight_variance(weights) else: return numpy.var(numpy.abs(self.__weights)) diff --git a/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py b/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py index a60bfdeaf6..4b1723028b 100644 --- 
a/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py +++ b/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py @@ -76,7 +76,7 @@ def update_values(self, values, parameters, state_variables): @overrides(AbstractInputType.get_global_weight_scale) def get_global_weight_scale(self): - return 1024.0 + return float(2**5) @property def e_rev_E(self): diff --git a/spynnaker/pyNN/models/neuron/synapse_io/synapse_io_row_based.py b/spynnaker/pyNN/models/neuron/synapse_io/synapse_io_row_based.py index 6967334f77..55aef75a39 100644 --- a/spynnaker/pyNN/models/neuron/synapse_io/synapse_io_row_based.py +++ b/spynnaker/pyNN/models/neuron/synapse_io/synapse_io_row_based.py @@ -18,6 +18,7 @@ from six import raise_from from spinn_utilities.overrides import overrides from spinn_front_end_common.utilities.constants import BYTES_PER_WORD +from spynnaker.pyNN.utilities.constants import MAX_SUPPORTED_DELAY_TICS from spynnaker.pyNN.models.neural_projections.connectors import ( AbstractConnector) from spynnaker.pyNN.exceptions import SynapseRowTooBigException @@ -42,7 +43,7 @@ class SynapseIORowBased(AbstractSynapseIO): @overrides(AbstractSynapseIO.get_maximum_delay_supported_in_ms) def get_maximum_delay_supported_in_ms(self, machine_time_step): # There are 16 slots, one per time step - return 16 * (machine_time_step / 1000.0) + return MAX_SUPPORTED_DELAY_TICS * (machine_time_step / 1000.0) @staticmethod def _n_words(n_bytes): diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index d8409637fd..d0b3fb9e37 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -446,6 +446,7 @@ def _get_ring_buffer_to_input_left_shifts( weights_signed = False rate_stats = [RunningStats() for _ in range(n_synapse_types)] steps_per_second = 1000000.0 / machine_timestep + min_max_weight = numpy.ones(n_synapse_types) * 2 ** 32 for app_edge in 
application_graph.get_edges_ending_at_vertex( application_vertex): @@ -472,6 +473,8 @@ def _get_ring_buffer_to_input_left_shifts( weight_max = (synapse_dynamics.get_weight_maximum( connector, synapse_info.weight) * weight_scale) + min_max_weight[synapse_type] = \ + min(min_max_weight[synapse_type], weight_max) biggest_weight[synapse_type] = max( biggest_weight[synapse_type], weight_max) @@ -519,23 +522,29 @@ def _get_ring_buffer_to_input_left_shifts( total_weights[synapse_type]) max_weights[synapse_type] = max( max_weights[synapse_type], biggest_weight[synapse_type]) + # This is to deal with very small weights that are floored to 0 + mmw = 2**math.floor(math.log(min_max_weight[synapse_type], 2)) + print("max_weights[", synapse_type, "]", max_weights[synapse_type], + "mmw", mmw) + max_weights[synapse_type] = min(mmw * 2 ** 15, + max_weights[synapse_type]) + # Convert these to powers max_weight_powers = ( - 0 if w <= 0 else int(math.ceil(max(0, math.log(w, 2)))) + 0 if w <= 1 else int(math.ceil(max(0, math.log(w, 2)))) for w in max_weights) # If 2^max_weight_power equals the max weight, we have to add another # power, as range is 0 - (just under 2^max_weight_power)! 
max_weight_powers = ( - w + 1 if (2 ** w) <= a else w + w + 1 if (2 ** w) < a else w for w, a in zip(max_weight_powers, max_weights)) # If we have synapse dynamics that uses signed weights, # Add another bit of shift to prevent overflows if weights_signed: max_weight_powers = (m + 1 for m in max_weight_powers) - return list(max_weight_powers) @staticmethod @@ -565,7 +574,6 @@ def _write_padding( next_block_allowed_address = self.__poptable_type\ .get_next_allowed_address(next_block_start_address) if next_block_allowed_address != next_block_start_address: - # Pad out data file with the added alignment bytes: spec.comment("\nWriting population table required padding\n") spec.switch_write_focus(synaptic_matrix_region) From a73821e03285aab7397b8c4a7390ec5fee8d4ac2 Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Thu, 31 Oct 2019 14:36:17 +0000 Subject: [PATCH 002/198] hardcoded right shift of 5 instead of 10 --- .../src/neuron/input_types/input_type_conductance.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/input_types/input_type_conductance.h b/neural_modelling/src/neuron/input_types/input_type_conductance.h index 4d3577d6de..59f9d16afb 100644 --- a/neural_modelling/src/neuron/input_types/input_type_conductance.h +++ b/neural_modelling/src/neuron/input_types/input_type_conductance.h @@ -44,7 +44,7 @@ static inline input_t* input_type_get_input_value( input_t* value, input_type_pointer_t input_type, uint16_t num_receptors) { use(input_type); for (int i = 0; i < num_receptors; i++) { - value[i] = value[i] >> 10; + value[i] = value[i] >> 5; } return &value[0]; } From 7f130258d14c97e8202b22204669725f8a336276 Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Wed, 6 Nov 2019 13:02:51 +0000 Subject: [PATCH 003/198] I should probably report weight scaling in a nicer way anyway --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 -- 1 file changed, 2 deletions(-) diff --git 
a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index d0b3fb9e37..9d281835c1 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -524,8 +524,6 @@ def _get_ring_buffer_to_input_left_shifts( max_weights[synapse_type], biggest_weight[synapse_type]) # This is to deal with very small weights that are floored to 0 mmw = 2**math.floor(math.log(min_max_weight[synapse_type], 2)) - print("max_weights[", synapse_type, "]", max_weights[synapse_type], - "mmw", mmw) max_weights[synapse_type] = min(mmw * 2 ** 15, max_weights[synapse_type]) From 91930158ec066ee6e9a64fbe71c6bcff0f7eb328 Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Tue, 26 Nov 2019 10:47:19 +0000 Subject: [PATCH 004/198] syn_info.weight -> weights, better call for weight variance --- .../models/neural_projections/connectors/from_list_connector.py | 2 +- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index 0b59b9c933..ed8a88f65f 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -196,7 +196,7 @@ def get_weight_mean(self, weights): def get_weight_maximum(self, synapse_info): # pylint: disable=too-many-arguments if self.__weights is None: - return numpy.amax(synapse_info.weights) + return self._get_weight_maximum(self.__weights, len(self.__conn_list)) else: return numpy.amax(numpy.abs(self.__weights)) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index fec3e80edc..6fb9438056 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ 
b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -473,7 +473,7 @@ def _get_ring_buffer_to_input_left_shifts( 0.0, delay_variance, n_connections) weight_max = (synapse_dynamics.get_weight_maximum( - connector, synapse_info.weight) * weight_scale) + connector, synapse_info.weights) * weight_scale) min_max_weight[synapse_type] = \ min(min_max_weight[synapse_type], weight_max) From 15e5a0c929a697c323ea75abbb193ae594b90a27 Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Tue, 26 Nov 2019 16:24:43 +0000 Subject: [PATCH 005/198] printing ring buffer left shifts --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 6fb9438056..fdea10eb95 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -529,7 +529,6 @@ def _get_ring_buffer_to_input_left_shifts( max_weights[synapse_type] = min(mmw * 2 ** 15, max_weights[synapse_type]) - # Convert these to powers max_weight_powers = ( 0 if w <= 1 else int(math.ceil(max(0, math.log(w, 2)))) @@ -545,6 +544,10 @@ def _get_ring_buffer_to_input_left_shifts( # Add another bit of shift to prevent overflows if weights_signed: max_weight_powers = (m + 1 for m in max_weight_powers) + print("=" * 60) + print("RB left shifts for {:20}".format(application_vertex.label), + "=", list(max_weight_powers)) + print("-" * 60) return list(max_weight_powers) @staticmethod From cc11b09a972534b5254bdcc0748a8b2253348893 Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Tue, 26 Nov 2019 16:29:05 +0000 Subject: [PATCH 006/198] correctly interrogating a GENERATOR --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 
fdea10eb95..a3764a0001 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -544,11 +544,12 @@ def _get_ring_buffer_to_input_left_shifts( # Add another bit of shift to prevent overflows if weights_signed: max_weight_powers = (m + 1 for m in max_weight_powers) + rb_ls = list(max_weight_powers) print("=" * 60) print("RB left shifts for {:20}".format(application_vertex.label), - "=", list(max_weight_powers)) + "=", rb_ls) print("-" * 60) - return list(max_weight_powers) + return rb_ls @staticmethod def _get_weight_scale(ring_buffer_to_input_left_shift): From a0b095ad3a5a327026dc62684f0edaef6ac8e656 Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Thu, 28 Nov 2019 11:38:54 +0000 Subject: [PATCH 007/198] MAX SPIKES PER TICK PROVENANCE --- neural_modelling/src/neuron/c_main.c | 55 +++++++++++++++++++ .../src/neuron/spike_processing.c | 44 +++++++++++++++ .../src/neuron/spike_processing.h | 16 ++++++ .../neuron/population_machine_vertex.py | 52 +++++++++++++++++- 4 files changed, 166 insertions(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/c_main.c b/neural_modelling/src/neuron/c_main.c index 5263ea535a..accad1b236 100644 --- a/neural_modelling/src/neuron/c_main.c +++ b/neural_modelling/src/neuron/c_main.c @@ -41,6 +41,7 @@ #include "plasticity/synapse_dynamics.h" #include "structural_plasticity/synaptogenesis_dynamics.h" #include "profile_tags.h" +#include "spike_profiling.h" #include #include @@ -61,6 +62,11 @@ struct neuron_provenance { uint32_t n_input_buffer_overflows; uint32_t current_timer_tick; uint32_t n_plastic_synaptic_weight_saturations; + uint32_t max_spikes_in_a_tick; + uint32_t max_dmas_in_a_tick; + uint32_t max_pipeline_restarts; + uint32_t timer_callback_completed; + uint32_t spike_pipeline_deactivated; }; //! 
values for the priority for each callback @@ -73,6 +79,24 @@ typedef enum callback_priorities { // Globals +// Counters to assess maximum spikes per timer tick +uint32_t max_spikes_in_a_tick = 0; +uint32_t max_dmas_in_a_tick = 0; +uint32_t max_pipeline_restarts = 0; + +uint32_t timer_callback_completed = 20000000; +uint32_t temp_timer_callback_completed = 0; +uint32_t spike_pipeline_deactivated = 0; + +uint32_t last_spikes = 0; +uint32_t last_restarts = 0; +uint32_t deactivation_time = 0; + +struct spike_holder_t spike_counter; +struct spike_holder_t spike_cache; +struct spike_holder_t spike_counter_inh; +struct spike_holder_t spike_cache_inh; + //! the current timer tick value //! the timer tick callback returning the same value. uint32_t time; @@ -123,6 +147,11 @@ void c_main_store_provenance_data(address_t provenance_region) { prov->current_timer_tick = time; prov->n_plastic_synaptic_weight_saturations = synapse_dynamics_get_plastic_saturation_count(); + prov->max_spikes_in_a_tick = max_spikes_in_a_tick; + prov->max_dmas_in_a_tick = max_dmas_in_a_tick; + prov->max_pipeline_restarts = max_pipeline_restarts; + prov->timer_callback_completed = timer_callback_completed; + prov->spike_pipeline_deactivated = spike_pipeline_deactivated; log_debug("finished other provenance data"); } @@ -230,6 +259,11 @@ static bool initialise(void) { void resume_callback(void) { recording_reset(); + // reset high water mark for spike counter + max_spikes_in_a_tick = 0; + max_dmas_in_a_tick = 0; + max_pipeline_restarts = 0; + // try reloading neuron parameters data_specification_metadata_t *ds_regions = data_specification_get_data_address(); @@ -248,6 +282,27 @@ void resume_callback(void) { void timer_callback(uint timer_count, uint unused) { use(unused); + // Get number of spikes in last tick, and reset spike counter + last_spikes = spike_processing_get_and_reset_spikes_this_tick(); + uint32_t last_dmas = spike_processing_get_and_reset_dmas_this_tick(); + last_restarts = + 
spike_processing_get_and_reset_pipeline_restarts_this_tick(); + deactivation_time = spike_processing_get_pipeline_deactivation_time(); + + // cache and flush spike counters + spike_profiling_cache_and_flush_spike_holder(&spike_counter, + &spike_cache); + spike_profiling_cache_and_flush_spike_holder(&spike_counter_inh, + &spike_cache_inh); + + if (last_spikes > max_spikes_in_a_tick){ + max_spikes_in_a_tick = last_spikes; + max_dmas_in_a_tick = last_dmas; + max_pipeline_restarts = last_restarts; + timer_callback_completed = temp_timer_callback_completed; + spike_pipeline_deactivated = deactivation_time; + } + profiler_write_entry_disable_irq_fiq(PROFILER_ENTER | PROFILER_TIMER); time++; diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c index 8181cf53ef..0ba9de6664 100644 --- a/neural_modelling/src/neuron/spike_processing.c +++ b/neural_modelling/src/neuron/spike_processing.c @@ -80,6 +80,13 @@ static uint32_t dma_n_rewires; static uint32_t dma_n_spikes; +// counter for number of spikes between timer events +uint32_t spikes_this_tick = 0; +uint32_t dmas_this_tick = 0; +uint32_t pipeline_restarts_this_tick = 0; +uint32_t spike_pipeline_deactivation_time = 0; + + /* PRIVATE FUNCTIONS - static for inlining */ static inline void do_dma_read( @@ -234,6 +241,9 @@ static void multicast_packet_received_callback(uint key, uint payload) { use(payload); log_debug("Received spike %x at %d, DMA Busy = %d", key, time, dma_busy); + // Increment the count of number of spikes received this tick by this core + spikes_this_tick++; + // If there was space to add spike to incoming spike queue if (in_spikes_add_spike(key)) { // If we're not already processing synaptic DMAs, @@ -258,6 +268,10 @@ static void dma_complete_callback(uint unused, uint tag) { log_debug("DMA transfer complete at time %u with tag %u", time, tag); + // Increment the counter tracking the number of DMAs completed this + // timestep on a particular core + 
dmas_this_tick++; + // Get pointer to current buffer uint32_t current_buffer_index = buffer_being_read; dma_buffer *current_buffer = &dma_buffers[current_buffer_index]; @@ -325,6 +339,9 @@ void user_event_callback(uint unused0, uint unused1) { dma_n_rewires = 0; dma_n_spikes = 0; + // Increment counter for spike processing pipeline restarts + pipeline_restarts_this_tick++; + if (buffer_being_read < N_DMA_BUFFERS) { // If the DMA buffer is full of valid data, attempt to reuse it on the // next data to be used, as this might be able to make use of the buffer @@ -412,3 +429,30 @@ bool spike_processing_do_rewiring(int number_of_rewires) { spin1_mode_restore(cpsr); return true; } + +uint32_t spike_processing_get_and_reset_spikes_this_tick(){ + + uint32_t spikes_to_return = spikes_this_tick; + spikes_this_tick = 0; + + return spikes_to_return; +} + +uint32_t spike_processing_get_and_reset_dmas_this_tick(){ + + uint32_t dmas_to_return = dmas_this_tick; + dmas_this_tick = 0; + + return dmas_to_return; +} + +uint32_t spike_processing_get_and_reset_pipeline_restarts_this_tick(){ + uint32_t pipeline_restarts_to_return = pipeline_restarts_this_tick; + pipeline_restarts_this_tick = 0; + + return pipeline_restarts_to_return; +} + +uint32_t spike_processing_get_pipeline_deactivation_time(){ + return spike_pipeline_deactivation_time; +} \ No newline at end of file diff --git a/neural_modelling/src/neuron/spike_processing.h b/neural_modelling/src/neuron/spike_processing.h index 23f5f26918..5d63bfb8f6 100644 --- a/neural_modelling/src/neuron/spike_processing.h +++ b/neural_modelling/src/neuron/spike_processing.h @@ -34,4 +34,20 @@ uint32_t spike_processing_get_buffer_overflows(void); //! \return bool: currently, always true bool spike_processing_do_rewiring(int number_of_rew); +//! \brief get number of spikes received since last timer event +//! \return uint32_t number of spikes +uint32_t spike_processing_get_and_reset_spikes_this_tick(); + +//! 
\brief get number of dmas completed since last timer event +//! \return uint32_t number of DMAs +uint32_t spike_processing_get_and_reset_dmas_this_tick(); + +//! \brief get number of time pipeline was restarted since last timer event +//! \return uint32_t number of pipeline restarts +uint32_t spike_processing_get_and_reset_pipeline_restarts_this_tick(); + +//! \brief get time from T1 clock at which spike pipeline completed +//! \return uint32_t pipeline deactivation time +uint32_t spike_processing_get_pipeline_deactivation_time(); + #endif // _SPIKE_PROCESSING_H_ diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index 2372339cd5..4b5350125b 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -46,6 +46,11 @@ class EXTRA_PROVENANCE_DATA_ENTRIES(Enum): BUFFER_OVERFLOW_COUNT = 2 CURRENT_TIMER_TIC = 3 PLASTIC_SYNAPTIC_WEIGHT_SATURATION_COUNT = 4 + MAX_SPIKES_IN_A_TICK = 5 + MAX_DMAS_IN_A_TICK = 6 + MAX_PIPELINE_RESTARTS = 7 + TIMER_CALLBACK_COMPLETED = 8 + SPIKES_PIPELINE_ACTIVATED = 9 PROFILE_TAG_LABELS = { 0: "TIMER", @@ -108,7 +113,22 @@ def get_provenance_data_from_machine(self, transceiver, placement): self.EXTRA_PROVENANCE_DATA_ENTRIES.CURRENT_TIMER_TIC.value] n_plastic_saturations = provenance_data[ self.EXTRA_PROVENANCE_DATA_ENTRIES. - PLASTIC_SYNAPTIC_WEIGHT_SATURATION_COUNT.value] + PLASTIC_SYNAPTIC_WEIGHT_SATURATION_COUNT.value] + max_spikes_in_a_tick = provenance_data[ + self.EXTRA_PROVENANCE_DATA_ENTRIES. + MAX_SPIKES_IN_A_TICK.value] + max_dmas_in_a_tick = provenance_data[ + self.EXTRA_PROVENANCE_DATA_ENTRIES. + MAX_DMAS_IN_A_TICK.value] + max_pipeline_restarts = provenance_data[ + self.EXTRA_PROVENANCE_DATA_ENTRIES. + MAX_PIPELINE_RESTARTS.value] + timer_callback_completed = provenance_data[ + self.EXTRA_PROVENANCE_DATA_ENTRIES. 
+ TIMER_CALLBACK_COMPLETED.value] + spike_pipeline_deactivated = provenance_data[ + self.EXTRA_PROVENANCE_DATA_ENTRIES. + SPIKES_PIPELINE_ACTIVATED.value] label, x, y, p, names = self._get_placement_details(placement) @@ -151,6 +171,36 @@ def get_provenance_data_from_machine(self, transceiver, placement): "spikes_per_second and / or ring_buffer_sigma values located " "within the .spynnaker.cfg file.".format( label, x, y, p, n_plastic_saturations)))) + provenance_items.append(ProvenanceDataItem( + self._add_name(names, + "MAX_SPIKES_IN_A_TICK"), + max_spikes_in_a_tick, + report=max_spikes_in_a_tick > 20, + message=( + "Max number of spikes for {} on {}, {}, {} " + "was {}. Empirically, we can deal with ~20 for real time " + "performance using a 0.1 ms timestep.".format( + label, x, y, p, max_spikes_in_a_tick)))) + provenance_items.append(ProvenanceDataItem( + self._add_name(names, + "MAX_DMAS_IN_A_TICK"), + max_dmas_in_a_tick)) + provenance_items.append(ProvenanceDataItem( + self._add_name(names, + "MAX_PIPELINE_RESTARTS"), + max_pipeline_restarts)) + provenance_items.append(ProvenanceDataItem( + self._add_name(names, + "MAX_PIPELINE_RESTARTS"), + max_pipeline_restarts)) + provenance_items.append(ProvenanceDataItem( + self._add_name(names, + "TIMER_CALLBACK_COMPLETED"), + timer_callback_completed)) + provenance_items.append(ProvenanceDataItem( + self._add_name(names, + "SPIKES_PIPELINE_ACTIVATED"), + spike_pipeline_deactivated)) return provenance_items From 76313834c9d251a26c4f0b518bfd165ee8c42558 Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Thu, 28 Nov 2019 13:28:51 +0000 Subject: [PATCH 008/198] [PROVENANCE] forgot to add spike profiling utility --- neural_modelling/src/neuron/spike_profiling.h | 79 +++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 neural_modelling/src/neuron/spike_profiling.h diff --git a/neural_modelling/src/neuron/spike_profiling.h b/neural_modelling/src/neuron/spike_profiling.h new file mode 100644 index 
0000000000..7b21687caf --- /dev/null +++ b/neural_modelling/src/neuron/spike_profiling.h @@ -0,0 +1,79 @@ +#include + +typedef struct spike_holder_t { + uint8_t spikes_a; + uint8_t spikes_b; + uint8_t spikes_c; + uint8_t spikes_d; +} spike_holder_t; + +static inline void spike_profiling_cache_and_flush_spike_holder( + struct spike_holder_t* counter_spikes, + struct spike_holder_t* cache_levels) { + + cache_levels->spikes_a = counter_spikes->spikes_a; + cache_levels->spikes_b = counter_spikes->spikes_b; + cache_levels->spikes_c = counter_spikes->spikes_c; + cache_levels->spikes_d = counter_spikes->spikes_d; + + // zero counters + counter_spikes->spikes_a = 0; + counter_spikes->spikes_b = 0; + counter_spikes->spikes_c = 0; + counter_spikes->spikes_d = 0; +} + +static inline void spike_profiling_add_count(uint32_t row_length, + struct spike_holder_t* spike_counter) { + + uint32_t a = 0; + uint32_t b = 1; + uint32_t c = 5; + + if (row_length <= a) { + spike_counter->spikes_a++; + } else if (row_length > a && row_length <= b) { + spike_counter->spikes_b++; + } else if (row_length > b && row_length <= c) { + spike_counter->spikes_c++; + } else if (row_length > c) { + spike_counter->spikes_d++; + } +} + +static inline int32_t spike_profiling_get_spike_holder_as_int( + struct spike_holder_t spikes) { + + union { + int32_t inty; + struct spike_holder_t sh; + } x; + + x.sh = spikes; + + return x.inty; +} + +static inline accum spike_profiling_get_spike_holder_as_accum( + struct spike_holder_t spikes) { + union { + accum acc; + struct spike_holder_t sh; + } x; + x.sh = spikes; + + return x.acc; +} + +static inline void spike_profiling_print_spikes_from_spike_holder( + struct spike_holder_t spikes_orig) { + io_printf(IO_BUF, "Spikes from input: a %u, b %u, c %u, d %u \n", + spikes_orig.spikes_a, spikes_orig.spikes_b, spikes_orig.spikes_c, + spikes_orig.spikes_d); +} + +static inline void spike_profiling_print_spikes_from_int(int32_t output) { + io_printf(IO_BUF, "Spikes from 
output: a %d, b %d, c %d, d %d \n", + (output & 0xFF), (output >> 8 & 0xFF), (output >> 16 & 0xFF), + (output >> 24 & 0xFF)); +} From db6084f4e57f5f041f78d620e57f27f63b91548a Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Thu, 28 Nov 2019 14:21:17 +0000 Subject: [PATCH 009/198] passing syn_info instead of weights. That used to work simply because I tested it with FromListConnectors --- .../neural_projections/connectors/from_list_connector.py | 3 ++- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index ed8a88f65f..8dbe2caf5e 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -196,7 +196,8 @@ def get_weight_mean(self, weights): def get_weight_maximum(self, synapse_info): # pylint: disable=too-many-arguments if self.__weights is None: - return self._get_weight_maximum(self.__weights, len(self.__conn_list)) + return self._get_weight_maximum(synapse_info.weights, + len(self.__conn_list)) else: return numpy.amax(numpy.abs(self.__weights)) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index a3764a0001..1124384501 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -473,7 +473,7 @@ def _get_ring_buffer_to_input_left_shifts( 0.0, delay_variance, n_connections) weight_max = (synapse_dynamics.get_weight_maximum( - connector, synapse_info.weights) * weight_scale) + connector, synapse_info) * weight_scale) min_max_weight[synapse_type] = \ min(min_max_weight[synapse_type], weight_max) From 53cc09b01a01a4823e68e25d112154ad5e60347d Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Mon, 9 
Dec 2019 13:47:33 +0000 Subject: [PATCH 010/198] I think this is enough to enable 64 delay slots per neuron --- spynnaker/pyNN/utilities/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index fd7ba6ce66..54a92d655a 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -56,7 +56,7 @@ SCALE = WEIGHT_FLOAT_TO_FIXED_SCALE * NA_TO_PA_SCALE # natively supported delays for all abstract_models -MAX_SUPPORTED_DELAY_TICS = 16 +MAX_SUPPORTED_DELAY_TICS = 64 MAX_DELAY_BLOCKS = 8 MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 From 00fe5bfca5146cda72cbef9d7f6e1333b38436ed Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Wed, 11 Dec 2019 12:42:04 +0000 Subject: [PATCH 011/198] max delay in matrix generator --- .../matrix_generators/matrix_generator_common.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_common.h b/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_common.h index bcb1efae73..bee74729d1 100644 --- a/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_common.h +++ b/neural_modelling/src/synapse_expander/matrix_generators/matrix_generator_common.h @@ -28,7 +28,7 @@ /** *! \brief The maximum delay value that can be represented on core */ -#define MAX_DELAY 16 +#define MAX_DELAY 64 /** *! 
\brief A converted final delay value and delay stage From f194f17c702d1980141970b97a6966e1d4f6575d Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Tue, 28 Jan 2020 10:55:29 +0000 Subject: [PATCH 012/198] viridis based visualiser for a pushbot --- .../push_bot/push_bot_parameters/push_bot_retina_viewer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/external_devices_models/push_bot/push_bot_parameters/push_bot_retina_viewer.py b/spynnaker/pyNN/external_devices_models/push_bot/push_bot_parameters/push_bot_retina_viewer.py index a1a7b364be..9f0a1d883f 100644 --- a/spynnaker/pyNN/external_devices_models/push_bot/push_bot_parameters/push_bot_retina_viewer.py +++ b/spynnaker/pyNN/external_devices_models/push_bot/push_bot_parameters/push_bot_retina_viewer.py @@ -118,7 +118,7 @@ def run(self): # Create image plot of retina output fig = self.__pyplot.figure() self.__image = self.__pyplot.imshow( - self.__image_data_view, cmap="jet", vmin=0.0, + self.__image_data_view, cmap="viridis", vmin=0.0, vmax=self.__display_max) # Play animation From cc809b0ad7535ec7f617ed18ee2613d70e9ef658 Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Fri, 7 Feb 2020 09:31:22 +0200 Subject: [PATCH 013/198] initial implementation of if_cond_alpha. 
Will probably go unused --- .../makefiles/neuron/IF_cond_alpha/Makefile | 26 ++++++++++++++ neural_modelling/makefiles/neuron/Makefile | 3 +- .../models/neuron/builds/if_cond_alpha.py | 34 +++++++++++++++---- 3 files changed, 56 insertions(+), 7 deletions(-) create mode 100644 neural_modelling/makefiles/neuron/IF_cond_alpha/Makefile diff --git a/neural_modelling/makefiles/neuron/IF_cond_alpha/Makefile b/neural_modelling/makefiles/neuron/IF_cond_alpha/Makefile new file mode 100644 index 0000000000..45546379d5 --- /dev/null +++ b/neural_modelling/makefiles/neuron/IF_cond_alpha/Makefile @@ -0,0 +1,26 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +APP = $(notdir $(CURDIR)) + +NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c +NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h +INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h +NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h +THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h +SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_alpha_impl.h +SYNAPSE_DYNAMICS = $(NEURON_DIR)/neuron/plasticity/synapse_dynamics_static_impl.c + +include ../neural_build.mk diff --git a/neural_modelling/makefiles/neuron/Makefile b/neural_modelling/makefiles/neuron/Makefile index 21b109c02f..e1f7967835 100644 --- a/neural_modelling/makefiles/neuron/Makefile +++ b/neural_modelling/makefiles/neuron/Makefile @@ -45,7 +45,8 @@ MODELS = IF_curr_exp \ IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight \ IF_curr_exp_sEMD \ IZK_curr_exp_stdp_mad_pair_additive \ - IZK_cond_exp_stdp_mad_pair_additive + IZK_cond_exp_stdp_mad_pair_additive \ + IF_cond_alpha all: for d in $(MODELS); do $(MAKE) -C $$d || exit $$?; done diff --git a/spynnaker/pyNN/models/neuron/builds/if_cond_alpha.py b/spynnaker/pyNN/models/neuron/builds/if_cond_alpha.py index 535d59ce9a..de9eb29c1e 100644 --- a/spynnaker/pyNN/models/neuron/builds/if_cond_alpha.py +++ b/spynnaker/pyNN/models/neuron/builds/if_cond_alpha.py @@ -13,21 +13,43 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-from spynnaker.pyNN.exceptions import SpynnakerException +from spynnaker.pyNN.models.neuron import AbstractPyNNNeuronModelStandard from spynnaker.pyNN.models.defaults import defaults, default_initial_values +from spynnaker.pyNN.models.neuron.neuron_models import ( + NeuronModelLeakyIntegrateAndFire) +from spynnaker.pyNN.models.neuron.synapse_types import SynapseTypeAlpha +from spynnaker.pyNN.models.neuron.input_types import InputTypeConductance +from spynnaker.pyNN.models.neuron.threshold_types import ThresholdTypeStatic @defaults -class IFCondAlpha(object): +class IFCondAlpha(AbstractPyNNNeuronModelStandard): """ Leaky integrate and fire neuron with an alpha-shaped current input. """ # noinspection PyPep8Naming - @default_initial_values({"v", "gsyn_exc", "gsyn_inh"}) + @default_initial_values({"v", "exc_response", + "exc_exp_response", "inh_response", + "inh_exp_response"}) def __init__( self, tau_m=20, cm=1.0, e_rev_E=0.0, e_rev_I=-70.0, v_rest=-65.0, v_reset=-65.0, v_thresh=-50.0, tau_syn_E=0.3, tau_syn_I=0.5, - tau_refrac=0.1, i_offset=0, v=-65.0, gsyn_exc=0.0, gsyn_inh=0.0): + tau_refrac=0.1, i_offset=0, v=-65.0, + exc_response=0.0, exc_exp_response=0.0, inh_response=0.0, + inh_exp_response=0.0): # pylint: disable=too-many-arguments, too-many-locals, unused-argument - raise SpynnakerException( - "This neuron model is currently not supported by the tool chain") + neuron_model = NeuronModelLeakyIntegrateAndFire( + v, v_rest, tau_m, cm, i_offset, v_reset, tau_refrac) + + synapse_type = SynapseTypeAlpha( + exc_response=exc_response, exc_exp_response=exc_exp_response, + tau_syn_E=tau_syn_E, inh_response=inh_response, + inh_exp_response=inh_exp_response, tau_syn_I=tau_syn_I) + + input_type = InputTypeConductance(e_rev_E, e_rev_I) + threshold_type = ThresholdTypeStatic(v_thresh) + + super(IFCondAlpha, self).__init__( + model_name="IF_cond_alpha", binary="IF_cond_alpha.aplx", + neuron_model=neuron_model, input_type=input_type, + synapse_type=synapse_type, 
threshold_type=threshold_type) From afecd18ae2eefd528dd63d61a6e284508f2a1d13 Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Thu, 27 Feb 2020 15:58:01 +0000 Subject: [PATCH 014/198] correct reporting of max possible delays --- spynnaker/pyNN/abstract_spinnaker_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/abstract_spinnaker_common.py b/spynnaker/pyNN/abstract_spinnaker_common.py index bc6ed2c10d..b3a02a86b1 100644 --- a/spynnaker/pyNN/abstract_spinnaker_common.py +++ b/spynnaker/pyNN/abstract_spinnaker_common.py @@ -196,7 +196,7 @@ def _set_up_timings( raise ConfigurationException( "Pacman does not support max delays above {} ms with the " "current machine time step".format( - 0.144 * machine_time_step)) + max_delay_tics_supported * machine_time_step / 1000.0)) if max_delay is not None: self.__max_delay = max_delay else: From e9467bb753689eb87ae108e43347cd32904d0e44 Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Thu, 27 Feb 2020 16:33:40 +0000 Subject: [PATCH 015/198] setting delay extensions to have more delay slots. NEEDS TESTING --- neural_modelling/src/delay_extension/delay_extension.h | 2 +- spynnaker/pyNN/utilities/constants.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/delay_extension/delay_extension.h b/neural_modelling/src/delay_extension/delay_extension.h index a5d9552ed0..3d201192dc 100644 --- a/neural_modelling/src/delay_extension/delay_extension.h +++ b/neural_modelling/src/delay_extension/delay_extension.h @@ -21,7 +21,7 @@ #include // Constants -#define DELAY_STAGE_LENGTH 16 +#define DELAY_STAGE_LENGTH 64 //! 
region identifiers typedef enum region_identifiers { diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index 54a92d655a..78fe19a909 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -57,8 +57,8 @@ # natively supported delays for all abstract_models MAX_SUPPORTED_DELAY_TICS = 64 -MAX_DELAY_BLOCKS = 8 -MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 +MAX_DELAY_BLOCKS = 16 +MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 64 # the minimum supported delay slot between two neurons MIN_SUPPORTED_DELAY = 1 From 888f659520a5c3f7e244828e7ac3b098832bf19d Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Fri, 28 Feb 2020 16:09:39 +0000 Subject: [PATCH 016/198] tested and fixed delay extension with more delay slots. --- neural_modelling/src/delay_extension/delay_extension.c | 1 + spynnaker/pyNN/utilities/constants.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/delay_extension/delay_extension.c b/neural_modelling/src/delay_extension/delay_extension.c index 8c2fd35608..827d3c4989 100644 --- a/neural_modelling/src/delay_extension/delay_extension.c +++ b/neural_modelling/src/delay_extension/delay_extension.c @@ -371,6 +371,7 @@ void c_main(void) { // Initialise the incoming spike buffer if (!in_spikes_initialize_spike_buffer(256)) { + log_error("Error in initialisation of spike buffer!"); rt_error(RTE_SWERR); } diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index 78fe19a909..1836102218 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -57,8 +57,8 @@ # natively supported delays for all abstract_models MAX_SUPPORTED_DELAY_TICS = 64 -MAX_DELAY_BLOCKS = 16 -MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 64 +MAX_DELAY_BLOCKS = 64 +MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 # the minimum supported delay slot between two neurons MIN_SUPPORTED_DELAY = 1 From 
7cc3a28d97a1fe9c8206bc0afcdf28f2558f46b9 Mon Sep 17 00:00:00 2001 From: Petrut Antoniu Bogdan Date: Fri, 6 Mar 2020 14:50:44 +0000 Subject: [PATCH 017/198] added support for setting RB left shift values --- .../pyNN/models/neuron/synaptic_manager.py | 19 ++++++++++++------- .../pyNN/models/pynn_population_common.py | 8 ++++++++ 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 1124384501..03ef9aa89b 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -545,10 +545,6 @@ def _get_ring_buffer_to_input_left_shifts( if weights_signed: max_weight_powers = (m + 1 for m in max_weight_powers) rb_ls = list(max_weight_powers) - print("=" * 60) - print("RB left shifts for {:20}".format(application_vertex.label), - "=", rb_ls) - print("-" * 60) return rb_ls @staticmethod @@ -975,9 +971,18 @@ def write_data_spec( all_syn_block_sz, graph_mapper, application_graph, application_vertex) - ring_buffer_shifts = self._get_ring_buffer_shifts( - application_vertex, application_graph, machine_time_step, - weight_scale) + print("=" * 80) + if hasattr(application_vertex, "rb_left_shifts"): + print("Using given values for RB left shifts.") + ring_buffer_shifts = application_vertex.rb_left_shifts + else: + print("Computing values for RB left shifts...") + ring_buffer_shifts = self._get_ring_buffer_shifts( + application_vertex, application_graph, machine_time_step, + weight_scale) + print("RB left shifts for {:20}".format(application_vertex.label), + "=", ring_buffer_shifts) + print("-" * 80) weight_scales = self._write_synapse_parameters( spec, ring_buffer_shifts, weight_scale) diff --git a/spynnaker/pyNN/models/pynn_population_common.py b/spynnaker/pyNN/models/pynn_population_common.py index c9cdd7c63c..539388becf 100644 --- a/spynnaker/pyNN/models/pynn_population_common.py +++ 
b/spynnaker/pyNN/models/pynn_population_common.py @@ -74,10 +74,18 @@ def __init__( raise ConfigurationException( "A population cannot have a negative or zero size.") population_parameters = dict(model.default_population_parameters) + + if "rb_left_shifts" in additional_parameters.keys(): + rb_left_shifts = additional_parameters['rb_left_shifts'] + del additional_parameters['rb_left_shifts'] + else: + rb_left_shifts = None if additional_parameters is not None: population_parameters.update(additional_parameters) self.__vertex = model.create_vertex( size, label, constraints, **population_parameters) + if rb_left_shifts: + self.__vertex.rb_left_shifts = rb_left_shifts # Use a provided application vertex directly elif isinstance(model, ApplicationVertex): From 70541be6ae180b8b491968027c4520613d174177 Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Fri, 6 Mar 2020 15:38:07 +0000 Subject: [PATCH 018/198] much better handling of rb values --- .../pyNN/models/neuron/synaptic_manager.py | 27 ++++++++++--------- .../pyNN/models/pynn_population_common.py | 2 +- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 03ef9aa89b..a3580cbf9a 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -936,11 +936,23 @@ def _get_ring_buffer_shifts( weight_scale): """ Get the ring buffer shifts for this vertex """ + if (self.__ring_buffer_shifts is None and + hasattr(application_vertex, "rb_left_shifts")): + print("=" * 80) + print("Using given values for RB left shifts.") + self.__ring_buffer_shifts = application_vertex.rb_left_shifts + print("RB left shifts for {:20}".format(application_vertex.label), + "=", self.__ring_buffer_shifts) + print("-" * 80) if self.__ring_buffer_shifts is None: + print("=" * 80) + print("Computing values for RB left shifts...") self.__ring_buffer_shifts = \ 
self._get_ring_buffer_to_input_left_shifts( application_vertex, application_graph, machine_timestep, weight_scale) + print("RB left shifts for {:20}".format(application_vertex.label), + "=", self.__ring_buffer_shifts) return self.__ring_buffer_shifts def write_data_spec( @@ -971,18 +983,9 @@ def write_data_spec( all_syn_block_sz, graph_mapper, application_graph, application_vertex) - print("=" * 80) - if hasattr(application_vertex, "rb_left_shifts"): - print("Using given values for RB left shifts.") - ring_buffer_shifts = application_vertex.rb_left_shifts - else: - print("Computing values for RB left shifts...") - ring_buffer_shifts = self._get_ring_buffer_shifts( - application_vertex, application_graph, machine_time_step, - weight_scale) - print("RB left shifts for {:20}".format(application_vertex.label), - "=", ring_buffer_shifts) - print("-" * 80) + ring_buffer_shifts = self._get_ring_buffer_shifts( + application_vertex, application_graph, machine_time_step, + weight_scale) weight_scales = self._write_synapse_parameters( spec, ring_buffer_shifts, weight_scale) diff --git a/spynnaker/pyNN/models/pynn_population_common.py b/spynnaker/pyNN/models/pynn_population_common.py index 539388becf..b43ecaae70 100644 --- a/spynnaker/pyNN/models/pynn_population_common.py +++ b/spynnaker/pyNN/models/pynn_population_common.py @@ -84,7 +84,7 @@ def __init__( population_parameters.update(additional_parameters) self.__vertex = model.create_vertex( size, label, constraints, **population_parameters) - if rb_left_shifts: + if rb_left_shifts is not None: self.__vertex.rb_left_shifts = rb_left_shifts # Use a provided application vertex directly From c7c70b35d47e00c75bd47edca29d36c59b06cb94 Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Mon, 9 Mar 2020 16:32:08 +0000 Subject: [PATCH 019/198] added check for additional_params being none --- spynnaker/pyNN/models/pynn_population_common.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/spynnaker/pyNN/models/pynn_population_common.py b/spynnaker/pyNN/models/pynn_population_common.py index b43ecaae70..6fbc054511 100644 --- a/spynnaker/pyNN/models/pynn_population_common.py +++ b/spynnaker/pyNN/models/pynn_population_common.py @@ -75,7 +75,8 @@ def __init__( "A population cannot have a negative or zero size.") population_parameters = dict(model.default_population_parameters) - if "rb_left_shifts" in additional_parameters.keys(): + if (additional_parameters is not None and + "rb_left_shifts" in additional_parameters.keys()): rb_left_shifts = additional_parameters['rb_left_shifts'] del additional_parameters['rb_left_shifts'] else: From 897bb87f84c520476924d2ff828f9c516010e4c3 Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Fri, 13 Mar 2020 15:56:16 +0000 Subject: [PATCH 020/198] delays now work correctly --- neural_modelling/src/neuron/synapse_row.h | 2 +- .../neuron/synapse_dynamics/synapse_dynamics_static.py | 5 +++-- spynnaker/pyNN/utilities/constants.py | 2 ++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h index 3f7ac4c103..40b80bbb51 100644 --- a/neural_modelling/src/neuron/synapse_row.h +++ b/neural_modelling/src/neuron/synapse_row.h @@ -51,7 +51,7 @@ //! 
how many bits the synapse delay will take #ifndef SYNAPSE_DELAY_BITS -#define SYNAPSE_DELAY_BITS 4 +#define SYNAPSE_DELAY_BITS 6 #endif // Create some masks based on the number of bits diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py index 3a49768d24..398d49f0fb 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py @@ -22,6 +22,7 @@ AbstractGenerateOnMachine, MatrixGeneratorID) from spynnaker.pyNN.exceptions import InvalidParameterType from spynnaker.pyNN.utilities.utility_calls import get_n_bits +from spynnaker.pyNN.utilities.constants import DELAY_MASK class SynapseDynamicsStatic( @@ -91,7 +92,7 @@ def get_static_synaptic_data( fixed_fixed = ( ((numpy.rint(numpy.abs(connections["weight"])).astype("uint32") & 0xFFFF) << 16) | - ((connections["delay"].astype("uint32") & 0xF) << + ((connections["delay"].astype("uint32") & DELAY_MASK) << (n_neuron_id_bits + n_synapse_type_bits)) | (connections["synapse_type"].astype( "uint32") << n_neuron_id_bits) | @@ -148,7 +149,7 @@ def read_static_synaptic_data( (data & neuron_id_mask) + post_vertex_slice.lo_atom) connections["weight"] = (data >> 16) & 0xFFFF connections["delay"] = (data >> (n_neuron_id_bits + - n_synapse_type_bits)) & 0xF + n_synapse_type_bits)) & DELAY_MASK connections["delay"][connections["delay"] == 0] = 16 return connections diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index 1836102218..70166a79ec 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -14,6 +14,7 @@ # along with this program. If not, see . 
from enum import Enum +import math from spinn_front_end_common.utilities.constants import ( BYTES_PER_WORD, BYTES_PER_KB) @@ -58,6 +59,7 @@ # natively supported delays for all abstract_models MAX_SUPPORTED_DELAY_TICS = 64 MAX_DELAY_BLOCKS = 64 +DELAY_MASK = (1 << int(math.log2(MAX_SUPPORTED_DELAY_TICS))) -1 MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 # the minimum supported delay slot between two neurons From 5391c7dcdb60a094a0773f6806fb24a45d8b2cba Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Wed, 8 Apr 2020 12:57:22 +0100 Subject: [PATCH 021/198] Change the C code to take a minimum weight rather than a shift --- neural_modelling/src/neuron/c_main.c | 10 +++---- .../stdp/synapse_dynamics_stdp_mad_impl.c | 11 +++---- .../stdp/weight_dependence/weight.h | 7 ++--- .../weight_additive_one_term_impl.c | 5 ++-- .../weight_additive_two_term_impl.c | 5 ++-- .../weight_multiplicative_impl.c | 21 +++---------- .../weight_multiplicative_impl.h | 9 ++---- .../src/neuron/plasticity/synapse_dynamics.h | 4 +-- .../plasticity/synapse_dynamics_static_impl.c | 8 ++--- neural_modelling/src/neuron/synapses.c | 30 ++++++++----------- neural_modelling/src/neuron/synapses.h | 18 ++++------- 11 files changed, 43 insertions(+), 85 deletions(-) diff --git a/neural_modelling/src/neuron/c_main.c b/neural_modelling/src/neuron/c_main.c index e93b8bbfb7..19fe18e167 100644 --- a/neural_modelling/src/neuron/c_main.c +++ b/neural_modelling/src/neuron/c_main.c @@ -156,16 +156,14 @@ static bool initialise(void) { } // Set up the synapses - uint32_t *ring_buffer_to_input_buffer_left_shifts; + REAL *min_weights; address_t indirect_synapses_address = data_specification_get_region(SYNAPTIC_MATRIX_REGION, ds_regions); address_t direct_synapses_address; if (!synapses_initialise( data_specification_get_region(SYNAPSE_PARAMS_REGION, ds_regions), data_specification_get_region(DIRECT_MATRIX_REGION, ds_regions), - n_neurons, n_synapse_types, - &ring_buffer_to_input_buffer_left_shifts, - 
&direct_synapses_address)) { + n_neurons, n_synapse_types, &min_weights, &direct_synapses_address)) { return false; } @@ -182,7 +180,7 @@ static bool initialise(void) { data_specification_get_region(SYNAPSE_DYNAMICS_REGION, ds_regions); address_t syn_dyn_end_address = synapse_dynamics_initialise( synapse_dynamics_region_address, n_neurons, n_synapse_types, - ring_buffer_to_input_buffer_left_shifts); + min_weights); if (synapse_dynamics_region_address && !syn_dyn_end_address) { return false; @@ -226,7 +224,7 @@ void resume_callback(void) { // flushed in case there is a delayed spike left over from a previous run // NOTE: at reset, time is set to UINT_MAX ahead of timer_callback(...) if ((time+1) == 0) { - synapses_flush_ring_buffers(); + synapses_flush_ring_buffers(); } } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c index c98c42a367..e2e5aad4cb 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c @@ -178,10 +178,9 @@ static inline pre_event_history_t *plastic_event_history( void synapse_dynamics_print_plastic_synapses( address_t plastic_region_address, address_t fixed_region_address, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { + REAL *min_weights) { use(plastic_region_address); use(fixed_region_address); - use(ring_buffer_to_input_buffer_left_shifts); #if LOG_LEVEL >= LOG_DEBUG // Extract separate arrays of weights (from plastic region), @@ -209,8 +208,7 @@ void synapse_dynamics_print_plastic_synapses( weight_t weight = synapse_structure_get_final_weight(final_state); log_debug("%08x [%3d: (w: %5u (=", control_word, i, weight); - synapses_print_weight( - weight, ring_buffer_to_input_buffer_left_shifts[synapse_type]); + synapses_print_weight(weight, min_weights[synapse_type]); log_debug("nA) d: %2u, %s, n = %3u)] - 
{%08x %08x}\n", synapse_row_sparse_delay(control_word, synapse_type_index_bits), synapse_types_get_type_char(synapse_type), @@ -232,7 +230,7 @@ static inline index_t sparse_axonal_delay(uint32_t x) { address_t synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { + REAL *min_weights) { stdp_params *sdram_params = (stdp_params *) address; spin1_memcpy(¶ms, sdram_params, sizeof(stdp_params)); @@ -246,8 +244,7 @@ address_t synapse_dynamics_initialise( // Load weight dependence data address_t weight_result = weight_initialise( - weight_region_address, n_synapse_types, - ring_buffer_to_input_buffer_left_shifts); + weight_region_address, n_synapse_types, min_weights); if (weight_result == NULL) { return NULL; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h index b6f7d8818b..edcf193a1f 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h @@ -39,15 +39,12 @@ * \param[in] address: the absolute address in SRAM where the weight parameters * are stored. * \param[in] n_synapse_types: The number of synapse types - * \param[in] ring_buffer_to_input_buffer_left_shifts: how much a value needs - * to be shifted in the left direction to support comprises with fixed point - * arithmetic + * \param[in] min_weights: The value of the weight of the LSB of the weight * \return address_t: returns the end of the weight region as an absolute * SDRAM memory address. */ address_t weight_initialise( - address_t address, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts); + address_t address, uint32_t n_synapse_types, REAL *min_weights); /*! 
* \brief diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c index e6c75242f5..f35611ced0 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c @@ -27,9 +27,8 @@ plasticity_weight_region_data_t *plasticity_weight_region_data; // Functions //--------------------------------------- address_t weight_initialise( - address_t address, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { - use(ring_buffer_to_input_buffer_left_shifts); + address_t address, uint32_t n_synapse_types, REAL *min_weights) { + use(min_weights); log_debug("weight_initialise: starting"); log_debug("\tSTDP additive one-term weight dependence"); diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c index e036928be4..9cc9aee994 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c @@ -27,9 +27,8 @@ plasticity_weight_region_data_t *plasticity_weight_region_data; // Functions //--------------------------------------- address_t weight_initialise( - address_t address, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { - use(ring_buffer_to_input_buffer_left_shifts); + address_t address, uint32_t n_synapse_types, REAL *min_weights) { + use(min_weights); log_debug("weight_initialise: starting"); log_debug("\tSTDP additive two-term weight dependance"); diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c 
b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c index a40e9c6030..ae8deada4e 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c @@ -22,14 +22,13 @@ //--------------------------------------- // Global plasticity parameter data plasticity_weight_region_data_t *plasticity_weight_region_data; -uint32_t *weight_multiply_right_shift; //--------------------------------------- // Functions //--------------------------------------- uint32_t *weight_initialise( - uint32_t *address, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { + uint32_t *address, uint32_t n_synapse_types, REAL *min_weights) { + use(min_weights); log_debug("weight_initialise: starting"); log_debug("\tSTDP multiplicative weight dependence"); @@ -41,12 +40,6 @@ uint32_t *weight_initialise( log_error("Could not initialise weight region data"); return NULL; } - weight_multiply_right_shift = - spin1_malloc(sizeof(uint32_t) * n_synapse_types); - if (weight_multiply_right_shift == NULL) { - log_error("Could not initialise weight region data"); - return NULL; - } int32_t *plasticity_word = (int32_t *) address; for (uint32_t s = 0; s < n_synapse_types; s++) { @@ -56,17 +49,11 @@ uint32_t *weight_initialise( plasticity_weight_region_data[s].a2_plus = *plasticity_word++; plasticity_weight_region_data[s].a2_minus = *plasticity_word++; - // Calculate the right shift required to fixed-point multiply weights - weight_multiply_right_shift[s] = - 16 - (ring_buffer_to_input_buffer_left_shifts[s] + 1); - - log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d," - " Weight multiply right shift:%u", + log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d", s, plasticity_weight_region_data[s].min_weight, plasticity_weight_region_data[s].max_weight, 
plasticity_weight_region_data[s].a2_plus, - plasticity_weight_region_data[s].a2_minus, - weight_multiply_right_shift[s]); + plasticity_weight_region_data[s].a2_minus); } log_debug("weight_initialise: completed successfully"); diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h index 04a23f9358..c2ddabc093 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h @@ -38,8 +38,6 @@ typedef struct { typedef struct { int32_t weight; - - uint32_t weight_multiply_right_shift; const plasticity_weight_region_data_t *weight_region; } weight_state_t; @@ -49,7 +47,6 @@ typedef struct { // Externals //--------------------------------------- extern plasticity_weight_region_data_t *plasticity_weight_region_data; -extern uint32_t *weight_multiply_right_shift; //--------------------------------------- // Weight dependance functions @@ -58,8 +55,6 @@ static inline weight_state_t weight_get_initial( weight_t weight, index_t synapse_type) { return (weight_state_t ) { .weight = (int32_t) weight, - .weight_multiply_right_shift = - weight_multiply_right_shift[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -72,7 +67,7 @@ static inline weight_state_t weight_one_term_apply_depression( // fixed-point format int32_t scale = maths_fixed_mul16( state.weight - state.weight_region->min_weight, - state.weight_region->a2_minus, state.weight_multiply_right_shift); + state.weight_region->a2_minus, 0); // Multiply scale by depression and subtract // **NOTE** using standard STDP fixed-point format handles format conversion @@ -87,7 +82,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( // fixed-point format int32_t scale = maths_fixed_mul16( 
state.weight_region->max_weight - state.weight, - state.weight_region->a2_plus, state.weight_multiply_right_shift); + state.weight_region->a2_plus, 0); // Multiply scale by potentiation and add // **NOTE** using standard STDP fixed-point format handles format conversion diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h index 4d5fee0c0c..7a239c5216 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h @@ -23,7 +23,7 @@ address_t synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts); + REAL *min_weights); bool synapse_dynamics_process_plastic_synapses( address_t plastic_region_address, address_t fixed_region_address, @@ -37,7 +37,7 @@ input_t synapse_dynamics_get_intrinsic_bias( void synapse_dynamics_print_plastic_synapses( address_t plastic_region_address, address_t fixed_region_address, - uint32_t *ring_buffer_to_input_buffer_left_shifts); + REAL *min_weights); //! \brief returns the counters for plastic pre synaptic events based //! 
on (if the model was compiled with SYNAPSE_BENCHMARK parameter) or diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c index 8fbbed88e9..9cb4787199 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c @@ -37,9 +37,9 @@ static uint32_t synapse_type_mask; address_t synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - uint32_t *ring_buffer_to_input_buffer_left_shifts) { + REAL *min_weights) { use(address); - use(ring_buffer_to_input_buffer_left_shifts); + use(min_weights); uint32_t n_neurons_power_2 = n_neurons; uint32_t log_n_neurons = 1; @@ -94,10 +94,10 @@ input_t synapse_dynamics_get_intrinsic_bias( void synapse_dynamics_print_plastic_synapses( address_t plastic_region_address, address_t fixed_region_address, - uint32_t *ring_buffer_to_input_left_shifts) { + REAL *min_weights) { use(plastic_region_address); use(fixed_region_address); - use(ring_buffer_to_input_left_shifts); + use(min_weights); } uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 1721a2f174..657e6519f7 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -44,8 +44,8 @@ static weight_t *ring_buffers; // Ring buffer size static uint32_t ring_buffer_size; -// Amount to left shift the ring buffer by to make it an input -static uint32_t *ring_buffer_to_input_left_shifts; +// The weight value represented by the LSB of a weight +static REAL *min_weights; // Count of the number of times the ring buffers have saturated static uint32_t saturation_count = 0; @@ -93,7 +93,7 @@ static inline void print_synaptic_row(synaptic_row_t synaptic_row) { log_debug("%08x [%3d: (w: %5u (=", synapse, i, 
synapse_row_sparse_weight(synapse)); synapses_print_weight(synapse_row_sparse_weight(synapse), - ring_buffer_to_input_left_shifts[synapse_type]); + min_weights[synapse_type]); log_debug( "nA) d: %2u, %s, n = %3u)] - {%08x %08x}\n", synapse_row_sparse_delay(synapse, synapse_type_index_bits), @@ -108,8 +108,7 @@ static inline void print_synaptic_row(synaptic_row_t synaptic_row) { address_t plastic_region_address = synapse_row_plastic_region(synaptic_row); synapse_dynamics_print_plastic_synapses( - plastic_region_address, fixed_region_address, - ring_buffer_to_input_left_shifts); + plastic_region_address, fixed_region_address, min_weights); } log_debug("----------------------------------------\n"); @@ -139,7 +138,7 @@ static inline void print_ring_buffers(uint32_t time) { d + time, t, n, synapse_type_index_bits, synapse_index_bits); synapses_print_weight(ring_buffers[ring_buffer_index], - ring_buffer_to_input_left_shifts[t]); + min_weights[t]); } io_printf(IO_BUF, "\n"); } @@ -220,24 +219,19 @@ static inline void print_synapse_parameters(void) { bool synapses_initialise( address_t synapse_params_address, address_t direct_matrix_address, uint32_t n_neurons_value, uint32_t n_synapse_types_value, - uint32_t **ring_buffer_to_input_buffer_left_shifts, - address_t *direct_synapses_address) { + REAL **min_weights_out, address_t *direct_synapses_address) { log_debug("synapses_initialise: starting"); n_neurons = n_neurons_value; n_synapse_types = n_synapse_types_value; // Set up ring buffer left shifts - ring_buffer_to_input_left_shifts = - spin1_malloc(n_synapse_types * sizeof(uint32_t)); - if (ring_buffer_to_input_left_shifts == NULL) { + min_weights = spin1_malloc(n_synapse_types * sizeof(REAL)); + if (min_weights == NULL) { log_error("Not enough memory to allocate ring buffer"); return false; } - spin1_memcpy( - ring_buffer_to_input_left_shifts, synapse_params_address, - n_synapse_types * sizeof(uint32_t)); - *ring_buffer_to_input_buffer_left_shifts = - 
ring_buffer_to_input_left_shifts; + spin1_memcpy(min_weights, synapse_params_address, n_synapse_types * sizeof(REAL)); + *min_weights_out = min_weights; // Work out the positions of the direct and indirect synaptic matrices // and copy the direct matrix to DTCM @@ -321,7 +315,7 @@ void synapses_do_timestep_update(timer_t time) { synapse_type_index, neuron_index, synapses_convert_weight_to_input( ring_buffers[ring_buffer_index], - ring_buffer_to_input_left_shifts[synapse_type_index])); + min_weights[synapse_type_index])); // Clear ring buffer ring_buffers[ring_buffer_index] = 0; @@ -387,7 +381,7 @@ uint32_t synapses_get_pre_synaptic_events(void) { } void synapses_flush_ring_buffers(void) { - for (uint32_t i = 0; i < ring_buffer_size; i++) { + for (uint32_t i = 0; i < ring_buffer_size; i++) { ring_buffers[i] = 0; } } diff --git a/neural_modelling/src/neuron/synapses.h b/neural_modelling/src/neuron/synapses.h index 5696182adb..772c2714be 100644 --- a/neural_modelling/src/neuron/synapses.h +++ b/neural_modelling/src/neuron/synapses.h @@ -45,22 +45,15 @@ static inline index_t synapses_get_ring_buffer_index_combined( // Converts a weight stored in a synapse row to an input static inline input_t synapses_convert_weight_to_input( - weight_t weight, uint32_t left_shift) { - union { - int_k_t input_type; - s1615 output_type; - } converter; - - converter.input_type = (int_k_t) (weight) << left_shift; - - return converter.output_type; + weight_t weight, REAL min_weight) { + return weight * min_weight; } static inline void synapses_print_weight( - weight_t weight, uint32_t left_shift) { + weight_t weight, REAL min_weight) { if (weight != 0) { io_printf(IO_BUF, "%12.6k", - synapses_convert_weight_to_input(weight, left_shift)); + synapses_convert_weight_to_input(weight, min_weight)); } else { io_printf(IO_BUF, " "); } @@ -69,8 +62,7 @@ static inline void synapses_print_weight( bool synapses_initialise( address_t synapse_params_address, address_t direct_matrix_address, uint32_t 
n_neurons, uint32_t n_synapse_types, - uint32_t **ring_buffer_to_input_buffer_left_shifts, - address_t *direct_synapses_address); + REAL **min_weights_out, address_t *direct_synapses_address); void synapses_do_timestep_update(timer_t time); From 14d97fbeb45abc54a1c91a6cd50bac843f202ace Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Wed, 8 Apr 2020 16:36:17 +0100 Subject: [PATCH 022/198] First go at Python code (runs through) --- .../external_device_lif_control.py | 4 +- .../external_device_lif_control_vertex.py | 8 +- .../neuron/abstract_population_vertex.py | 6 +- .../neuron/abstract_pynn_neuron_model.py | 7 +- .../stdp/common/plasticity_helpers.py | 16 +- .../abstract_timing_dependence.py | 11 + ...timing_dependence_pfister_spike_triplet.py | 18 +- .../timing_dependence_recurrent.py | 6 + .../timing_dependence_spike_nearest_pair.py | 8 +- .../timing_dependence_spike_pair.py | 8 +- .../timing_dependence_vogels_2011.py | 8 +- .../abstract_weight_dependence.py | 8 + .../weight_dependence_additive.py | 5 + .../weight_dependence_additive_triplet.py | 16 +- .../weight_dependence_multiplicative.py | 5 + .../abstract_synapse_dynamics.py | 20 +- .../synapse_dynamics/synapse_dynamics_stdp.py | 6 + .../pyNN/models/neuron/synaptic_manager.py | 278 ++++-------------- spynnaker/pyNN/spynnaker.cfg | 3 + 19 files changed, 200 insertions(+), 241 deletions(-) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py index a6b0ffed7b..8da55ce550 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py @@ -99,7 +99,7 @@ def __init__( @overrides(AbstractPyNNNeuronModelStandard.create_vertex) def create_vertex( self, n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, incoming_spike_buffer_size): + ring_buffer_sigma, min_weights, incoming_spike_buffer_size): if 
n_neurons != len(self._devices): raise ConfigurationException( "Number of neurons does not match number of devices in {}" @@ -108,4 +108,4 @@ def create_vertex( return ExternalDeviceLifControlVertex( self._devices, self._create_edges, max_atoms, self._model, self, self._translator, spikes_per_second, label, ring_buffer_sigma, - incoming_spike_buffer_size, constraints) + min_weights, incoming_spike_buffer_size, constraints) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py index 34c6ae6e23..47f74a3f47 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py @@ -51,8 +51,8 @@ class ExternalDeviceLifControlVertex( def __init__( self, devices, create_edges, max_atoms_per_core, neuron_impl, pynn_model, translator=None, spikes_per_second=None, label=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, - constraints=None): + ring_buffer_sigma=None, min_weights=None, + incoming_spike_buffer_size=None, constraints=None): """ :param list(AbstractMulticastControllableDevice) devices: The AbstractMulticastControllableDevice instances to be controlled @@ -104,8 +104,8 @@ def __init__( super(ExternalDeviceLifControlVertex, self).__init__( len(devices), label, constraints, max_atoms_per_core, - spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, - neuron_impl, pynn_model) + spikes_per_second, ring_buffer_sigma, min_weights, + incoming_spike_buffer_size, neuron_impl, pynn_model) def routing_key_partition_atom_mapping(self, routing_info, partition): # pylint: disable=arguments-differ diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 8e327633de..da00a60475 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ 
b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -116,8 +116,8 @@ class AbstractPopulationVertex( def __init__( self, n_neurons, label, constraints, max_atoms_per_core, - spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, - neuron_impl, pynn_model): + spikes_per_second, ring_buffer_sigma, min_weights, + incoming_spike_buffer_size, neuron_impl, pynn_model): # pylint: disable=too-many-arguments, too-many-locals super(AbstractPopulationVertex, self).__init__( label, constraints, max_atoms_per_core) @@ -157,7 +157,7 @@ def __init__( # Set up synapse handling self.__synapse_manager = SynapticManager( self.__neuron_impl.get_n_synapse_types(), ring_buffer_sigma, - spikes_per_second, config) + spikes_per_second, min_weights, config) # bool for if state has changed. self.__change_requires_mapping = True diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py index c8112f3304..48695fb53f 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py @@ -21,7 +21,7 @@ _population_parameters = { "spikes_per_second": None, "ring_buffer_sigma": None, - "incoming_spike_buffer_size": None + "min_weights": None, "incoming_spike_buffer_size": None } @@ -52,9 +52,10 @@ def get_max_atoms_per_core(cls): additional_arguments=_population_parameters.keys()) def create_vertex( self, n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, incoming_spike_buffer_size): + ring_buffer_sigma, min_weights, incoming_spike_buffer_size): # pylint: disable=arguments-differ max_atoms = self.get_max_atoms_per_core() return AbstractPopulationVertex( n_neurons, label, constraints, max_atoms, spikes_per_second, - ring_buffer_sigma, incoming_spike_buffer_size, self.__model, self) + ring_buffer_sigma, min_weights, incoming_spike_buffer_size, + self.__model, self) diff --git 
a/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py index 2dcdc97aff..4f80f2ba8f 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py @@ -12,7 +12,7 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . - +from __future__ import division import math import logging import numpy @@ -43,3 +43,17 @@ def get_exp_lut_array(time_step, time_constant, shift=0): # Concatenate with the header header = numpy.array([len(a), shift], dtype="uint16") return numpy.concatenate((header, a.astype("uint16"))).view("uint32") + +def get_min_lut_value(exp_lut_array): + """ Get the smallest non-zero value of an exponential lookup array,\ + or zero if no such value + + :param numpy.ndarray exp_lut_array: The lookup array + :rtype: float + """ + if not len(exp_lut_array): + return 0 + values = exp_lut_array.view("uint16") + if values[-1] != 0: + return values[-1] / STDP_FIXED_POINT_ONE + return values[-2] / STDP_FIXED_POINT_ONE diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py index a11db4e352..5c27729e93 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py @@ -84,6 +84,17 @@ def get_parameter_names(self): :rtype: iterable(str) """ + @abstractproperty + def minimum_delta(self): + """ The smallest non-zero changes that will be passed to the weight\ + rule + + :return: An array of minimum change values, one for potentiation,\ + one for depression. 
If this requires a 2-parameter weight rule, + each of the values of the arrays must then be an array of arrays + :rtype: list of (float or list of float) + """ + def get_provenance_data(self, pre_population_label, post_population_label): """ Get any provenance data diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py index 3e3e1f3009..c446699609 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py @@ -17,7 +17,7 @@ from spinn_utilities.overrides import overrides from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spynnaker.pyNN.models.neuron.plasticity.stdp.common\ - .plasticity_helpers import get_exp_lut_array + .plasticity_helpers import get_exp_lut_array, get_min_lut_value from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence\ import AbstractTimingDependence from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure\ @@ -117,3 +117,19 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): return ['tau_plus', 'tau_minus', 'tau_x', 'tau_y'] + + @property + @overrides(AbstractTimingDependence.minimum_delta) + def minimum_delta(self): + # The minimums for potentiation + min_decayed_r1 = get_min_lut_value(self.__tau_plus_data) + min_decayed_r1_o2 = min_decayed_r1 * get_min_lut_value( + self.__tau_y_data) + + # The minimums for depression + min_decayed_o1 = get_min_lut_value(self.__tau_minus_data) + min_decayed_o1_r2 = min_decayed_o1 * get_min_lut_value( + self.__tau_x_data) + + return [[min_decayed_r1, min_decayed_r1_o2], + [min_decayed_o1, min_decayed_o1_r2]] diff --git 
a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py index bfe22c1da6..dd06057361 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py @@ -136,3 +136,9 @@ def synaptic_structure(self): def get_parameter_names(self): return ['accumulator_depression', 'accumulator_potentiation', 'mean_pre_window', 'mean_post_window', 'dual_fsm'] + + @property + @overrides(AbstractTimingDependence.minimum_delta) + def minimum_delta(self): + # This rule always has a delta of 1 + return [1.0, 1.0] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py index e6613f27a1..fa2ae038c8 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py @@ -17,7 +17,7 @@ from spinn_utilities.overrides import overrides from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spynnaker.pyNN.models.neuron.plasticity.stdp.common\ - .plasticity_helpers import get_exp_lut_array + .plasticity_helpers import get_exp_lut_array, get_min_lut_value from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( SynapseStructureWeightOnly) from .abstract_timing_dependence import AbstractTimingDependence @@ -97,3 +97,9 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): return ['tau_plus', 'tau_minus'] + + @property + @overrides(AbstractTimingDependence.minimum_delta) + def minimum_delta(self): + return 
[get_min_lut_value(self.__tau_plus_data), + get_min_lut_value(self.__tau_minus_data)] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py index aba8e892ae..55d9369688 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py @@ -18,7 +18,7 @@ from spinn_front_end_common.utilities.constants import ( BYTES_PER_SHORT, BYTES_PER_WORD) from spynnaker.pyNN.models.neuron.plasticity.stdp.common\ - .plasticity_helpers import get_exp_lut_array + .plasticity_helpers import get_exp_lut_array, get_min_lut_value from .abstract_timing_dependence import AbstractTimingDependence from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure import ( SynapseStructureWeightOnly) @@ -95,3 +95,9 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): return ['tau_plus', 'tau_minus'] + + @property + @overrides(AbstractTimingDependence.minimum_delta) + def minimum_delta(self): + return [get_min_lut_value(self.__tau_plus_data), + get_min_lut_value(self.__tau_minus_data)] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py index 599b7ec36d..cf330618b8 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py @@ -26,7 +26,7 @@ plasticity_helpers) from spinn_front_end_common.utilities.globals_variables import get_simulator from spynnaker.pyNN.models.neuron.plasticity.stdp.common\ - .plasticity_helpers import 
get_exp_lut_array + .plasticity_helpers import get_exp_lut_array, get_min_lut_value logger = logging.getLogger(__name__) @@ -100,3 +100,9 @@ def synaptic_structure(self): @overrides(AbstractTimingDependence.get_parameter_names) def get_parameter_names(self): return ['alpha', 'tau'] + + @property + @overrides(AbstractTimingDependence.minimum_delta) + def minimum_delta(self): + min_tau = get_min_lut_value(self.__tau_data) + return [min_tau - self.__alpha, min_tau] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py index 109ae9ce86..c6c2fd38aa 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py @@ -84,3 +84,11 @@ def weight_maximum(self): :rtype: float """ + + @abstractmethod + def weight_change_minimum(self, min_delta): + """ The minimum non-zero change in weight that will occur + + :param list min_delta: The minimum delta values from the timing rules + :rtype: float + """ diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py index 24099adaf4..73cb0c74b8 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py @@ -114,6 +114,11 @@ def weight_maximum(self): """ return self.__w_max + @overrides(AbstractWeightDependence.weight_change_minimum) + def weight_change_minimum(self, min_delta): + pot, dep = min_delta + return min(pot * self.A_plus, dep * self.A_minus) + @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): return ['w_min', 'w_max', 
'A_plus', 'A_minus'] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py index e2a112411b..94bc80daa9 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py @@ -18,6 +18,7 @@ from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from .abstract_has_a_plus_a_minus import AbstractHasAPlusAMinus from .abstract_weight_dependence import AbstractWeightDependence + # Six words per synapse type _SPACE_PER_SYNAPSE_TYPE = 6 * BYTES_PER_WORD @@ -143,14 +144,19 @@ def write_parameters( data_type=DataType.INT32) @property + @overrides(AbstractWeightDependence.weight_maximum) def weight_maximum(self): - """ The maximum weight that will ever be set in a synapse as a result\ - of this rule - - :rtype: float - """ return self.__w_max + @overrides(AbstractWeightDependence.weight_change_minimum) + def weight_change_minimum(self, min_delta): + pot, dep = min_delta + a2_plus, a3_plus = pot + a2_minus, a3_minus = dep + min_pot = a2_plus * self.A_plus + a3_plus * self.__a3_plus + min_dep = a2_minus * self.A_minus + a3_minus * self.__a3_minus + return min(min_pot, min_dep) + @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): return ['w_min', 'w_max', 'A3_plus', 'A3_minus'] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py index 5b5ccce85e..5461f6a905 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py +++ 
b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py @@ -111,6 +111,11 @@ def weight_maximum(self): """ return self.__w_max + @overrides(AbstractWeightDependence.weight_change_minimum) + def weight_change_minimum(self, min_delta): + pot, dep = min_delta + return [pot * self.A_plus, dep * self.A_minus] + @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): return ['w_min', 'w_max', 'A_plus', 'A_minus'] diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index ca31ddaf4b..855bee595a 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -133,7 +133,7 @@ def get_delay_maximum(self, connector, synapse_info): """ Get the maximum delay for the synapses :param AbstractConnector connector: - :param ~numpy.ndarray delays: + :param SynapseInformation synapse_info: """ return connector.get_delay_maximum(synapse_info) @@ -141,7 +141,7 @@ def get_delay_variance(self, connector, delays): """ Get the variance in delay for the synapses :param AbstractConnector connector: - :param ~numpy.ndarray delays: + :param SynapseInformation synapse_info: """ # pylint: disable=too-many-arguments return connector.get_delay_variance(delays) @@ -150,7 +150,7 @@ def get_weight_mean(self, connector, synapse_info): """ Get the mean weight for the synapses :param AbstractConnector connector: - :param ~numpy.ndarray weights: + :param SynapseInformation synapse_info: """ # pylint: disable=too-many-arguments return connector.get_weight_mean(synapse_info.weights) @@ -159,7 +159,7 @@ def get_weight_maximum(self, connector, synapse_info): """ Get the maximum weight for the synapses :param AbstractConnector connector: - :param ~numpy.ndarray weights: + :param SynapseInformation synapse_info: """ # 
pylint: disable=too-many-arguments return connector.get_weight_maximum(synapse_info) @@ -173,6 +173,18 @@ def get_weight_variance(self, connector, weights): # pylint: disable=too-many-arguments return connector.get_weight_variance(weights) + def get_weight_minimum(self, connector, synapse_info, sigma): + """ Get the minimum non-zero weight for the synapses, or 0 if all\ + synapses are zero + + :param AbstractConnector connector: The connector in use + :param SynapseInformation synapse_info: + :param float sigma: The number of standard deviations of accuracy + """ + weights = synapse_info.weights + return (connector.get_weight_mean(weights) - + connector.get_weight_variance(weights) * sigma) + def convert_per_connection_data_to_rows( self, connection_row_indices, n_rows, data): """ Converts per-connection data generated from connections into\ diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 310f096d08..122def1525 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -464,6 +464,12 @@ def get_weight_variance(self, connector, weights): # has to be given as no variance return 0.0 + @overrides(AbstractPlasticSynapseDynamics.get_weight_minimum) + def get_weight_minimum(self, connector, synapse_info, sigma): + # Use the minimum weight change as the minimum non-zero weight + return self.__weight_dependence.weight_change_minimum( + self.__timing_dependence.minimum_delta) + @overrides(AbstractPlasticSynapseDynamics.get_weight_maximum) def get_weight_maximum(self, connector, synapse_info): w_max = super(SynapseDynamicsSTDP, self).get_weight_maximum( diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 241f9503cf..2fc7996452 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ 
b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -12,19 +12,13 @@ # # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from spinn_front_end_common.utilities.constants import \ - MICRO_TO_SECOND_CONVERSION - from collections import defaultdict import math import struct import numpy -import scipy.stats # @UnresolvedImport -from scipy import special # @UnresolvedImport -from pyNN.random import RandomDistribution from data_specification.enums import DataType from spinn_front_end_common.utilities.helpful_functions import ( - locate_memory_region_for_placement) + locate_memory_region_for_placement, read_config) from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spynnaker.pyNN.models.neuron.generator_data import GeneratorData from spynnaker.pyNN.models.neural_projections.connectors import ( @@ -34,16 +28,13 @@ AbstractSynapseDynamicsStructural, AbstractGenerateOnMachine, SynapseDynamicsStructuralSTDP) from spynnaker.pyNN.models.neuron.synapse_io import SynapseIORowBased -from spynnaker.pyNN.models.spike_source.spike_source_poisson_vertex import ( - SpikeSourcePoissonVertex) from spynnaker.pyNN.models.utility_models.delays import DelayExtensionVertex -from spynnaker.pyNN.utilities.constants import ( - POPULATION_BASED_REGIONS, POSSION_SIGMA_SUMMATION_LIMIT) -from spynnaker.pyNN.utilities.utility_calls import ( - get_maximum_probable_value, get_n_bits) -from spynnaker.pyNN.utilities.running_stats import RunningStats +from spynnaker.pyNN.utilities.constants import POPULATION_BASED_REGIONS +from spynnaker.pyNN.utilities.utility_calls import get_n_bits from spynnaker.pyNN.models.neuron.master_pop_table import ( MasterPopTableAsBinarySearch) +from spynnaker.pyNN.exceptions import SynapticConfigurationException +import sys TIME_STAMP_BYTES = BYTES_PER_WORD @@ -79,19 +70,22 @@ class SynapticManager(object): "__retrieved_blocks", "__ring_buffer_sigma", "__spikes_per_second", + "__min_weights", 
+ "__min_weights_auto", "__synapse_dynamics", "__synapse_io", "__weight_scales", - "__ring_buffer_shifts", "__gen_on_machine", "__max_row_info", "__synapse_indices"] - def __init__(self, n_synapse_types, ring_buffer_sigma, spikes_per_second, - config, population_table_type=None, synapse_io=None): + def __init__( + self, n_synapse_types, ring_buffer_sigma, spikes_per_second, + min_weights, config, population_table_type=None, synapse_io=None): self.__n_synapse_types = n_synapse_types self.__ring_buffer_sigma = ring_buffer_sigma self.__spikes_per_second = spikes_per_second + self.__min_weights = min_weights # Get the type of population table self.__poptable_type = population_table_type @@ -111,13 +105,29 @@ def __init__(self, n_synapse_types, ring_buffer_sigma, spikes_per_second, self.__spikes_per_second = config.getfloat( "Simulation", "spikes_per_second") + # Read the minimum weight if not set; this might *still* be None, + # meaning "auto calculate" + if self.__min_weights is None: + config_min_weights = read_config( + config, "Simulation", "min_weights") + if config_min_weights is not None: + self.__min_weights = [float(v) + for v in config_min_weights.split(',')] + self.__min_weights_auto = True + if self.__min_weights is not None: + self.__min_weights_auto = False + if len(self.__min_weights) != self.__n_synapse_types: + raise SynapticConfigurationException( + "The number of minimum weights provided ({}) does not" + " match the number of synapses ({})".format( + self.__min_weights, self.__n_synapse_types)) + # Prepare for dealing with STDP - there can only be one (non-static) # synapse dynamics per vertex at present self.__synapse_dynamics = None # Keep the details once computed to allow reading back self.__weight_scales = dict() - self.__ring_buffer_shifts = None self.__delay_key_index = dict() self.__retrieved_blocks = dict() @@ -359,205 +369,17 @@ def _reserve_memory_regions( region=POPULATION_BASED_REGIONS.SYNAPSE_DYNAMICS.value, size=synapse_dynamics_sz, 
label='synapseDynamicsParams') - @staticmethod - def _ring_buffer_expected_upper_bound( - weight_mean, weight_std_dev, spikes_per_second, - machine_timestep, n_synapses_in, sigma): - """ Provides expected upper bound on accumulated values in a ring\ - buffer element. - - Requires an assessment of maximum Poisson input rate. - - Assumes knowledge of mean and SD of weight distribution, fan-in\ - and timestep. - - All arguments should be assumed real values except n_synapses_in\ - which will be an integer. - - :param weight_mean: Mean of weight distribution (in either nA or\ - microSiemens as required) - :param weight_std_dev: SD of weight distribution - :param spikes_per_second: Maximum expected Poisson rate in Hz - :param machine_timestep: in us - :param n_synapses_in: No of connected synapses - :param sigma: How many SD above the mean to go for upper bound; a\ - good starting choice is 5.0. Given length of simulation we can\ - set this for approximate number of saturation events. - """ - # E[ number of spikes ] in a timestep - steps_per_second = MICRO_TO_SECOND_CONVERSION / machine_timestep - average_spikes_per_timestep = ( - float(n_synapses_in * spikes_per_second) / steps_per_second) - - # Exact variance contribution from inherent Poisson variation - poisson_variance = average_spikes_per_timestep * (weight_mean ** 2) - - # Upper end of range for Poisson summation required below - # upper_bound needs to be an integer - upper_bound = int(round(average_spikes_per_timestep + - POSSION_SIGMA_SUMMATION_LIMIT * - math.sqrt(average_spikes_per_timestep))) - - # Closed-form exact solution for summation that gives the variance - # contributed by weight distribution variation when modulated by - # Poisson PDF. Requires scipy.special for gamma and incomplete gamma - # functions. Beware: incomplete gamma doesn't work the same as - # Mathematica because (1) it's regularised and needs a further - # multiplication and (2) it's actually the complement that is needed - # i.e. 
'gammaincc'] - - weight_variance = 0.0 - - if weight_std_dev > 0: - # pylint: disable=no-member - lngamma = special.gammaln(1 + upper_bound) - gammai = special.gammaincc( - 1 + upper_bound, average_spikes_per_timestep) - - big_ratio = (math.log(average_spikes_per_timestep) * upper_bound - - lngamma) - - if -701.0 < big_ratio < 701.0 and big_ratio != 0.0: - log_weight_variance = ( - -average_spikes_per_timestep + - math.log(average_spikes_per_timestep) + - 2.0 * math.log(weight_std_dev) + - math.log(math.exp(average_spikes_per_timestep) * gammai - - math.exp(big_ratio))) - weight_variance = math.exp(log_weight_variance) - - # upper bound calculation -> mean + n * SD - return ((average_spikes_per_timestep * weight_mean) + - (sigma * math.sqrt(poisson_variance + weight_variance))) - - def _get_ring_buffer_to_input_left_shifts( - self, application_vertex, application_graph, machine_timestep, - weight_scale): - """ Get the scaling of the ring buffer to provide as much accuracy as\ - possible without too much overflow - """ - weight_scale_squared = weight_scale * weight_scale - n_synapse_types = self.__n_synapse_types - running_totals = [RunningStats() for _ in range(n_synapse_types)] - delay_running_totals = [RunningStats() for _ in range(n_synapse_types)] - total_weights = numpy.zeros(n_synapse_types) - biggest_weight = numpy.zeros(n_synapse_types) - weights_signed = False - rate_stats = [RunningStats() for _ in range(n_synapse_types)] - steps_per_second = MICRO_TO_SECOND_CONVERSION / machine_timestep - - for app_edge in application_graph.get_edges_ending_at_vertex( - application_vertex): - if isinstance(app_edge, ProjectionApplicationEdge): - for synapse_info in app_edge.synapse_information: - synapse_type = synapse_info.synapse_type - synapse_dynamics = synapse_info.synapse_dynamics - connector = synapse_info.connector - - weight_mean = ( - synapse_dynamics.get_weight_mean( - connector, synapse_info) * weight_scale) - n_connections = \ - 
connector.get_n_connections_to_post_vertex_maximum( - synapse_info) - weight_variance = synapse_dynamics.get_weight_variance( - connector, synapse_info.weights) * weight_scale_squared - running_totals[synapse_type].add_items( - weight_mean, weight_variance, n_connections) - - delay_variance = synapse_dynamics.get_delay_variance( - connector, synapse_info.delays) - delay_running_totals[synapse_type].add_items( - 0.0, delay_variance, n_connections) - - weight_max = (synapse_dynamics.get_weight_maximum( - connector, synapse_info) * weight_scale) - biggest_weight[synapse_type] = max( - biggest_weight[synapse_type], weight_max) - - spikes_per_tick = max( - 1.0, self.__spikes_per_second / steps_per_second) - spikes_per_second = self.__spikes_per_second - if isinstance(app_edge.pre_vertex, - SpikeSourcePoissonVertex): - rate = app_edge.pre_vertex.max_rate - # If non-zero rate then use it; otherwise keep default - if rate != 0: - spikes_per_second = rate - if hasattr(spikes_per_second, "__getitem__"): - spikes_per_second = numpy.max(spikes_per_second) - elif isinstance(spikes_per_second, RandomDistribution): - spikes_per_second = get_maximum_probable_value( - spikes_per_second, app_edge.pre_vertex.n_atoms) - prob = 1.0 - ( - (1.0 / 100.0) / app_edge.pre_vertex.n_atoms) - spikes_per_tick = spikes_per_second / steps_per_second - spikes_per_tick = scipy.stats.poisson.ppf( - prob, spikes_per_tick) - rate_stats[synapse_type].add_items( - spikes_per_second, 0, n_connections) - total_weights[synapse_type] += spikes_per_tick * ( - weight_max * n_connections) - - if synapse_dynamics.are_weights_signed(): - weights_signed = True - - max_weights = numpy.zeros(n_synapse_types) - for synapse_type in range(n_synapse_types): - stats = running_totals[synapse_type] - rates = rate_stats[synapse_type] - if delay_running_totals[synapse_type].variance == 0.0: - max_weights[synapse_type] = max(total_weights[synapse_type], - biggest_weight[synapse_type]) - else: - max_weights[synapse_type] = 
min( - self._ring_buffer_expected_upper_bound( - stats.mean, stats.standard_deviation, rates.mean, - machine_timestep, stats.n_items, - self.__ring_buffer_sigma), - total_weights[synapse_type]) - max_weights[synapse_type] = max( - max_weights[synapse_type], biggest_weight[synapse_type]) - - # Convert these to powers - max_weight_powers = ( - 0 if w <= 0 else int(math.ceil(max(0, math.log(w, 2)))) - for w in max_weights) - - # If 2^max_weight_power equals the max weight, we have to add another - # power, as range is 0 - (just under 2^max_weight_power)! - max_weight_powers = ( - w + 1 if (2 ** w) <= a else w - for w, a in zip(max_weight_powers, max_weights)) - - # If we have synapse dynamics that uses signed weights, - # Add another bit of shift to prevent overflows - if weights_signed: - max_weight_powers = (m + 1 for m in max_weight_powers) - - return list(max_weight_powers) - - @staticmethod - def _get_weight_scale(ring_buffer_to_input_left_shift): - """ Return the amount to scale the weights by to convert them from \ - floating point values to 16-bit fixed point numbers which can be \ - shifted left by ring_buffer_to_input_left_shift to produce an\ - s1615 fixed point number - """ - return float(math.pow(2, 16 - (ring_buffer_to_input_left_shift + 1))) - def _write_synapse_parameters( - self, spec, ring_buffer_shifts, weight_scale): + self, spec, min_weights, weight_scale): """Get the ring buffer shifts and scaling factors.""" # Write the ring buffer shifts spec.switch_write_focus(POPULATION_BASED_REGIONS.SYNAPSE_PARAMS.value) - spec.write_array(ring_buffer_shifts) + for w in min_weights: + spec.write_value(w, data_type=DataType.S1615) # Return the weight scaling factors - return numpy.array([ - self._get_weight_scale(r) * weight_scale - for r in ring_buffer_shifts]) + return numpy.array([(1 / w) * weight_scale for w in min_weights]) def _write_padding( self, spec, synaptic_matrix_region, next_block_start_address): @@ -921,6 +743,32 @@ def _get_ring_buffer_shifts( 
weight_scale) return self.__ring_buffer_shifts + def _calculate_min_weights( + self, application_vertex, application_graph, weight_scale): + min_weights = [sys.maxsize for _ in range(self.__n_synapse_types)] + for app_edge in application_graph.get_edges_ending_at_vertex( + application_vertex): + if isinstance(app_edge, ProjectionApplicationEdge): + for synapse_info in app_edge.synapse_information: + synapse_type = synapse_info.synapse_type + synapse_dynamics = synapse_info.synapse_dynamics + connector = synapse_info.connector + + weight_min = (synapse_dynamics.get_weight_minimum( + connector, synapse_info, self.__ring_buffer_sigma) * + weight_scale) + if weight_min != 0: + min_weights[synapse_type] = min( + min_weights[synapse_type], weight_min) + return [m if m != sys.maxsize else 0 for m in min_weights] + + def _get_min_weights( + self, application_vertex, application_graph, weight_scale): + if self.__min_weights is None: + self.__min_weights = self._calculate_min_weights( + application_vertex, application_graph, weight_scale) + return self.__min_weights + def write_data_spec( self, spec, application_vertex, post_vertex_slice, machine_vertex, placement, machine_graph, application_graph, routing_info, @@ -949,11 +797,10 @@ def write_data_spec( all_syn_block_sz, graph_mapper, application_graph, application_vertex) - ring_buffer_shifts = self._get_ring_buffer_shifts( - application_vertex, application_graph, machine_time_step, - weight_scale) + min_weights = self._get_min_weights( + application_vertex, application_graph, weight_scale) weight_scales = self._write_synapse_parameters( - spec, ring_buffer_shifts, weight_scale) + spec, min_weights, weight_scale) gen_data = self._write_synaptic_matrix_and_master_population_table( spec, post_slices, post_slice_idx, machine_vertex, @@ -1222,7 +1069,8 @@ def gen_on_machine(self, vertex_slice): return self.__gen_on_machine.get(key, False) def reset_ring_buffer_shifts(self): - self.__ring_buffer_shifts = None + if 
self.__min_weights_auto: + self.__min_weights = None @property def changes_during_run(self): diff --git a/spynnaker/pyNN/spynnaker.cfg b/spynnaker/pyNN/spynnaker.cfg index 953b7b46b6..e805bf16b9 100644 --- a/spynnaker/pyNN/spynnaker.cfg +++ b/spynnaker/pyNN/spynnaker.cfg @@ -21,6 +21,9 @@ incoming_spike_buffer_size = 256 # Limit the amount of DTCM used by one-to-one connections one_to_one_connection_dtcm_max_bytes = 2048 +# Auto-compute the minimum weights +min_weights = None + [Mapping] # Algorithms below # pacman algorithms are: From 648a941397c99edb52157647993a6f1001b12d8e Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Thu, 23 Apr 2020 15:28:37 +0100 Subject: [PATCH 023/198] Bring in changes to make the models work with multiple steps per ts --- .../implementations/neuron_impl_standard.h | 166 ++++++++++-------- .../external_device_lif_control.py | 4 +- ...threshold_type_multicast_device_control.py | 2 +- .../abstract_pynn_neuron_model_standard.py | 17 ++ .../additional_input_ca2_adaptive.py | 4 +- .../abstract_standard_neuron_component.py | 2 +- .../implementations/neuron_impl_standard.py | 37 +++- .../input_types/input_type_conductance.py | 2 +- .../neuron/input_types/input_type_current.py | 2 +- .../input_types/input_type_current_semd.py | 2 +- .../neuron_models/abstract_neuron_model.py | 8 +- .../neuron/neuron_models/neuron_model_izh.py | 12 +- .../neuron_model_leaky_integrate_and_fire.py | 4 +- .../synapse_types/synapse_type_alpha.py | 4 +- .../synapse_types/synapse_type_delta.py | 2 +- .../synapse_type_dual_exponential.py | 4 +- .../synapse_types/synapse_type_exponential.py | 4 +- .../neuron/synapse_types/synapse_type_semd.py | 4 +- .../threshold_type_maass_stochastic.py | 4 +- .../threshold_types/threshold_type_static.py | 2 +- unittests/test_populations/test_vertex.py | 4 +- 21 files changed, 166 insertions(+), 124 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h 
b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h index 90662fcbe3..c225005566 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h @@ -39,13 +39,13 @@ #ifndef NUM_EXCITATORY_RECEPTORS #define NUM_EXCITATORY_RECEPTORS 1 #error NUM_EXCITATORY_RECEPTORS was undefined. It should be defined by a synapse\ - shaping include + shaping include #endif #ifndef NUM_INHIBITORY_RECEPTORS #define NUM_INHIBITORY_RECEPTORS 1 #error NUM_INHIBITORY_RECEPTORS was undefined. It should be defined by a synapse\ - shaping include + shaping include #endif //! Array of neuron states @@ -66,6 +66,9 @@ static global_neuron_params_pointer_t global_parameters; // The synapse shaping parameters static synapse_param_t *neuron_synapse_shaping_params; +// The number of steps to run per timestep +static uint n_steps_per_timestep; + static bool neuron_impl_initialise(uint32_t n_neurons) { // allocate DTCM for the global parameter details if (sizeof(global_neuron_params_t)) { @@ -149,6 +152,11 @@ static void neuron_impl_load_neuron_parameters( log_debug("reading parameters, next is %u, n_neurons is %u ", next, n_neurons); + // Read the number of steps per timestep + n_steps_per_timestep = address[next]; + log_info("Looping over %u steps each timestep", n_steps_per_timestep); + next += 1; + if (sizeof(global_neuron_params_t)) { log_debug("writing neuron global parameters"); spin1_memcpy(global_parameters, &address[next], @@ -222,61 +230,71 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, state_t voltage = neuron_model_get_membrane_voltage(neuron); recorded_variable_values[V_RECORDING_INDEX] = voltage; - // Get the exc and inh values from the synapses - input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); - input_t* inh_value = synapse_types_get_inhibitory_input(synapse_type); + // Store whether the neuron has spiked + bool spike = false; - // Call 
functions to obtain exc_input and inh_input - input_t* exc_input_values = input_type_get_input_value( - exc_value, input_type, NUM_EXCITATORY_RECEPTORS); - input_t* inh_input_values = input_type_get_input_value( - inh_value, input_type, NUM_INHIBITORY_RECEPTORS); + // Loop however many times requested + for (uint32_t i = n_steps_per_timestep; i > 0; i--) { - // Sum g_syn contributions from all receptors for recording - REAL total_exc = 0; - REAL total_inh = 0; + // Get the exc and inh values from the synapses + input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); + input_t* inh_value = synapse_types_get_inhibitory_input(synapse_type); - for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) { - total_exc += exc_input_values[i]; - } - for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) { - total_inh += inh_input_values[i]; - } + // Call functions to obtain exc_input and inh_input + input_t* exc_input_values = input_type_get_input_value( + exc_value, input_type, NUM_EXCITATORY_RECEPTORS); + input_t* inh_input_values = input_type_get_input_value( + inh_value, input_type, NUM_INHIBITORY_RECEPTORS); + + // Sum g_syn contributions from all receptors for recording + REAL total_exc = 0; + REAL total_inh = 0; - // Call functions to get the input values to be recorded - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) { + total_exc += exc_input_values[i]; + } + for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) { + total_inh += inh_input_values[i]; + } - // Call functions to convert exc_input and inh_input to current - input_type_convert_excitatory_input_to_current( - exc_input_values, input_type, voltage); - input_type_convert_inhibitory_input_to_current( - inh_input_values, input_type, voltage); + // Call functions to get the input values to be recorded + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = 
total_exc; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; - external_bias += additional_input_get_input_value_as_current( - additional_input, voltage); + // Call functions to convert exc_input and inh_input to current + voltage = neuron_model_get_membrane_voltage(neuron); + input_type_convert_excitatory_input_to_current( + exc_input_values, input_type, voltage); + input_type_convert_inhibitory_input_to_current( + inh_input_values, input_type, voltage); - // update neuron parameters - state_t result = neuron_model_state_update( - NUM_EXCITATORY_RECEPTORS, exc_input_values, - NUM_INHIBITORY_RECEPTORS, inh_input_values, - external_bias, neuron); + external_bias += additional_input_get_input_value_as_current( + additional_input, voltage); - // determine if a spike should occur - bool spike = threshold_type_is_above_threshold(result, threshold_type); + // update neuron parameters + state_t result = neuron_model_state_update( + NUM_EXCITATORY_RECEPTORS, exc_input_values, + NUM_INHIBITORY_RECEPTORS, inh_input_values, + external_bias, neuron); - // If spike occurs, communicate to relevant parts of model - if (spike) { - // Call relevant model-based functions - // Tell the neuron model - neuron_model_has_spiked(neuron); + // determine if a spike should occur + bool spike_now = threshold_type_is_above_threshold(result, threshold_type); - // Tell the additional input - additional_input_has_spiked(additional_input); - } + // If spike occurs, communicate to relevant parts of model + if (spike_now) { + spike = true; + + // Call relevant model-based functions + // Tell the neuron model + neuron_model_has_spiked(neuron); + + // Tell the additional input + additional_input_has_spiked(additional_input); + } - // Shape the existing input according to the included rule - synapse_types_shape_input(synapse_type); + // Shape the existing input according to the included rule + synapse_types_shape_input(synapse_type); + } #if LOG_LEVEL >= LOG_DEBUG 
neuron_model_print_state_variables(neuron); @@ -342,40 +360,40 @@ static void neuron_impl_store_neuron_parameters( #if LOG_LEVEL >= LOG_DEBUG void neuron_impl_print_inputs(uint32_t n_neurons) { - bool empty = true; - for (index_t i = 0; i < n_neurons; i++) { - empty = empty && (0 == bitsk( - synapse_types_get_excitatory_input(&neuron_synapse_shaping_params[i]) - - synapse_types_get_inhibitory_input(&neuron_synapse_shaping_params[i]))); - } - - if (!empty) { - log_debug("-------------------------------------\n"); - - for (index_t i = 0; i < n_neurons; i++) { - input_t input = - synapse_types_get_excitatory_input(&neuron_synapse_shaping_params[i]) - - synapse_types_get_inhibitory_input(&neuron_synapse_shaping_params[i]); - if (bitsk(input) != 0) { - log_debug("%3u: %12.6k (= ", i, input); - synapse_types_print_input(&neuron_synapse_shaping_params[i]); - log_debug(")\n"); - } - } - log_debug("-------------------------------------\n"); - } + bool empty = true; + for (index_t i = 0; i < n_neurons; i++) { + empty = empty && (0 == bitsk( + synapse_types_get_excitatory_input(&neuron_synapse_shaping_params[i]) + - synapse_types_get_inhibitory_input(&neuron_synapse_shaping_params[i]))); + } + + if (!empty) { + log_debug("-------------------------------------\n"); + + for (index_t i = 0; i < n_neurons; i++) { + input_t input = + synapse_types_get_excitatory_input(&neuron_synapse_shaping_params[i]) + - synapse_types_get_inhibitory_input(&neuron_synapse_shaping_params[i]); + if (bitsk(input) != 0) { + log_debug("%3u: %12.6k (= ", i, input); + synapse_types_print_input(&neuron_synapse_shaping_params[i]); + log_debug(")\n"); + } + } + log_debug("-------------------------------------\n"); + } } void neuron_impl_print_synapse_parameters(uint32_t n_neurons) { - log_debug("-------------------------------------\n"); - for (index_t n = 0; n < n_neurons; n++) { - synapse_types_print_parameters(&neuron_synapse_shaping_params[n]); - } - 
log_debug("-------------------------------------\n"); + log_debug("-------------------------------------\n"); + for (index_t n = 0; n < n_neurons; n++) { + synapse_types_print_parameters(&neuron_synapse_shaping_params[n]); + } + log_debug("-------------------------------------\n"); } const char *neuron_impl_get_synapse_type_char(uint32_t synapse_type) { - return synapse_types_get_type_char(synapse_type); + return synapse_types_get_type_char(synapse_type); } #endif // LOG_LEVEL >= LOG_DEBUG diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py index 21ed3f43da..e5c89815e4 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py @@ -87,11 +87,13 @@ def __init__( @overrides(AbstractPyNNNeuronModelStandard.create_vertex) def create_vertex( self, n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, incoming_spike_buffer_size): + ring_buffer_sigma, incoming_spike_buffer_size, + n_steps_per_timestep): if n_neurons != len(self._devices): raise ConfigurationException( "Number of neurons does not match number of devices in {}" .format(label)) + self._model.n_steps_per_timestep = n_steps_per_timestep max_atoms = self.get_max_atoms_per_core() return ExternalDeviceLifControlVertex( self._devices, self._create_edges, max_atoms, self._model, self, diff --git a/spynnaker/pyNN/external_devices_models/threshold_type_multicast_device_control.py b/spynnaker/pyNN/external_devices_models/threshold_type_multicast_device_control.py index bc400ef7d9..5baba1880d 100644 --- a/spynnaker/pyNN/external_devices_models/threshold_type_multicast_device_control.py +++ b/spynnaker/pyNN/external_devices_models/threshold_type_multicast_device_control.py @@ -64,7 +64,7 @@ def has_variable(self, variable): return variable in UNITS @overrides(AbstractThresholdType.get_values) - def 
get_values(self, parameters, state_variables, vertex_slice): + def get_values(self, parameters, state_variables, vertex_slice, ts): # Add the rest of the data return [parameters[DEVICE].apply_operation( diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py index 3d422de786..f238703522 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py @@ -15,14 +15,31 @@ from .abstract_pynn_neuron_model import AbstractPyNNNeuronModel from spynnaker.pyNN.models.neuron.implementations import NeuronImplStandard +from spinn_utilities.overrides import overrides +_population_parameters = AbstractPyNNNeuronModel.default_population_parameters +_population_parameters["n_steps_per_timestep"] = 1 class AbstractPyNNNeuronModelStandard(AbstractPyNNNeuronModel): __slots__ = [] + default_population_parameters = _population_parameters + def __init__( self, model_name, binary, neuron_model, input_type, synapse_type, threshold_type, additional_input_type=None): AbstractPyNNNeuronModel.__init__(self, NeuronImplStandard( model_name, binary, neuron_model, input_type, synapse_type, threshold_type, additional_input_type)) + + @overrides(AbstractPyNNNeuronModel.create_vertex, + additional_arguments={"n_steps_per_timestep"}) + def create_vertex( + self, n_neurons, label, constraints, spikes_per_second, + ring_buffer_sigma, incoming_spike_buffer_size, + n_steps_per_timestep): + # pylint: disable=arguments-differ + self._model.n_steps_per_timestep = n_steps_per_timestep + return super(AbstractPyNNNeuronModelStandard, self).create_vertex( + n_neurons, label, constraints, spikes_per_second, + ring_buffer_sigma, incoming_spike_buffer_size) diff --git a/spynnaker/pyNN/models/neuron/additional_inputs/additional_input_ca2_adaptive.py b/spynnaker/pyNN/models/neuron/additional_inputs/additional_input_ca2_adaptive.py 
index bc8b41aa62..05651418a1 100644 --- a/spynnaker/pyNN/models/neuron/additional_inputs/additional_input_ca2_adaptive.py +++ b/spynnaker/pyNN/models/neuron/additional_inputs/additional_input_ca2_adaptive.py @@ -16,7 +16,6 @@ import numpy from spinn_utilities.overrides import overrides from data_specification.enums import DataType -from pacman.executor.injection_decorator import inject_items from .abstract_additional_input import AbstractAdditionalInput I_ALPHA = "i_alpha" @@ -67,8 +66,7 @@ def get_units(self, variable): def has_variable(self, variable): return variable in UNITS - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractAdditionalInput.get_values, additional_arguments={'ts'}) + @overrides(AbstractAdditionalInput.get_values) def get_values(self, parameters, state_variables, vertex_slice, ts): # pylint: disable=arguments-differ diff --git a/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py b/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py index 623716d8fd..98bcb0fa6d 100644 --- a/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py +++ b/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py @@ -90,7 +90,7 @@ def add_state_variables(self, state_variables): """ @abstractmethod - def get_values(self, parameters, state_variables, vertex_slice): + def get_values(self, parameters, state_variables, vertex_slice, ts): """ Get the values to be written to the machine for this model :param parameters: The holder of the parameters diff --git a/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py b/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py index 3c0344ad82..8efd730ef4 100644 --- a/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py +++ b/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py @@ -17,6 +17,13 @@ from spinn_utilities.overrides import overrides 
from spynnaker.pyNN.models.neuron.input_types import InputTypeConductance from .abstract_neuron_impl import AbstractNeuronImpl +from spinn_front_end_common.utilities import globals_variables + +# The base size of the data +_BASE_SIZE = 4 + +# The default number of steps per timestep +_DEFAULT_N_STEPS_PER_TIMESTEP = 1 class NeuronImplStandard(AbstractNeuronImpl): @@ -31,7 +38,8 @@ class NeuronImplStandard(AbstractNeuronImpl): "__synapse_type", "__threshold_type", "__additional_input_type", - "__components" + "__components", + "__n_steps_per_timestep" ] _RECORDABLES = ["v", "gsyn_exc", "gsyn_inh"] @@ -52,6 +60,7 @@ def __init__( self.__synapse_type = synapse_type self.__threshold_type = threshold_type self.__additional_input_type = additional_input_type + self.__n_steps_per_timestep = _DEFAULT_N_STEPS_PER_TIMESTEP self.__components = [ self.__neuron_model, self.__input_type, self.__threshold_type, @@ -59,6 +68,14 @@ def __init__( if self.__additional_input_type is not None: self.__components.append(self.__additional_input_type) + @property + def n_steps_per_timestep(self): + return self.__n_steps_per_timestep + + @n_steps_per_timestep.setter + def n_steps_per_timestep(self, n_steps_per_timestep): + self.__n_steps_per_timestep = n_steps_per_timestep + @property @overrides(AbstractNeuronImpl.model_name) def model_name(self): @@ -81,7 +98,8 @@ def get_n_cpu_cycles(self, n_neurons): @overrides(AbstractNeuronImpl.get_dtcm_usage_in_bytes) def get_dtcm_usage_in_bytes(self, n_neurons): - total = self.__neuron_model.get_dtcm_usage_in_bytes(n_neurons) + total = _BASE_SIZE + total += self.__neuron_model.get_dtcm_usage_in_bytes(n_neurons) total += self.__synapse_type.get_dtcm_usage_in_bytes(n_neurons) total += self.__input_type.get_dtcm_usage_in_bytes(n_neurons) total += self.__threshold_type.get_dtcm_usage_in_bytes(n_neurons) @@ -92,7 +110,8 @@ def get_dtcm_usage_in_bytes(self, n_neurons): @overrides(AbstractNeuronImpl.get_sdram_usage_in_bytes) def 
get_sdram_usage_in_bytes(self, n_neurons): - total = self.__neuron_model.get_sdram_usage_in_bytes(n_neurons) + total = _BASE_SIZE + total += self.__neuron_model.get_sdram_usage_in_bytes(n_neurons) total += self.__synapse_type.get_sdram_usage_in_bytes(n_neurons) total += self.__input_type.get_sdram_usage_in_bytes(n_neurons) total += self.__threshold_type.get_sdram_usage_in_bytes(n_neurons) @@ -145,10 +164,14 @@ def add_state_variables(self, state_variables): @overrides(AbstractNeuronImpl.get_data) def get_data(self, parameters, state_variables, vertex_slice): - return numpy.concatenate([ - component.get_data(parameters, state_variables, vertex_slice) - for component in self.__components - ]) + # Work out the time step per step + ts = globals_variables.get_simulator().machine_time_step + ts /= self.__n_steps_per_timestep + items = [numpy.array([self.__n_steps_per_timestep], dtype="uint32")] + items.extend( + component.get_data(parameters, state_variables, vertex_slice, ts) + for component in self.__components) + return numpy.concatenate(items) @overrides(AbstractNeuronImpl.read_data) def read_data( diff --git a/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py b/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py index 4b1723028b..d2d0827b3e 100644 --- a/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py +++ b/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py @@ -63,7 +63,7 @@ def has_variable(self, variable): return variable in UNITS @overrides(AbstractInputType.get_values) - def get_values(self, parameters, state_variables, vertex_slice): + def get_values(self, parameters, state_variables, vertex_slice, ts): # Add the rest of the data return [parameters[E_REV_E], parameters[E_REV_I]] diff --git a/spynnaker/pyNN/models/neuron/input_types/input_type_current.py b/spynnaker/pyNN/models/neuron/input_types/input_type_current.py index 354c5f7d1a..a18ef9abef 100644 --- 
a/spynnaker/pyNN/models/neuron/input_types/input_type_current.py +++ b/spynnaker/pyNN/models/neuron/input_types/input_type_current.py @@ -38,7 +38,7 @@ def add_state_variables(self, state_variables): pass @overrides(AbstractInputType.get_values) - def get_values(self, parameters, state_variables, vertex_slice): + def get_values(self, parameters, state_variables, vertex_slice, ts): return [] @overrides(AbstractInputType.update_values) diff --git a/spynnaker/pyNN/models/neuron/input_types/input_type_current_semd.py b/spynnaker/pyNN/models/neuron/input_types/input_type_current_semd.py index 00f14bb97b..76a4e7b1ea 100644 --- a/spynnaker/pyNN/models/neuron/input_types/input_type_current_semd.py +++ b/spynnaker/pyNN/models/neuron/input_types/input_type_current_semd.py @@ -62,7 +62,7 @@ def has_variable(self, variable): return variable in UNITS @overrides(AbstractInputType.get_values) - def get_values(self, parameters, state_variables, vertex_slice): + def get_values(self, parameters, state_variables, vertex_slice, ts): # Add the rest of the data return [parameters[MULTIPLICATOR], state_variables[INH_INPUT_PREVIOUS]] diff --git a/spynnaker/pyNN/models/neuron/neuron_models/abstract_neuron_model.py b/spynnaker/pyNN/models/neuron/neuron_models/abstract_neuron_model.py index aae4f85dcd..72f24c79cf 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/abstract_neuron_model.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/abstract_neuron_model.py @@ -60,7 +60,7 @@ def get_sdram_usage_in_bytes(self, n_neurons): return usage + (self.__global_struct.get_size_in_whole_words() * BYTES_PER_WORD) - def get_global_values(self): + def get_global_values(self, ts): """ Get the global values to be written to the machine for this model :return: A list with the same length as self.global_struct.field_types @@ -69,10 +69,10 @@ def get_global_values(self): return numpy.zeros(0, dtype="uint32") @overrides(AbstractStandardNeuronComponent.get_data) - def get_data(self, parameters, 
state_variables, vertex_slice): + def get_data(self, parameters, state_variables, vertex_slice, ts): super_data = super(AbstractNeuronModel, self).get_data( - parameters, state_variables, vertex_slice) - values = self.get_global_values() + parameters, state_variables, vertex_slice, ts) + values = self.get_global_values(ts) global_data = self.__global_struct.get_data(values) return numpy.concatenate([global_data, super_data]) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_izh.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_izh.py index e715e3661b..64a2189727 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_izh.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_izh.py @@ -15,7 +15,6 @@ from spinn_utilities.overrides import overrides from data_specification.enums import DataType -from pacman.executor.injection_decorator import inject_items from .abstract_neuron_model import AbstractNeuronModel A = 'a' @@ -87,15 +86,12 @@ def get_units(self, variable): def has_variable(self, variable): return variable in UNITS - @inject_items({"machine_time_step": "MachineTimeStep"}) - @overrides(AbstractNeuronModel.get_global_values, - additional_arguments={'machine_time_step'}) - def get_global_values(self, machine_time_step): + @overrides(AbstractNeuronModel.get_global_values) + def get_global_values(self, ts): # pylint: disable=arguments-differ - return [float(machine_time_step)/1000.0] + return [float(ts) / 1000.0] - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) + @overrides(AbstractNeuronModel.get_values) def get_values(self, parameters, state_variables, vertex_slice, ts): # pylint: disable=arguments-differ diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_leaky_integrate_and_fire.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_leaky_integrate_and_fire.py index b17f0c1ca3..851f5c1c9c 100644 --- 
a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_leaky_integrate_and_fire.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_leaky_integrate_and_fire.py @@ -16,7 +16,6 @@ import numpy from spinn_utilities.overrides import overrides from data_specification.enums import DataType -from pacman.executor.injection_decorator import inject_items from .abstract_neuron_model import AbstractNeuronModel V = "v" @@ -98,8 +97,7 @@ def get_units(self, variable): def has_variable(self, variable): return variable in UNITS - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractNeuronModel.get_values, additional_arguments={'ts'}) + @overrides(AbstractNeuronModel.get_values) def get_values(self, parameters, state_variables, vertex_slice, ts): # pylint: disable=arguments-differ diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_alpha.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_alpha.py index 798fa6b59d..cd717366eb 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_alpha.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_alpha.py @@ -16,7 +16,6 @@ import numpy from spinn_utilities.overrides import overrides from data_specification.enums import DataType -from pacman.executor.injection_decorator import inject_items from .abstract_synapse_type import AbstractSynapseType EXC_RESPONSE = "exc_response" @@ -99,8 +98,7 @@ def get_units(self, variable): def has_variable(self, variable): return variable in UNITS - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractSynapseType.get_values, additional_arguments={'ts'}) + @overrides(AbstractSynapseType.get_values) def get_values(self, parameters, state_variables, vertex_slice, ts): # pylint: disable=arguments-differ diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_delta.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_delta.py index 619c456e67..5ee0da7587 100644 --- 
a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_delta.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_delta.py @@ -62,7 +62,7 @@ def has_variable(self, variable): return variable in UNITS @overrides(AbstractSynapseType.get_values) - def get_values(self, parameters, state_variables, vertex_slice): + def get_values(self, parameters, state_variables, vertex_slice, ts): # Add the rest of the data return [state_variables[ISYN_EXC], state_variables[ISYN_INH]] diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_dual_exponential.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_dual_exponential.py index 5f2f163574..983f139ad2 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_dual_exponential.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_dual_exponential.py @@ -16,7 +16,6 @@ import numpy from spinn_utilities.overrides import overrides from data_specification.enums import DataType -from pacman.executor.injection_decorator import inject_items from .abstract_synapse_type import AbstractSynapseType TAU_SYN_E = 'tau_syn_E' @@ -89,8 +88,7 @@ def get_units(self, variable): def has_variable(self, variable): return variable in UNITS - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractSynapseType.get_values, additional_arguments={'ts'}) + @overrides(AbstractSynapseType.get_values) def get_values(self, parameters, state_variables, vertex_slice, ts): # pylint: disable=arguments-differ diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_exponential.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_exponential.py index e506493618..f2512bf671 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_exponential.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_exponential.py @@ -16,7 +16,6 @@ import numpy from spinn_utilities.overrides import overrides from data_specification.enums import DataType -from 
pacman.executor.injection_decorator import inject_items from .abstract_synapse_type import AbstractSynapseType TAU_SYN_E = 'tau_syn_E' @@ -74,8 +73,7 @@ def get_units(self, variable): def has_variable(self, variable): return variable in UNITS - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractSynapseType.get_values, additional_arguments={'ts'}) + @overrides(AbstractSynapseType.get_values) def get_values(self, parameters, state_variables, vertex_slice, ts): # pylint: disable=arguments-differ diff --git a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_semd.py b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_semd.py index 2660cb4b24..28c2cb12a5 100644 --- a/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_semd.py +++ b/spynnaker/pyNN/models/neuron/synapse_types/synapse_type_semd.py @@ -16,7 +16,6 @@ import numpy from spinn_utilities.overrides import overrides from data_specification.enums import DataType -from pacman.executor.injection_decorator import inject_items from .abstract_synapse_type import AbstractSynapseType TAU_SYN_E = 'tau_syn_E' @@ -101,8 +100,7 @@ def get_units(self, variable): def has_variable(self, variable): return variable in UNITS - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractSynapseType.get_values, additional_arguments={'ts'}) + @overrides(AbstractSynapseType.get_values) def get_values(self, parameters, state_variables, vertex_slice, ts): tsfloat = float(ts) / 1000.0 diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_maass_stochastic.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_maass_stochastic.py index 777a55cb0d..5dd4128b36 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_maass_stochastic.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_maass_stochastic.py @@ -15,7 +15,6 @@ from spinn_utilities.overrides import overrides from data_specification.enums import DataType -from 
pacman.executor.injection_decorator import inject_items from .abstract_threshold_type import AbstractThresholdType DU_TH = "du_th" @@ -69,8 +68,7 @@ def get_units(self, variable): def has_variable(self, variable): return variable in UNITS - @inject_items({"ts": "MachineTimeStep"}) - @overrides(AbstractThresholdType.get_values, additional_arguments={'ts'}) + @overrides(AbstractThresholdType.get_values) def get_values(self, parameters, state_variables, vertex_slice, ts): # pylint: disable=arguments-differ diff --git a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_static.py b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_static.py index 2c754a4f34..bb81d11a84 100644 --- a/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_static.py +++ b/spynnaker/pyNN/models/neuron/threshold_types/threshold_type_static.py @@ -54,7 +54,7 @@ def has_variable(self, variable): return variable in UNITS @overrides(AbstractThresholdType.get_values) - def get_values(self, parameters, state_variables, vertex_slice): + def get_values(self, parameters, state_variables, vertex_slice, ts): # Add the rest of the data return [parameters[V_THRESH]] diff --git a/unittests/test_populations/test_vertex.py b/unittests/test_populations/test_vertex.py index e976502741..f2334f8555 100644 --- a/unittests/test_populations/test_vertex.py +++ b/unittests/test_populations/test_vertex.py @@ -39,7 +39,7 @@ def add_parameters(self, parameters): def add_state_variables(self, state_variables): pass - def get_values(self, parameters, state_variables, vertex_slice): + def get_values(self, parameters, state_variables, vertex_slice, ts): return numpy.zeros(dtype="uint32") def update_values(self, values, parameters, state_variables): @@ -81,7 +81,7 @@ def add_state_variables(self, state_variables): state_variables["foo"] = self._foo state_variables["bar"] = self._bar - def get_values(self, parameters, state_variables, vertex_slice): + def get_values(self, parameters, state_variables, 
vertex_slice, ts): return numpy.zeros(dtype="uint32") def update_values(self, values, parameters, state_variables): From 6eecc8bddf4710019cec81918db45d3e305421b5 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Thu, 23 Apr 2020 15:51:06 +0100 Subject: [PATCH 024/198] Missed bits --- .../implementations/abstract_standard_neuron_component.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py b/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py index 98bcb0fa6d..4f26a20552 100644 --- a/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py +++ b/spynnaker/pyNN/models/neuron/implementations/abstract_standard_neuron_component.py @@ -104,8 +104,8 @@ def get_values(self, parameters, state_variables, vertex_slice, ts): :rtype: A list of (single value or list of values or RangedList) """ - def get_data(self, parameters, state_variables, vertex_slice): - """ Get the data to be written to the machine for this model + def get_data(self, parameters, state_variables, vertex_slice, ts): + """ Get the data *to be written to the machine* for this model. 
:param parameters: The holder of the parameters :type parameters:\ @@ -116,7 +116,7 @@ def get_data(self, parameters, state_variables, vertex_slice): :param vertex_slice: The slice of the vertex to generate parameters for :rtype: numpy array of uint32 """ - values = self.get_values(parameters, state_variables, vertex_slice) + values = self.get_values(parameters, state_variables, vertex_slice, ts) return self.struct.get_data( values, vertex_slice.lo_atom, vertex_slice.n_atoms) From ddb6380ef5392678cf3a5334fb537aab30d5e1a7 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Thu, 30 Apr 2020 15:24:18 +0100 Subject: [PATCH 025/198] Add required argument --- .../pyNN/models/neuron/abstract_pynn_neuron_model_standard.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py index dbe3dc2426..a9f3a9def2 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py @@ -55,10 +55,10 @@ def __init__( additional_arguments={"n_steps_per_timestep"}) def create_vertex( self, n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, incoming_spike_buffer_size, + ring_buffer_sigma, min_weights, incoming_spike_buffer_size, n_steps_per_timestep): # pylint: disable=arguments-differ self._model.n_steps_per_timestep = n_steps_per_timestep return super(AbstractPyNNNeuronModelStandard, self).create_vertex( n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, incoming_spike_buffer_size) + ring_buffer_sigma, min_weights, incoming_spike_buffer_size) From 76c9ffa57fe054e6391f7804a89ce86115ad4964 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Thu, 30 Apr 2020 15:37:17 +0100 Subject: [PATCH 026/198] Min weight could be 0 --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 2fc7996452..504940ca74 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -379,7 +379,8 @@ def _write_synapse_parameters( spec.write_value(w, data_type=DataType.S1615) # Return the weight scaling factors - return numpy.array([(1 / w) * weight_scale for w in min_weights]) + return numpy.array([(1 / w) * weight_scale if w != 0 else 0 + for w in min_weights]) def _write_padding( self, spec, synaptic_matrix_region, next_block_start_address): From 781a86c364cb3bf365b73001f02d8960a8096de0 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 08:28:53 +0100 Subject: [PATCH 027/198] Use closest representable value for minimum weight to avoid issues --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 504940ca74..722a9216fd 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -761,7 +761,11 @@ def _calculate_min_weights( if weight_min != 0: min_weights[synapse_type] = min( min_weights[synapse_type], weight_min) - return [m if m != sys.maxsize else 0 for m in min_weights] + + # Convert values to their closest representable value to ensure + # that division works for the minimum value + return [DataType.S1615.closest_representable_value(m) + if m != sys.maxsize else 0 for m in min_weights] def _get_min_weights( self, application_vertex, application_graph, weight_scale): From e72e0b13ce93fd98d74b440afa981adb3f70a94b Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 10:06:56 +0100 Subject: [PATCH 028/198] Be careful if the minimum representable rounds to 0! 
--- spynnaker/pyNN/models/neuron/synaptic_manager.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 722a9216fd..c32cc99385 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -764,8 +764,14 @@ def _calculate_min_weights( # Convert values to their closest representable value to ensure # that division works for the minimum value - return [DataType.S1615.closest_representable_value(m) - if m != sys.maxsize else 0 for m in min_weights] + min_weights = [DataType.S1615.closest_representable_value(m) + if m != sys.maxsize else 0 for m in min_weights] + + # The minimum weight shouldn't be 0 unless set above (and then it + # doesn't matter that we use the min as there are no weights); so + # set the weight to the smallest representable value if 0 + return [m if m > 0 else DataType.S1615.decode_from_int(1) + for m in min_weights] def _get_min_weights( self, application_vertex, application_graph, weight_scale): From bc6fecd08da62105d601a94f7a75b86ed2724550 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 10:59:43 +0100 Subject: [PATCH 029/198] Return the minimum --- .../stdp/weight_dependence/weight_dependence_multiplicative.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py index 5461f6a905..d369abeb48 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py @@ -114,7 +114,7 @@ def weight_maximum(self): @overrides(AbstractWeightDependence.weight_change_minimum) def 
weight_change_minimum(self, min_delta): pot, dep = min_delta - return [pot * self.A_plus, dep * self.A_minus] + return min(pot * self.A_plus, dep * self.A_minus) @overrides(AbstractWeightDependence.get_parameter_names) def get_parameter_names(self): From 46352b62c17164de249eec428d9a7a5ab58edca1 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 11:07:37 +0100 Subject: [PATCH 030/198] Support empty from-list again --- .../models/neural_projections/connectors/from_list_connector.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index 4867efb881..8ef22e3ced 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -258,6 +258,8 @@ def conn_list(self): def get_n_connections(self, pre_slices, post_slices, pre_hi, post_hi): self._split_connections(pre_slices, post_slices) + if not self.__split_conn_list: + return 0 return len(self.__split_conn_list[(pre_hi, post_hi)]) @conn_list.setter From f49bff3fc2affe2b45f42b59f962a8bceacb2922 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 11:17:35 +0100 Subject: [PATCH 031/198] Push get_weight_minimum down to the connector --- .../connectors/abstract_connector.py | 13 +++++++++++++ .../connectors/from_list_connector.py | 7 +++++++ .../synapse_dynamics/abstract_synapse_dynamics.py | 4 +--- 3 files changed, 21 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 8c24cdd78d..84974f68ba 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -267,6 +267,19 @@ def 
get_weight_mean(self, weights): return numpy.mean(weights) raise Exception("Unrecognised weight format") + def get_weight_minimum(self, weights, sigma): + """ Get the minimum of the weights. This default uses the mean and the + variance to avoid needing to add this to all subclasses, but + subclasses can override it if it makes sense to do so. + + :param weights: + :type weights: ~numpy.ndarray or ~pyNN.random.NumpyRNG or int or float + or list(int) or list(float) + :param float sigma: The number of standard deviations of accuracy + """ + return (self.get_weight_mean(weights) - + math.sqrt(self.get_weight_variance(weights)) * sigma) + def _get_weight_maximum(self, weights, n_connections): """ Get the maximum of the weights. diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index 8ef22e3ced..5fbe4deb14 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -211,6 +211,13 @@ def get_weight_variance(self, weights): else: return numpy.var(numpy.abs(self.__weights)) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, weights, sigma): + if self.__weights is None: + return numpy.amin(weights) + else: + return numpy.amin(numpy.abs(self.__weights)) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index 855bee595a..ef21cb9214 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -181,9 +181,7 @@ def get_weight_minimum(self, connector, synapse_info, sigma): 
:param SynapseInformation synapse_info: :param float sigma: The number of standard deviations of accuracy """ - weights = synapse_info.weights - return (connector.get_weight_mean(weights) - - connector.get_weight_variance(weights) * sigma) + return connector.get_weight_minimum(synapse_info.weights, sigma) def convert_per_connection_data_to_rows( self, connection_row_indices, n_rows, data): From 15827e0d17121b3b2594e71622e872148f8326a1 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 11:40:49 +0100 Subject: [PATCH 032/198] Fix missing arguments --- .../model_tests/neuron/test_synaptic_manager.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index 08154f6fa7..d9d21d151d 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -119,7 +119,7 @@ def test_retrieve_synaptic_block(self): synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, spikes_per_second=100.0, - config=config, + min_weights=None, config=config, population_table_type=MockMasterPopulationTable( {key: [(1, 0, False)]}), synapse_io=MockSynapseIO()) @@ -173,7 +173,7 @@ def test_retrieve_direct_block(self): synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, spikes_per_second=100.0, - config=config, + min_weights=None, config=config, population_table_type=MockMasterPopulationTable( {key: [(1, 0, True), (1, n_rows * 4, True)]}), synapse_io=MockSynapseIO()) @@ -289,7 +289,7 @@ def test_write_synaptic_matrix_and_master_population_table(self): synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, config=config) + spikes_per_second=100.0, min_weights=None, config=config) synaptic_manager._write_synaptic_matrix_and_master_population_table( spec, [post_vertex_slice], 
post_slice_index, post_vertex, post_vertex_slice, all_syn_block_sz, weight_scales, @@ -403,7 +403,7 @@ def test_set_synapse_dynamics(self): AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths) synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, config=config) + spikes_per_second=100.0, min_weights=None, config=config) static = SynapseDynamicsStatic() stdp = SynapseDynamicsSTDP( @@ -495,7 +495,7 @@ def test_set_synapse_dynamics(self): # Try starting again to get a couple more combinations synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, config=config) + spikes_per_second=100.0, min_weights=None, config=config) # STDP followed by structural STDP should result in Structural STDP synaptic_manager.synapse_dynamics = stdp @@ -516,7 +516,7 @@ def test_set_synapse_dynamics(self): # One more time! synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, config=config) + spikes_per_second=100.0, min_weights=None, config=config) # Static followed by static structural should result in static # structural @@ -552,7 +552,7 @@ def test_set_synapse_dynamics(self): # OK, just one more, honest synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, config=config) + spikes_per_second=100.0, min_weights=None, config=config) synaptic_manager.synapse_dynamics = static_struct synaptic_manager.synapse_dynamics = stdp_struct From ad425fc12a574b127312cfb3d661279fb3b3782f Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 11:45:38 +0100 Subject: [PATCH 033/198] Add new option --- unittests/mocks.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/unittests/mocks.py b/unittests/mocks.py index 299d83d317..b452d15bcb 100644 --- a/unittests/mocks.py +++ b/unittests/mocks.py @@ -92,7 +92,8 @@ def __init__(self): {"spikes_per_second": "30", 
"incoming_spike_buffer_size": "256", "ring_buffer_sigma": "5", - "one_to_one_connection_dtcm_max_bytes": "0"} + "one_to_one_connection_dtcm_max_bytes": "0", + "min_weights": "None"} self.config["Buffers"] = {"time_between_requests": "10", "minimum_buffer_sdram": "10", "use_auto_pause_and_resume": "True", From 8a1913963910597e1c27d4afe24fc9aeae5b54af Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 11:48:22 +0100 Subject: [PATCH 034/198] Fix super call --- unittests/test_populations/test_vertex.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/unittests/test_populations/test_vertex.py b/unittests/test_populations/test_vertex.py index f2334f8555..6b863bb375 100644 --- a/unittests/test_populations/test_vertex.py +++ b/unittests/test_populations/test_vertex.py @@ -117,7 +117,7 @@ def __init__(self): n_neurons=5, label="Mock", constraints=None, max_atoms_per_core=None, spikes_per_second=None, ring_buffer_sigma=None, incoming_spike_buffer_size=None, - neuron_impl=foo_bar.model, pynn_model=foo_bar) + neuron_impl=foo_bar.model, pynn_model=foo_bar, min_weights=None) def test_initializable(): From 008e07c2bebdd13cbc019322b8f23ec9b2314dd7 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 11:51:49 +0100 Subject: [PATCH 035/198] Use to remove warning --- .../src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c index e2e5aad4cb..e4eb9e933a 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c @@ -179,6 +179,7 @@ static inline pre_event_history_t *plastic_event_history( void synapse_dynamics_print_plastic_synapses( address_t plastic_region_address, address_t fixed_region_address, REAL *min_weights) { 
+ use(min_weights); use(plastic_region_address); use(fixed_region_address); From 38b21b0ad26e85c3c101b59987fbee1ee36f3b7b Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 11:57:52 +0100 Subject: [PATCH 036/198] flake8 --- .../models/neuron/plasticity/stdp/common/plasticity_helpers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py index 4f80f2ba8f..9c477036ac 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py @@ -44,6 +44,7 @@ def get_exp_lut_array(time_step, time_constant, shift=0): header = numpy.array([len(a), shift], dtype="uint16") return numpy.concatenate((header, a.astype("uint16"))).view("uint32") + def get_min_lut_value(exp_lut_array): """ Get the smallest non-zero value of an exponential lookup array,\ or zero if no such value From 9ddc30be4927ccd650b936ccdfad414d33f60e36 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 11:59:29 +0100 Subject: [PATCH 037/198] Call super --- .../connectors/abstract_generate_connector_on_machine.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py index f73339c435..066ad73d33 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py @@ -62,8 +62,8 @@ class AbstractGenerateConnectorOnMachine(with_metaclass( ] def __init__(self, safe=True, callback=None, verbose=False): - AbstractConnector.__init__( - self, safe=safe, callback=callback, verbose=verbose) + 
super(AbstractGenerateConnectorOnMachine, self).__init__( + safe=safe, callback=callback, verbose=verbose) self.__delay_seed = dict() self.__weight_seed = dict() self.__connector_seed = dict() From 359fe5fff515312d5dfe0ae219d5a113026765b5 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 12:00:52 +0100 Subject: [PATCH 038/198] Ignore unused --- .../connectors/abstract_generate_connector_on_machine.py | 1 + 1 file changed, 1 insertion(+) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py index 066ad73d33..5a0adae68a 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py @@ -230,6 +230,7 @@ def gen_connector_params( :rtype: numpy array of uint32 """ + # pylint: disable=unused-argument return numpy.zeros(0, dtype="uint32") @property From a1443907c26686aa9fc79d69969b8ac70eb7bf18 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 12:02:31 +0100 Subject: [PATCH 039/198] Remove unused method --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index c32cc99385..19dc123c98 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -732,18 +732,6 @@ def __write_row_data( block_addr += len(row_data) * BYTES_PER_WORD return block_addr, single_addr, index - def _get_ring_buffer_shifts( - self, application_vertex, application_graph, machine_timestep, - weight_scale): - """ Get the ring buffer shifts for this vertex - """ - if self.__ring_buffer_shifts is None: - self.__ring_buffer_shifts = \ - 
self._get_ring_buffer_to_input_left_shifts( - application_vertex, application_graph, machine_timestep, - weight_scale) - return self.__ring_buffer_shifts - def _calculate_min_weights( self, application_vertex, application_graph, weight_scale): min_weights = [sys.maxsize for _ in range(self.__n_synapse_types)] From deec0db10d29dfaa02606b241421442e51a26fdf Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 12:03:21 +0100 Subject: [PATCH 040/198] Ignore unused --- .../pyNN/models/neuron/neuron_models/abstract_neuron_model.py | 1 + 1 file changed, 1 insertion(+) diff --git a/spynnaker/pyNN/models/neuron/neuron_models/abstract_neuron_model.py b/spynnaker/pyNN/models/neuron/neuron_models/abstract_neuron_model.py index de1ad4d70b..34b3f86329 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/abstract_neuron_model.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/abstract_neuron_model.py @@ -76,6 +76,7 @@ def get_global_values(self, ts): :return: A list with the same length as self.global_struct.field_types :rtype: list(int or float) or ~numpy.ndarray """ + # pylint: disable=unused-argument return numpy.zeros(0, dtype="uint32") @overrides(AbstractStandardNeuronComponent.get_data) From 0c3b83ce9c7a8eb81132d13d7a05d89e6157d93e Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 13:25:29 +0100 Subject: [PATCH 041/198] We also need representable values for 1/w --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 19dc123c98..137f0e37f9 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -379,8 +379,9 @@ def _write_synapse_parameters( spec.write_value(w, data_type=DataType.S1615) # Return the weight scaling factors - return numpy.array([(1 / w) * weight_scale if w != 0 else 0 - for w in min_weights]) 
+ return numpy.array([ + DataType.S1615.closest_representable_value((1 / w) * weight_scale) + if w != 0 else 0 for w in min_weights]) def _write_padding( self, spec, synaptic_matrix_region, next_block_start_address): From 5c6c85222f70d18e9669d6152b8f8d6bb8179aed Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 13:47:22 +0100 Subject: [PATCH 042/198] Get minimum weight from Kernel connector --- .../connectors/abstract_connector.py | 32 +++++++++++++++++++ .../connectors/kernel_connector.py | 11 +++++++ 2 files changed, 43 insertions(+) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 84974f68ba..6b8db819b7 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -267,6 +267,38 @@ def get_weight_mean(self, weights): return numpy.mean(weights) raise Exception("Unrecognised weight format") + def _get_weight_minimum(self, weights, n_connections): + """ Get the minimum of the weights. 
+ + :param weights: + :type weights: ~numpy.ndarray or ~pyNN.random.NumpyRNG or int or float + or list(int) or list(float) + :param int n_connections: + :rtype: float + """ + if isinstance(weights, RandomDistribution): + mean_weight = utility_calls.get_mean(weights) + if mean_weight < 0: + max_weight = utility_calls.get_maximum_probable_value( + weights, n_connections) + high = utility_calls.high(weights) + if high is None: + return abs(max_weight) + return abs(max(max_weight, high)) + else: + min_weight = utility_calls.get_minimum_probable_value( + weights, n_connections) + low = utility_calls.low(weights) + if low is None: + return abs(min_weight) + return abs(min(min_weight, low)) + + elif numpy.isscalar(weights): + return abs(weights) + elif hasattr(weights, "__getitem__"): + return numpy.amax(numpy.abs(weights)) + raise Exception("Unrecognised weight format") + def get_weight_minimum(self, weights, sigma): """ Get the minimum of the weights. This default uses the mean and the variance to avoid needing to add this to all subclasses, but diff --git a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py index 02fe53913f..9590f27f8b 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py @@ -286,6 +286,17 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum(synapse_info.weights, n_conns) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, weights, sigma): + # I think this is overestimated, but not by much + n_conns = ( + self._pre_w * self._pre_h * self._kernel_w * self._kernel_h) + # Use the kernel weights if user has supplied them + if self._krn_weights is not None: + return self._get_weight_minimum(self._krn_weights, n_conns) + + return self._get_weight_minimum(weights, n_conns) + def __repr__(self): return
"KernelConnector(shape_kernel[{},{}])".format( self._kernel_w, self._kernel_h) From 0aaeef115aff7f04e00f7690f9932c1c8487bae5 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Fri, 1 May 2020 15:03:58 +0100 Subject: [PATCH 043/198] Make sure the min weight and the scale stay related --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 137f0e37f9..8895ca8c0f 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -379,9 +379,8 @@ def _write_synapse_parameters( spec.write_value(w, data_type=DataType.S1615) # Return the weight scaling factors - return numpy.array([ - DataType.S1615.closest_representable_value((1 / w) * weight_scale) - if w != 0 else 0 for w in min_weights]) + return numpy.array([(1 / w) * weight_scale if w != 0 else 0 + for w in min_weights]) def _write_padding( self, spec, synaptic_matrix_region, next_block_start_address): @@ -733,6 +732,14 @@ def __write_row_data( block_addr += len(row_data) * BYTES_PER_WORD return block_addr, single_addr, index + def __get_closest_weight(self, value): + """ Get the best representation of the weight so that both weight and + 1 / w work + """ + if abs(value) < 1.0: + return DataType.S1615.closest_representable_value(value) + return 1 / (DataType.S1615.closest_representable_value(1 / value)) + def _calculate_min_weights( self, application_vertex, application_graph, weight_scale): min_weights = [sys.maxsize for _ in range(self.__n_synapse_types)] @@ -753,7 +760,7 @@ def _calculate_min_weights( # Convert values to their closest representable value to ensure # that division works for the minimum value - min_weights = [DataType.S1615.closest_representable_value(m) + min_weights = [self.__get_closest_weight(m) if m != sys.maxsize else 0 for m in min_weights] # The minimum 
weight shouldn't be 0 unless set above (and then it From 1b8782b38c6a90b918abe4ea21f4b1a5b19abda1 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Mon, 4 May 2020 09:05:06 +0100 Subject: [PATCH 044/198] It looks like we can't avoid updating all the connectors --- .../connectors/abstract_connector.py | 10 +++------- .../connectors/all_to_all_connector.py | 6 ++++++ .../neural_projections/connectors/array_connector.py | 5 +++++ .../neural_projections/connectors/csa_connector.py | 5 +++++ .../distance_dependent_probability_connector.py | 10 ++++++++++ .../connectors/fixed_number_post_connector.py | 5 +++++ .../connectors/fixed_number_pre_connector.py | 6 ++++++ .../connectors/fixed_probability_connector.py | 9 +++++++++ .../connectors/from_list_connector.py | 4 ++-- .../connectors/index_based_probability_connector.py | 9 +++++++++ .../neural_projections/connectors/kernel_connector.py | 4 ++-- .../connectors/multapse_connector.py | 5 +++++ .../connectors/one_to_one_connector.py | 6 ++++++ .../connectors/small_world_connector.py | 6 ++++++ .../synapse_dynamics/abstract_synapse_dynamics.py | 5 ++--- .../neuron/synapse_dynamics/synapse_dynamics_stdp.py | 2 +- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 +- 17 files changed, 83 insertions(+), 16 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 6b8db819b7..62097d9cc7 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -299,18 +299,14 @@ def _get_weight_minimum(self, weights, n_connections): return numpy.amax(numpy.abs(weights)) raise Exception("Unrecognised weight format") - def get_weight_minimum(self, weights, sigma): - """ Get the minimum of the weights. 
This default uses the mean and the - variance to avoid needing to add this to all subclasses, but - subclasses can override it if it makes sense to do so. + @abstractmethod + def get_weight_minimum(self, synapse_info): + """ Get the minimum of the weights. :param weights: :type weights: ~numpy.ndarray or ~pyNN.random.NumpyRNG or int or float or list(int) or list(float) - :param float sigma: The number of standard deviations of accuracy """ - return (self.get_weight_mean(weights) - - math.sqrt(self.get_weight_variance(weights)) * sigma) def _get_weight_maximum(self, weights, n_connections): """ Get the maximum of the weights. diff --git a/spynnaker/pyNN/models/neural_projections/connectors/all_to_all_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/all_to_all_connector.py index 7052dc81a7..a1523e1ed1 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/all_to_all_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/all_to_all_connector.py @@ -97,6 +97,12 @@ def get_weight_maximum(self, synapse_info): n_conns = synapse_info.n_pre_neurons * synapse_info.n_post_neurons return self._get_weight_maximum(synapse_info.weights, n_conns) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, synapse_info): + # pylint: disable=too-many-arguments + n_conns = synapse_info.n_pre_neurons * synapse_info.n_post_neurons + return self._get_weight_minimum(synapse_info.weights, n_conns) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py index 1242cbd619..90285e24f7 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py @@ -83,6 +83,11 @@ def get_weight_maximum(self, 
synapse_info): return self._get_weight_maximum( synapse_info.weights, self.__n_total_connections) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, synapse_info): + return self._get_weight_minimum( + synapse_info.weights, self.__n_total_connections) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py index fcad949e21..93c70305ba 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py @@ -110,6 +110,11 @@ def get_weight_maximum(self, synapse_info): n_conns_max = synapse_info.n_pre_neurons * synapse_info.n_post_neurons return self._get_weight_maximum(synapse_info.weights, n_conns_max) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, synapse_info): + n_conns_max = synapse_info.n_pre_neurons * synapse_info.n_post_neurons + return self._get_weight_minimum(synapse_info.weights, n_conns_max) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py index 84f522378b..3bbe07d97f 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py @@ -156,6 +156,16 @@ def get_weight_maximum(self, synapse_info): synapse_info.n_pre_neurons * synapse_info.n_post_neurons, numpy.amax(self.__probs))) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, 
synapse_info): + # pylint: disable=too-many-arguments + return self._get_weight_minimum( + synapse_info.weights, + utility_calls.get_probable_maximum_selected( + synapse_info.n_pre_neurons * synapse_info.n_post_neurons, + synapse_info.n_pre_neurons * synapse_info.n_post_neurons, + numpy.amax(self.__probs))) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, post_slice_index, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py index 96cda60cac..96d0f0de6d 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py @@ -198,6 +198,11 @@ def get_weight_maximum(self, synapse_info): n_connections = synapse_info.n_pre_neurons * self.__n_post return self._get_weight_maximum(synapse_info.weights, n_connections) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, synapse_info): + n_connections = synapse_info.n_pre_neurons * self.__n_post + return self._get_weight_minimum(synapse_info.weights, n_connections) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py index dcb516130d..e8e1c18f77 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py @@ -202,6 +202,12 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum( synapse_info.weights, self.__n_pre * synapse_info.n_post_neurons) + 
@overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, synapse_info): + # pylint: disable=too-many-arguments + return self._get_weight_minimum( + synapse_info.weights, self.__n_pre * synapse_info.n_post_neurons) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py index c062b55b30..271e27f75b 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py @@ -107,6 +107,15 @@ def get_weight_maximum(self, synapse_info): self._p_connect) return self._get_weight_maximum(synapse_info.weights, n_connections) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, synapse_info): + # pylint: disable=too-many-arguments + n_connections = utility_calls.get_probable_maximum_selected( + synapse_info.n_pre_neurons * synapse_info.n_post_neurons, + synapse_info.n_pre_neurons * synapse_info.n_post_neurons, + self._p_connect) + return self._get_weight_minimum(synapse_info.weights, n_connections) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index 5fbe4deb14..a0a6be8ec0 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -212,9 +212,9 @@ def get_weight_variance(self, weights): return numpy.var(numpy.abs(self.__weights)) @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, 
weights, sigma): + def get_weight_minimum(self, synapse_info): if self.__weights is None: - return numpy.amin(weights) + return numpy.amin(synapse_info.weights) else: return numpy.amin(numpy.abs(self.__weights)) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py index e09a77c6b7..2dceb23370 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py @@ -115,6 +115,15 @@ def get_weight_maximum(self, synapse_info): numpy.amax(self.__probs)) return self._get_weight_maximum(synapse_info.weights, n_connections) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, synapse_info): + self._update_probs_from_index_expression(synapse_info) + n_connections = utility_calls.get_probable_maximum_selected( + synapse_info.n_pre_neurons * synapse_info.n_post_neurons, + synapse_info.n_pre_neurons * synapse_info.n_post_neurons, + numpy.amax(self.__probs)) + return self._get_weight_minimum(synapse_info.weights, n_connections) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py index 9590f27f8b..1bbae65f60 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py @@ -287,7 +287,7 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum(synapse_info.weights, n_conns) @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, weights, sigma): + def get_weight_minimum(self, synapse_info): # I think this is 
overestimated, but not by much n_conns = ( self._pre_w * self._pre_h * self._kernel_w * self._kernel_h) @@ -295,7 +295,7 @@ def get_weight_minimum(self, weights, sigma): if self._krn_weights is not None: return self._get_weight_minimum(self._krn_weights, n_conns) - return self._get_weight_minimum(weights, n_conns) + return self._get_weight_minimum(synapse_info.weights, n_conns) def __repr__(self): return "KernelConnector(shape_kernel[{},{}])".format( diff --git a/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py index 0a3e41742d..6a1fb8d3b2 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py @@ -153,6 +153,11 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum( synapse_info.weights, self.__num_synapses) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, synapse_info): + return self._get_weight_minimum( + synapse_info.weights, self.__num_synapses) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py index cbbdee0adc..4bab41acf6 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py @@ -81,6 +81,12 @@ def get_weight_maximum(self, synapse_info): synapse_info.weights, max((synapse_info.n_pre_neurons, synapse_info.n_post_neurons))) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, synapse_info): + return self._get_weight_minimum( + synapse_info.weights, + max((synapse_info.n_pre_neurons, synapse_info.n_post_neurons))) 
+ @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py index 4141f3ce91..64ca203708 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py @@ -104,6 +104,12 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum( synapse_info.weights, self.__n_connections) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, synapse_info): + # pylint: disable=too-many-arguments + return self._get_weight_minimum( + synapse_info.weights, self.__n_connections) + @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index ef21cb9214..55c3758708 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -173,15 +173,14 @@ def get_weight_variance(self, connector, weights): # pylint: disable=too-many-arguments return connector.get_weight_variance(weights) - def get_weight_minimum(self, connector, synapse_info, sigma): + def get_weight_minimum(self, connector, synapse_info): """ Get the minimum non-zero weight for the synapses, or 0 if all\ synapses are zero :param AbstractConnector connector: The connector in use :param SynapseInformation synapse_info: - :param float sigma: The number of standard deviations of accuracy """ - return connector.get_weight_minimum(synapse_info.weights, sigma) + return 
connector.get_weight_minimum(synapse_info) def convert_per_connection_data_to_rows( self, connection_row_indices, n_rows, data): diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 122def1525..2582dcfe0e 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -465,7 +465,7 @@ def get_weight_variance(self, connector, weights): return 0.0 @overrides(AbstractPlasticSynapseDynamics.get_weight_minimum) - def get_weight_minimum(self, connector, synapse_info, sigma): + def get_weight_minimum(self, connector, synapse_info): # Use the minimum weight change as the minimum non-zero weight return self.__weight_dependence.weight_change_minimum( self.__timing_dependence.minimum_delta) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 8895ca8c0f..30fb746e45 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -752,7 +752,7 @@ def _calculate_min_weights( connector = synapse_info.connector weight_min = (synapse_dynamics.get_weight_minimum( - connector, synapse_info, self.__ring_buffer_sigma) * + connector, synapse_info) * weight_scale) if weight_min != 0: min_weights[synapse_type] = min( From 873d26789aa79e78947d6750f8b0b73eeb9c2002 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Mon, 4 May 2020 14:53:44 +0100 Subject: [PATCH 045/198] Split the minimum weight and minimum weight change --- .../abstract_synapse_dynamics.py | 9 --------- .../synapse_dynamics/synapse_dynamics_stdp.py | 6 +++--- .../pyNN/models/neuron/synaptic_manager.py | 19 ++++++++++++++----- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py 
b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index 55c3758708..37800c98b4 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -173,15 +173,6 @@ def get_weight_variance(self, connector, weights): # pylint: disable=too-many-arguments return connector.get_weight_variance(weights) - def get_weight_minimum(self, connector, synapse_info): - """ Get the minimum non-zero weight for the synapses, or 0 if all\ - synapses are zero - - :param AbstractConnector connector: The connector in use - :param SynapseInformation synapse_info: - """ - return connector.get_weight_minimum(synapse_info) - def convert_per_connection_data_to_rows( self, connection_row_indices, n_rows, data): """ Converts per-connection data generated from connections into\ diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 2582dcfe0e..49eb23ef4f 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -464,9 +464,9 @@ def get_weight_variance(self, connector, weights): # has to be given as no variance return 0.0 - @overrides(AbstractPlasticSynapseDynamics.get_weight_minimum) - def get_weight_minimum(self, connector, synapse_info): - # Use the minimum weight change as the minimum non-zero weight + def get_weight_min_delta(self): + """ Get the minimum non-zero weight change + """ return self.__weight_dependence.weight_change_minimum( self.__timing_dependence.minimum_delta) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 30fb746e45..11d6ed01bd 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -35,6 +35,7 @@ 
MasterPopTableAsBinarySearch) from spynnaker.pyNN.exceptions import SynapticConfigurationException import sys +from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_stdp import SynapseDynamicsSTDP TIME_STAMP_BYTES = BYTES_PER_WORD @@ -743,21 +744,29 @@ def __get_closest_weight(self, value): def _calculate_min_weights( self, application_vertex, application_graph, weight_scale): min_weights = [sys.maxsize for _ in range(self.__n_synapse_types)] + stdp_min_deltas = [sys.maxsize for _ in range(self.__n_synapse_types)] for app_edge in application_graph.get_edges_ending_at_vertex( application_vertex): if isinstance(app_edge, ProjectionApplicationEdge): for synapse_info in app_edge.synapse_information: synapse_type = synapse_info.synapse_type - synapse_dynamics = synapse_info.synapse_dynamics - connector = synapse_info.connector - weight_min = (synapse_dynamics.get_weight_minimum( - connector, synapse_info) * - weight_scale) + connector = synapse_info.connector + weight_min = connector.get_weight_minimum(synapse_info) + weight_min *= weight_scale if weight_min != 0: min_weights[synapse_type] = min( min_weights[synapse_type], weight_min) + synapse_dynamics = synapse_info.synapse_dynamics + if isinstance(synapse_dynamics, SynapseDynamicsSTDP): + min_delta = synapse_dynamics.get_weight_min_delta() + stdp_min_deltas[synapse_type] = min( + stdp_min_deltas[synapse_type], min_delta) + + # Try to allow STDP weights to get as small as they want to, but try + # to keep a reasonable upper range too + # Convert values to their closest representable value to ensure # that division works for the minimum value min_weights = [self.__get_closest_weight(m) From 70fe2854c1eb8e03317945a0e8303b3798bb60aa Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Mon, 11 May 2020 10:07:44 +0100 Subject: [PATCH 046/198] Fix things that have been fixed in main branch --- .../implementations/neuron_impl_standard.h | 18 +++++++++++------- .../implementations/neuron_impl_standard.py | 1 + 2 
files changed, 12 insertions(+), 7 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h index c225005566..583d61a8a6 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h @@ -226,16 +226,15 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, synapse_param_pointer_t synapse_type = &neuron_synapse_shaping_params[neuron_index]; - // Get the voltage - state_t voltage = neuron_model_get_membrane_voltage(neuron); - recorded_variable_values[V_RECORDING_INDEX] = voltage; - // Store whether the neuron has spiked bool spike = false; // Loop however many times requested for (uint32_t i = n_steps_per_timestep; i > 0; i--) { + // Get the voltage + state_t voltage = neuron_model_get_membrane_voltage(neuron); + // Get the exc and inh values from the synapses input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); input_t* inh_value = synapse_types_get_inhibitory_input(synapse_type); @@ -258,11 +257,13 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, } // Call functions to get the input values to be recorded - recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; - recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + if (i == n_steps_per_timestep) { + recorded_variable_values[V_RECORDING_INDEX] = voltage; + recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; + recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + } // Call functions to convert exc_input and inh_input to current - voltage = neuron_model_get_membrane_voltage(neuron); input_type_convert_excitatory_input_to_current( exc_input_values, input_type, voltage); input_type_convert_inhibitory_input_to_current( @@ -315,6 +316,9 @@ static void neuron_impl_store_neuron_parameters( // return; //} + // 
Skip over the steps per timestep + next += 1; + if (sizeof(global_neuron_params_t)) { log_debug("writing neuron global parameters"); spin1_memcpy(&address[next], global_parameters, diff --git a/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py b/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py index 8efd730ef4..5315cd0b6b 100644 --- a/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py +++ b/spynnaker/pyNN/models/neuron/implementations/neuron_impl_standard.py @@ -176,6 +176,7 @@ def get_data(self, parameters, state_variables, vertex_slice): @overrides(AbstractNeuronImpl.read_data) def read_data( self, data, offset, vertex_slice, parameters, state_variables): + offset += _BASE_SIZE for component in self.__components: offset = component.read_data( data, offset, vertex_slice, parameters, state_variables) From bf6b7b5f0ea35568053b02a86561f87eb5b9d808 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Mon, 11 May 2020 10:10:10 +0100 Subject: [PATCH 047/198] Another fix --- .../pyNN/models/neuron/abstract_pynn_neuron_model_standard.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py index f238703522..50698422f7 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py @@ -17,7 +17,8 @@ from spynnaker.pyNN.models.neuron.implementations import NeuronImplStandard from spinn_utilities.overrides import overrides -_population_parameters = AbstractPyNNNeuronModel.default_population_parameters +_population_parameters = dict( + AbstractPyNNNeuronModel.default_population_parameters) _population_parameters["n_steps_per_timestep"] = 1 class AbstractPyNNNeuronModelStandard(AbstractPyNNNeuronModel): From c85667db9f3f08871401cb185900677f1e103c85 Mon Sep 17 00:00:00 2001 From: 
Andrew Rowley Date: Tue, 12 May 2020 16:25:29 +0100 Subject: [PATCH 048/198] Add new config options --- spynnaker/pyNN/spynnaker.cfg | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/spynnaker.cfg b/spynnaker/pyNN/spynnaker.cfg index e805bf16b9..aaa8b16d3c 100644 --- a/spynnaker/pyNN/spynnaker.cfg +++ b/spynnaker/pyNN/spynnaker.cfg @@ -21,9 +21,18 @@ incoming_spike_buffer_size = 256 # Limit the amount of DTCM used by one-to-one connections one_to_one_connection_dtcm_max_bytes = 2048 -# Auto-compute the minimum weights +# Auto-compute or specify the minimum weights min_weights = None +# Expected maximum time in ms between spikes for STDP. This is used in the +# minimum weight calculation. It is ignored if the minimum weights are +# specified. +max_stdp_spike_delta = 10 + +# Number of standard deviations from the mean to account for in the calculation +# of the minimum weight when a random weight is specified +weight_random_sigma = 2 + [Mapping] # Algorithms below # pacman algorithms are: From c6eb6306fb7e3ba41fed8ddd3bcbfc7ea35fb116 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Tue, 12 May 2020 16:25:39 +0100 Subject: [PATCH 049/198] Flake8 fix --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 11d6ed01bd..20e3cd8815 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -34,8 +34,8 @@ from spynnaker.pyNN.models.neuron.master_pop_table import ( MasterPopTableAsBinarySearch) from spynnaker.pyNN.exceptions import SynapticConfigurationException +from spynnaker.pyNN.models.neuron.synapse_dynamics import SynapseDynamicsSTDP import sys -from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_stdp import SynapseDynamicsSTDP TIME_STAMP_BYTES = BYTES_PER_WORD From 
f1234c740a39f028aec1e474d9699e7ff74a332d Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Tue, 12 May 2020 16:26:01 +0100 Subject: [PATCH 050/198] Different way of doing minimum weights --- .../connectors/abstract_connector.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 62097d9cc7..0d31fcd3f8 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -267,7 +267,7 @@ def get_weight_mean(self, weights): return numpy.mean(weights) raise Exception("Unrecognised weight format") - def _get_weight_minimum(self, weights, n_connections): + def _get_weight_minimum(self, weights, n_connections, min_weight_sigma): """ Get the minimum of the weights. :param weights: @@ -278,16 +278,14 @@ def _get_weight_minimum(self, weights, n_connections): """ if isinstance(weights, RandomDistribution): mean_weight = utility_calls.get_mean(weights) + weight_var = utility_calls.get_variance(weights) + min_weight = mean_weight - weight_var if mean_weight < 0: - max_weight = utility_calls.get_maximum_probable_value( - weights, n_connections) high = utility_calls.high(weights) if high is None: - return abs(max_weight) - return abs(max(max_weight, high)) + return abs(min_weight) + return abs(max(min_weight, high)) else: - min_weight = utility_calls.get_minimum_probable_value( - weights, n_connections) low = utility_calls.low(weights) if low is None: return abs(min_weight) @@ -300,7 +298,7 @@ def _get_weight_minimum(self, weights, n_connections): raise Exception("Unrecognised weight format") @abstractmethod - def get_weight_minimum(self, synapse_info): + def get_weight_minimum(self, synapse_info, weight_random_sigma): """ Get the minimum of the weights. 
:param weights: From 4a938985621dc86cdac776b75b1f240313c4fe08 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Tue, 19 May 2020 16:11:43 +0100 Subject: [PATCH 051/198] Support better minimum weight calculation and provenance gathering --- .../external_device_lif_control.py | 6 +- .../external_device_lif_control_vertex.py | 8 +- .../connectors/abstract_connector.py | 19 ++-- .../connectors/all_to_all_connector.py | 6 -- .../connectors/array_connector.py | 5 -- .../connectors/csa_connector.py | 5 -- ...istance_dependent_probability_connector.py | 10 --- .../connectors/fixed_number_post_connector.py | 5 -- .../connectors/fixed_number_pre_connector.py | 6 -- .../connectors/fixed_probability_connector.py | 9 -- .../connectors/from_list_connector.py | 8 +- .../index_based_probability_connector.py | 9 -- .../connectors/kernel_connector.py | 13 ++- .../connectors/multapse_connector.py | 5 -- .../connectors/one_to_one_connector.py | 6 -- .../connectors/small_world_connector.py | 6 -- .../neuron/abstract_population_vertex.py | 13 ++- .../neuron/abstract_pynn_neuron_model.py | 9 +- .../abstract_pynn_neuron_model_standard.py | 6 +- .../stdp/common/plasticity_helpers.py | 35 ++++++-- .../abstract_timing_dependence.py | 7 +- ...timing_dependence_pfister_spike_triplet.py | 9 +- .../timing_dependence_recurrent.py | 3 +- .../timing_dependence_spike_nearest_pair.py | 9 +- .../timing_dependence_spike_pair.py | 9 +- .../timing_dependence_vogels_2011.py | 6 +- .../synapse_dynamics/synapse_dynamics_stdp.py | 7 +- .../pyNN/models/neuron/synaptic_manager.py | 86 ++++++++++++++++--- 28 files changed, 177 insertions(+), 148 deletions(-) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py index ef656ef089..519da3a09c 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py @@ -99,7 +99,8 
@@ def __init__( @overrides(AbstractPyNNNeuronModelStandard.create_vertex) def create_vertex( self, n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, min_weights, incoming_spike_buffer_size, + ring_buffer_sigma, min_weights, weight_random_sigma, + max_stdp_spike_delta, incoming_spike_buffer_size, n_steps_per_timestep): if n_neurons != len(self._devices): raise ConfigurationException( @@ -110,4 +111,5 @@ def create_vertex( return ExternalDeviceLifControlVertex( self._devices, self._create_edges, max_atoms, self._model, self, self._translator, spikes_per_second, label, ring_buffer_sigma, - min_weights, incoming_spike_buffer_size, constraints) + min_weights, weight_random_sigma, max_stdp_spike_delta, + incoming_spike_buffer_size, constraints) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py index 47f74a3f47..95ef4a3450 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py @@ -51,8 +51,9 @@ class ExternalDeviceLifControlVertex( def __init__( self, devices, create_edges, max_atoms_per_core, neuron_impl, pynn_model, translator=None, spikes_per_second=None, label=None, - ring_buffer_sigma=None, min_weights=None, - incoming_spike_buffer_size=None, constraints=None): + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, + constraints=None): """ :param list(AbstractMulticastControllableDevice) devices: The AbstractMulticastControllableDevice instances to be controlled @@ -70,6 +71,9 @@ def __init__( :param float spikes_per_second: :param str label: :param float ring_buffer_sigma: + :param list min_weights: + :param float weight_random_sigma: + :param float max_stdp_spike_delta: :param int incoming_spike_buffer_size: :param 
list(~pacman.model.constraints.AbstractConstraint) constraints: """ diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 0d31fcd3f8..a814a5cf72 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -267,19 +267,19 @@ def get_weight_mean(self, weights): return numpy.mean(weights) raise Exception("Unrecognised weight format") - def _get_weight_minimum(self, weights, n_connections, min_weight_sigma): + def get_weight_minimum(self, weights, weight_random_sigma): """ Get the minimum of the weights. - :param weights: :type weights: ~numpy.ndarray or ~pyNN.random.NumpyRNG or int or float or list(int) or list(float) - :param int n_connections: + :param int weight_random_sigma: The number of standard deviations from + the mean to allow for when using a random distribution :rtype: float """ if isinstance(weights, RandomDistribution): mean_weight = utility_calls.get_mean(weights) - weight_var = utility_calls.get_variance(weights) - min_weight = mean_weight - weight_var + weight_sd = math.sqrt(utility_calls.get_variance(weights)) + min_weight = mean_weight - (weight_sd * weight_random_sigma) if mean_weight < 0: high = utility_calls.high(weights) if high is None: @@ -297,15 +297,6 @@ def _get_weight_minimum(self, weights, n_connections, min_weight_sigma): return numpy.amax(numpy.abs(weights)) raise Exception("Unrecognised weight format") - @abstractmethod - def get_weight_minimum(self, synapse_info, weight_random_sigma): - """ Get the minimum of the weights. - - :param weights: - :type weights: ~numpy.ndarray or ~pyNN.random.NumpyRNG or int or float - or list(int) or list(float) - """ - def _get_weight_maximum(self, weights, n_connections): """ Get the maximum of the weights. 
diff --git a/spynnaker/pyNN/models/neural_projections/connectors/all_to_all_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/all_to_all_connector.py index a1523e1ed1..7052dc81a7 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/all_to_all_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/all_to_all_connector.py @@ -97,12 +97,6 @@ def get_weight_maximum(self, synapse_info): n_conns = synapse_info.n_pre_neurons * synapse_info.n_post_neurons return self._get_weight_maximum(synapse_info.weights, n_conns) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - # pylint: disable=too-many-arguments - n_conns = synapse_info.n_pre_neurons * synapse_info.n_post_neurons - return self._get_weight_minimum(synapse_info.weights, n_conns) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py index 90285e24f7..1242cbd619 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/array_connector.py @@ -83,11 +83,6 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum( synapse_info.weights, self.__n_total_connections) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - return self._get_weight_minimum( - synapse_info.weights, self.__n_total_connections) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py index 93c70305ba..fcad949e21 100644 --- 
a/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/csa_connector.py @@ -110,11 +110,6 @@ def get_weight_maximum(self, synapse_info): n_conns_max = synapse_info.n_pre_neurons * synapse_info.n_post_neurons return self._get_weight_maximum(synapse_info.weights, n_conns_max) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - n_conns_max = synapse_info.n_pre_neurons * synapse_info.n_post_neurons - return self._get_weight_minimum(synapse_info.weights, n_conns_max) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py index 3bbe07d97f..84f522378b 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/distance_dependent_probability_connector.py @@ -156,16 +156,6 @@ def get_weight_maximum(self, synapse_info): synapse_info.n_pre_neurons * synapse_info.n_post_neurons, numpy.amax(self.__probs))) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - # pylint: disable=too-many-arguments - return self._get_weight_minimum( - synapse_info.weights, - utility_calls.get_probable_maximum_selected( - synapse_info.n_pre_neurons * synapse_info.n_post_neurons, - synapse_info.n_pre_neurons * synapse_info.n_post_neurons, - numpy.amax(self.__probs))) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, post_slice_index, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py 
b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py index ac2fb98a3c..6134c92aaa 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_post_connector.py @@ -197,11 +197,6 @@ def get_weight_maximum(self, synapse_info): n_connections = synapse_info.n_pre_neurons * self.__n_post return self._get_weight_maximum(synapse_info.weights, n_connections) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - n_connections = synapse_info.n_pre_neurons * self.__n_post - return self._get_weight_minimum(synapse_info.weights, n_connections) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py index e8e1c18f77..dcb516130d 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/fixed_number_pre_connector.py @@ -202,12 +202,6 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum( synapse_info.weights, self.__n_pre * synapse_info.n_post_neurons) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - # pylint: disable=too-many-arguments - return self._get_weight_minimum( - synapse_info.weights, self.__n_pre * synapse_info.n_post_neurons) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py index 271e27f75b..c062b55b30 100644 --- 
a/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/fixed_probability_connector.py @@ -107,15 +107,6 @@ def get_weight_maximum(self, synapse_info): self._p_connect) return self._get_weight_maximum(synapse_info.weights, n_connections) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - # pylint: disable=too-many-arguments - n_connections = utility_calls.get_probable_maximum_selected( - synapse_info.n_pre_neurons * synapse_info.n_post_neurons, - synapse_info.n_pre_neurons * synapse_info.n_post_neurons, - self._p_connect) - return self._get_weight_minimum(synapse_info.weights, n_connections) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index a0a6be8ec0..efac3f2688 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -212,11 +212,11 @@ def get_weight_variance(self, weights): return numpy.var(numpy.abs(self.__weights)) @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): + def get_weight_minimum(self, weights, weight_random_sigma): if self.__weights is None: - return numpy.amin(synapse_info.weights) - else: - return numpy.amin(numpy.abs(self.__weights)) + return super(FromListConnector, self).get_weight_minimum( + weights, weight_random_sigma) + return numpy.amin(numpy.abs(self.__weights)) @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( diff --git a/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py 
b/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py index 2dceb23370..e09a77c6b7 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/index_based_probability_connector.py @@ -115,15 +115,6 @@ def get_weight_maximum(self, synapse_info): numpy.amax(self.__probs)) return self._get_weight_maximum(synapse_info.weights, n_connections) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - self._update_probs_from_index_expression(synapse_info) - n_connections = utility_calls.get_probable_maximum_selected( - synapse_info.n_pre_neurons * synapse_info.n_post_neurons, - synapse_info.n_pre_neurons * synapse_info.n_post_neurons, - numpy.amax(self.__probs)) - return self._get_weight_minimum(synapse_info.weights, n_connections) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py index 1bbae65f60..fdd98b4cd0 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py @@ -287,15 +287,14 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum(synapse_info.weights, n_conns) @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - # I think this is overestimated, but not by much - n_conns = ( - self._pre_w * self._pre_h * self._kernel_w * self._kernel_h) - # Use the kernel delays if user has supplied them + def get_weight_minimum(self, weights, weight_random_sigma): + # Use the kernel weights if user has supplied them if self._krn_weights is not None: - return self._get_weight_minimum(self._krn_weights, 
n_conns) + return self.get_weight_minimum( + self._krn_weights, weight_random_sigma) - return self._get_weight_minimum(synapse_info.weights, n_conns) + return super(KernelConnector, self).get_weight_minimum( + weights, weight_random_sigma) def __repr__(self): return "KernelConnector(shape_kernel[{},{}])".format( diff --git a/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py index 6a1fb8d3b2..0a3e41742d 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py @@ -153,11 +153,6 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum( synapse_info.weights, self.__num_synapses) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - return self._get_weight_minimum( - synapse_info.weights, self.__num_synapses) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py index 4bab41acf6..cbbdee0adc 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/one_to_one_connector.py @@ -81,12 +81,6 @@ def get_weight_maximum(self, synapse_info): synapse_info.weights, max((synapse_info.n_pre_neurons, synapse_info.n_post_neurons))) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - return self._get_weight_minimum( - synapse_info.weights, - max((synapse_info.n_pre_neurons, synapse_info.n_post_neurons))) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git 
a/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py index 64ca203708..4141f3ce91 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/small_world_connector.py @@ -104,12 +104,6 @@ def get_weight_maximum(self, synapse_info): return self._get_weight_maximum( synapse_info.weights, self.__n_connections) - @overrides(AbstractConnector.get_weight_minimum) - def get_weight_minimum(self, synapse_info): - # pylint: disable=too-many-arguments - return self._get_weight_minimum( - synapse_info.weights, self.__n_connections) - @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( self, pre_slices, pre_slice_index, post_slices, diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index da00a60475..d774a75761 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -38,6 +38,8 @@ from spinn_front_end_common.interface.simulation import simulation_utilities from spynnaker.pyNN.utilities.constants import POPULATION_BASED_REGIONS from spinn_front_end_common.interface.profiling import profile_utils +from spinn_front_end_common.interface.provenance import ( + AbstractProvidesLocalProvenanceData) from spynnaker.pyNN.models.common import ( AbstractSpikeRecordable, AbstractNeuronRecordable, NeuronRecorder) from spynnaker.pyNN.utilities import constants @@ -78,7 +80,7 @@ class AbstractPopulationVertex( AbstractChangableAfterRun, AbstractRewritesDataSpecification, AbstractReadParametersBeforeSet, AbstractAcceptsIncomingSynapses, ProvidesKeyToAtomMappingImpl, - AbstractCanReset): + AbstractCanReset, AbstractProvidesLocalProvenanceData): """ Underlying vertex model for Neural Populations. 
""" __slots__ = [ @@ -117,6 +119,7 @@ class AbstractPopulationVertex( def __init__( self, n_neurons, label, constraints, max_atoms_per_core, spikes_per_second, ring_buffer_sigma, min_weights, + weight_random_sigma, max_stdp_spike_delta, incoming_spike_buffer_size, neuron_impl, pynn_model): # pylint: disable=too-many-arguments, too-many-locals super(AbstractPopulationVertex, self).__init__( @@ -157,7 +160,8 @@ def __init__( # Set up synapse handling self.__synapse_manager = SynapticManager( self.__neuron_impl.get_n_synapse_types(), ring_buffer_sigma, - spikes_per_second, min_weights, config) + spikes_per_second, min_weights, weight_random_sigma, + max_stdp_spike_delta, config) # bool for if state has changed. self.__change_requires_mapping = True @@ -856,3 +860,8 @@ def reset_to_first_timestep(self): if self.__synapse_manager.changes_during_run: self.__change_requires_data_generation = True self.__change_requires_neuron_parameters_reload = False + + @overrides(AbstractProvidesLocalProvenanceData.get_local_provenance_data) + def get_local_provenance_data(self): + return self.__synapse_manager.get_weight_provenance( + list(self.__neuron_impl.get_synapse_targets())) diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py index 48695fb53f..dcd9891fe9 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py @@ -21,7 +21,8 @@ _population_parameters = { "spikes_per_second": None, "ring_buffer_sigma": None, - "min_weights": None, "incoming_spike_buffer_size": None + "min_weights": None, "weight_random_sigma": None, + "max_stdp_spike_delta": None, "incoming_spike_buffer_size": None } @@ -52,10 +53,12 @@ def get_max_atoms_per_core(cls): additional_arguments=_population_parameters.keys()) def create_vertex( self, n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, min_weights, 
incoming_spike_buffer_size): + ring_buffer_sigma, min_weights, weight_random_sigma, + max_stdp_spike_delta, incoming_spike_buffer_size): # pylint: disable=arguments-differ max_atoms = self.get_max_atoms_per_core() return AbstractPopulationVertex( n_neurons, label, constraints, max_atoms, spikes_per_second, - ring_buffer_sigma, min_weights, incoming_spike_buffer_size, + ring_buffer_sigma, min_weights, weight_random_sigma, + max_stdp_spike_delta, incoming_spike_buffer_size, self.__model, self) diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py index a9f3a9def2..7b94086d2f 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py @@ -55,10 +55,12 @@ def __init__( additional_arguments={"n_steps_per_timestep"}) def create_vertex( self, n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, min_weights, incoming_spike_buffer_size, + ring_buffer_sigma, min_weights, weight_random_sigma, + max_stdp_spike_delta, incoming_spike_buffer_size, n_steps_per_timestep): # pylint: disable=arguments-differ self._model.n_steps_per_timestep = n_steps_per_timestep return super(AbstractPyNNNeuronModelStandard, self).create_vertex( n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, min_weights, incoming_spike_buffer_size) + ring_buffer_sigma, min_weights, weight_random_sigma, + max_stdp_spike_delta, incoming_spike_buffer_size) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py index 9c477036ac..589209c814 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/common/plasticity_helpers.py @@ -45,16 +45,39 @@ def get_exp_lut_array(time_step, time_constant, shift=0): 
return numpy.concatenate((header, a.astype("uint16"))).view("uint32") -def get_min_lut_value(exp_lut_array): +def _get_last_non_zero_value(values): + """ Get the last non-zero value (rescaled) from a LUT array + """ + # Either the ultimate or penultimate value must be non-zero as generated + # from the above function + if values[-1] != 0: + return values[-1] / STDP_FIXED_POINT_ONE + return values[-2] / STDP_FIXED_POINT_ONE + + +def get_min_lut_value( + exp_lut_array, time_step=None, max_stdp_spike_delta=None): """ Get the smallest non-zero value of an exponential lookup array,\ - or zero if no such value + or None if no such value :param numpy.ndarray exp_lut_array: The lookup array + :param float time_step: The time step in milliseconds + :param float max_stdp_spike_delta: The maximum expected difference between + spike times in milliseconds :rtype: float """ if not len(exp_lut_array): - return 0 + return None values = exp_lut_array.view("uint16") - if values[-1] != 0: - return values[-1] / STDP_FIXED_POINT_ONE - return values[-2] / STDP_FIXED_POINT_ONE + + # If there isn't a time step and a limit + if time_step is None or max_stdp_spike_delta is None: + return _get_last_non_zero_value(values) + + # If there is a time step and limit, use it to work out which value + pos = int(math.ceil(max_stdp_spike_delta / time_step)) + 1 + if pos >= len(values): + return _get_last_non_zero_value(values) + + # Make sure we haven't just picked the last value which happens to be 0 + return _get_last_non_zero_value(values[:pos]) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py index 5c27729e93..35d8e9b1d7 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/abstract_timing_dependence.py @@ -84,11 +84,14 @@ def 
get_parameter_names(self): :rtype: iterable(str) """ - @abstractproperty - def minimum_delta(self): + @abstractmethod + def minimum_delta(self, max_stdp_spike_delta): """ The smallest non-zero changes that will be passed to the weight\ rule + :param float max_stdp_spike_delta: The maximum expected time difference + between two spikes in milliseconds + :return: An array of minimum change values, one for potentiation,\ one for depression. If this requires a 2-parameter weight rule, each of the values of the arrays must then be an array of arrays diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py index c446699609..7892db5df1 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py @@ -118,18 +118,19 @@ def synaptic_structure(self): def get_parameter_names(self): return ['tau_plus', 'tau_minus', 'tau_x', 'tau_y'] - @property @overrides(AbstractTimingDependence.minimum_delta) - def minimum_delta(self): + def minimum_delta(self, max_stdp_spike_delta): + ts = get_simulator().machine_time_step / 1000.0 + # The minimums for potentiation min_decayed_r1 = get_min_lut_value(self.__tau_plus_data) min_decayed_r1_o2 = min_decayed_r1 * get_min_lut_value( - self.__tau_y_data) + self.__tau_y_data, ts, max_stdp_spike_delta) # The minimums for depression min_decayed_o1 = get_min_lut_value(self.__tau_minus_data) min_decayed_o1_r2 = min_decayed_o1 * get_min_lut_value( - self.__tau_x_data) + self.__tau_x_data, ts, max_stdp_spike_delta) return [[min_decayed_r1, min_decayed_r1_o2], [min_decayed_o1, min_decayed_o1_r2]] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py 
b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py index dd06057361..cd13e3aac7 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_recurrent.py @@ -137,8 +137,7 @@ def get_parameter_names(self): return ['accumulator_depression', 'accumulator_potentiation', 'mean_pre_window', 'mean_post_window', 'dual_fsm'] - @property @overrides(AbstractTimingDependence.minimum_delta) - def minimum_delta(self): + def minimum_delta(self, max_stdp_spike_delta): # This rule always has a delta of 1 return [1.0, 1.0] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py index fa2ae038c8..4024337142 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py @@ -98,8 +98,9 @@ def synaptic_structure(self): def get_parameter_names(self): return ['tau_plus', 'tau_minus'] - @property @overrides(AbstractTimingDependence.minimum_delta) - def minimum_delta(self): - return [get_min_lut_value(self.__tau_plus_data), - get_min_lut_value(self.__tau_minus_data)] + def minimum_delta(self, max_stdp_spike_delta): + ts = get_simulator().machine_time_step / 1000.0 + return [ + get_min_lut_value(self.__tau_plus_data, ts, max_stdp_spike_delta), + get_min_lut_value(self.__tau_minus_data, ts, max_stdp_spike_delta)] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py index 55d9369688..5dde977bd6 100644 --- 
a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py @@ -96,8 +96,9 @@ def synaptic_structure(self): def get_parameter_names(self): return ['tau_plus', 'tau_minus'] - @property @overrides(AbstractTimingDependence.minimum_delta) - def minimum_delta(self): - return [get_min_lut_value(self.__tau_plus_data), - get_min_lut_value(self.__tau_minus_data)] + def minimum_delta(self, max_stdp_spike_delta): + ts = get_simulator().machine_time_step / 1000.0 + return [ + get_min_lut_value(self.__tau_plus_data, ts, max_stdp_spike_delta), + get_min_lut_value(self.__tau_minus_data, ts, max_stdp_spike_delta)] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py index cf330618b8..c0ec71d605 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py @@ -101,8 +101,8 @@ def synaptic_structure(self): def get_parameter_names(self): return ['alpha', 'tau'] - @property @overrides(AbstractTimingDependence.minimum_delta) - def minimum_delta(self): - min_tau = get_min_lut_value(self.__tau_data) + def minimum_delta(self, max_stdp_spike_delta): + ts = get_simulator().machine_time_step / 1000.0 + min_tau = get_min_lut_value(self.__tau_data, ts, max_stdp_spike_delta) return [min_tau - self.__alpha, min_tau] diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 49eb23ef4f..0771349750 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ 
-464,11 +464,14 @@ def get_weight_variance(self, connector, weights): # has to be given as no variance return 0.0 - def get_weight_min_delta(self): + def get_weight_min_delta(self, max_stdp_spike_delta): """ Get the minimum non-zero weight change + + :param float max_stdp_spike_delta: The maximum expected time between + spikes in milliseconds """ return self.__weight_dependence.weight_change_minimum( - self.__timing_dependence.minimum_delta) + self.__timing_dependence.minimum_delta(max_stdp_spike_delta)) @overrides(AbstractPlasticSynapseDynamics.get_weight_maximum) def get_weight_maximum(self, connector, synapse_info): diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 20e3cd8815..9212db5ef3 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -16,6 +16,7 @@ import math import struct import numpy +import sys from data_specification.enums import DataType from spinn_front_end_common.utilities.helpful_functions import ( locate_memory_region_for_placement, read_config) @@ -35,7 +36,7 @@ MasterPopTableAsBinarySearch) from spynnaker.pyNN.exceptions import SynapticConfigurationException from spynnaker.pyNN.models.neuron.synapse_dynamics import SynapseDynamicsSTDP -import sys +from spinn_front_end_common.utilities.utility_objs.provenance_data_item import ProvenanceDataItem TIME_STAMP_BYTES = BYTES_PER_WORD @@ -76,17 +77,23 @@ class SynapticManager(object): "__synapse_dynamics", "__synapse_io", "__weight_scales", + "__weight_random_sigma", + "__max_stdp_spike_delta", "__gen_on_machine", "__max_row_info", - "__synapse_indices"] + "__synapse_indices", + "__weight_provenance"] def __init__( self, n_synapse_types, ring_buffer_sigma, spikes_per_second, - min_weights, config, population_table_type=None, synapse_io=None): + min_weights, weight_random_sigma, max_stdp_spike_delta, config, + population_table_type=None, synapse_io=None): 
self.__n_synapse_types = n_synapse_types self.__ring_buffer_sigma = ring_buffer_sigma self.__spikes_per_second = spikes_per_second self.__min_weights = min_weights + self.__weight_random_sigma = weight_random_sigma + self.__max_stdp_spike_delta = max_stdp_spike_delta # Get the type of population table self.__poptable_type = population_table_type @@ -123,6 +130,14 @@ def __init__( " match the number of synapses ({})".format( self.__min_weights, self.__n_synapse_types)) + # Read the other minimum weight configuration parameters + if self.__weight_random_sigma is None: + self.__weight_random_sigma = config.getfloat( + "Simulation", "weight_random_sigma") + if self.__max_stdp_spike_delta is None: + self.__max_stdp_spike_delta = config.getfloat( + "Simulation", "max_stdp_spike_delta") + # Prepare for dealing with STDP - there can only be one (non-static) # synapse dynamics per vertex at present self.__synapse_dynamics = None @@ -150,6 +165,10 @@ def __init__( # A map of synapse information for each machine pre vertex to index self.__synapse_indices = dict() + # Store weight provenance information mapping from + # (real weight, represented weight) -> list of edges + self.__weight_provenance = defaultdict(list) + @property def synapse_dynamics(self): return self.__synapse_dynamics @@ -744,7 +763,6 @@ def __get_closest_weight(self, value): def _calculate_min_weights( self, application_vertex, application_graph, weight_scale): min_weights = [sys.maxsize for _ in range(self.__n_synapse_types)] - stdp_min_deltas = [sys.maxsize for _ in range(self.__n_synapse_types)] for app_edge in application_graph.get_edges_ending_at_vertex( application_vertex): if isinstance(app_edge, ProjectionApplicationEdge): @@ -752,7 +770,8 @@ def _calculate_min_weights( synapse_type = synapse_info.synapse_type connector = synapse_info.connector - weight_min = connector.get_weight_minimum(synapse_info) + weight_min = connector.get_weight_minimum( + synapse_info.weights, self.__weight_random_sigma) 
weight_min *= weight_scale if weight_min != 0: min_weights[synapse_type] = min( @@ -760,12 +779,12 @@ def _calculate_min_weights( synapse_dynamics = synapse_info.synapse_dynamics if isinstance(synapse_dynamics, SynapseDynamicsSTDP): - min_delta = synapse_dynamics.get_weight_min_delta() - stdp_min_deltas[synapse_type] = min( - stdp_min_deltas[synapse_type], min_delta) - - # Try to allow STDP weights to get as small as they want to, but try - # to keep a reasonable upper range too + min_delta = synapse_dynamics.get_weight_min_delta( + self.__max_stdp_spike_delta) + min_delta *= weight_scale + if min_delta is not None and min_delta != 0: + min_weights[synapse_type] = min( + min_weights[synapse_type], min_delta) # Convert values to their closest representable value to ensure # that division works for the minimum value @@ -775,8 +794,29 @@ def _calculate_min_weights( # The minimum weight shouldn't be 0 unless set above (and then it # doesn't matter that we use the min as there are no weights); so # set the weight to the smallest representable value if 0 - return [m if m > 0 else DataType.S1615.decode_from_int(1) - for m in min_weights] + min_weights = [m if m > 0 else DataType.S1615.decode_from_int(1) + for m in min_weights] + + self.__check_weights( + min_weights, application_graph, application_vertex) + return min_weights + + def __check_weights(self, min_weights, app_graph, app_vertex): + """ Warn the user about weights that can't be represented properly + where possible + """ + for app_edge in app_graph.get_edges_ending_at_vertex(app_vertex): + if isinstance(app_edge, ProjectionApplicationEdge): + for synapse_info in app_edge.synapse_information: + weight = synapse_info.weights + if numpy.isscalar(weight): + synapse_type = synapse_info.synapse_type + r_weight = weight / min_weights[synapse_type] + r_weight = DataType.UINT16.closest_representable_value( + r_weight) * min_weights[synapse_type] + if weight != r_weight: + self.__weight_provenance[weight, 
r_weight].append( + (app_edge, synapse_info)) def _get_min_weights( self, application_vertex, application_graph, weight_scale): @@ -1093,3 +1133,23 @@ def changes_during_run(self): if self.__synapse_dynamics is None: return False return self.__synapse_dynamics.changes_during_run + + def get_weight_provenance(self, synapse_names): + prov_items = list() + # Record the min weight used for each synapse type + for i, weight in enumerate(self.__min_weights): + prov_items.append(ProvenanceDataItem( + [self._label, "min_weight_{}".format(synapse_names[i])], + weight)) + + # Report any known weights that couldn't be represented + for (weight, r_weight) in self.__weight_provenance: + (app_edge, s_info) = self.__weight_provenance[weight, r_weight] + prov_items.append(ProvenanceDataItem( + [self._label, app_edge.label, + s_info.connector.__class__.__name__, + "weight_representation"], r_weight, + report=True, + message="Weight of {} could not be represented precisely;" + " a weight of {} was used instead".format( + weight, r_weight))) From 3df29fd7df2271afe6800677602fab20d92aebd2 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Tue, 19 May 2020 16:23:51 +0100 Subject: [PATCH 052/198] Fix provenance --- .../neuron/abstract_population_vertex.py | 2 +- .../pyNN/models/neuron/synaptic_manager.py | 29 ++++++++++++------- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index d774a75761..aa52de9653 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -864,4 +864,4 @@ def reset_to_first_timestep(self): @overrides(AbstractProvidesLocalProvenanceData.get_local_provenance_data) def get_local_provenance_data(self): return self.__synapse_manager.get_weight_provenance( - list(self.__neuron_impl.get_synapse_targets())) + self._label, 
list(self.__neuron_impl.get_synapse_targets())) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 9212db5ef3..7eb2c4cf90 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -1134,22 +1134,29 @@ def changes_during_run(self): return False return self.__synapse_dynamics.changes_during_run - def get_weight_provenance(self, synapse_names): + def get_weight_provenance(self, label, synapse_names): + """ Get provenance data for weights + + :param str label: The label of the vertex + :param list synapse_names: List of the names of the synapses + :return: A list of provenance items + """ prov_items = list() # Record the min weight used for each synapse type for i, weight in enumerate(self.__min_weights): prov_items.append(ProvenanceDataItem( - [self._label, "min_weight_{}".format(synapse_names[i])], + [label, "min_weight_{}".format(synapse_names[i])], weight)) # Report any known weights that couldn't be represented for (weight, r_weight) in self.__weight_provenance: - (app_edge, s_info) = self.__weight_provenance[weight, r_weight] - prov_items.append(ProvenanceDataItem( - [self._label, app_edge.label, - s_info.connector.__class__.__name__, - "weight_representation"], r_weight, - report=True, - message="Weight of {} could not be represented precisely;" - " a weight of {} was used instead".format( - weight, r_weight))) + edge_info = self.__weight_provenance[weight, r_weight] + for i, (app_edge, s_info) in enumerate(edge_info): + prov_items.append(ProvenanceDataItem( + [label, app_edge.label, + s_info.connector.__class__.__name__, + "weight_representation"], r_weight, + report=(i == 0), + message="Weight of {} could not be represented precisely;" + " a weight of {} was used instead".format( + weight, r_weight))) From b944223e2b76afacfca575e14067680632141b13 Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Tue, 19 May 2020 16:27:40 +0100 
Subject: [PATCH 053/198] Return the provenance --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 7eb2c4cf90..fe98b65eb4 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -1160,3 +1160,5 @@ def get_weight_provenance(self, label, synapse_names): message="Weight of {} could not be represented precisely;" " a weight of {} was used instead".format( weight, r_weight))) + + return prov_items From 1284951d629f8457346a8b1a65166f90b1b870aa Mon Sep 17 00:00:00 2001 From: Andrew Rowley Date: Wed, 20 May 2020 09:36:44 +0100 Subject: [PATCH 054/198] Fix unit tests --- unittests/mocks.py | 4 ++- .../neuron/test_synaptic_manager.py | 31 ++++++++++++------- unittests/test_populations/test_vertex.py | 5 +-- 3 files changed, 25 insertions(+), 15 deletions(-) diff --git a/unittests/mocks.py b/unittests/mocks.py index b452d15bcb..a9d577db86 100644 --- a/unittests/mocks.py +++ b/unittests/mocks.py @@ -93,7 +93,9 @@ def __init__(self): "incoming_spike_buffer_size": "256", "ring_buffer_sigma": "5", "one_to_one_connection_dtcm_max_bytes": "0", - "min_weights": "None"} + "min_weights": "None", + "weight_random_sigma": 2, + "max_stdp_spike_delta": 10} self.config["Buffers"] = {"time_between_requests": "10", "minimum_buffer_sdram": "10", "use_auto_pause_and_resume": "True", diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index d9d21d151d..87a7d70147 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -119,7 +119,8 @@ def test_retrieve_synaptic_block(self): synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, spikes_per_second=100.0, - min_weights=None, config=config, + min_weights=None, 
weight_random_sigma=None, + max_stdp_spike_delta=None, config=config, population_table_type=MockMasterPopulationTable( {key: [(1, 0, False)]}), synapse_io=MockSynapseIO()) @@ -173,7 +174,8 @@ def test_retrieve_direct_block(self): synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, spikes_per_second=100.0, - min_weights=None, config=config, + min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config, population_table_type=MockMasterPopulationTable( {key: [(1, 0, True), (1, n_rows * 4, True)]}), synapse_io=MockSynapseIO()) @@ -288,8 +290,9 @@ def test_write_synaptic_matrix_and_master_population_table(self): spec.reserve_memory_region(synapse_region, all_syn_block_sz) synaptic_manager = SynapticManager( - n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, min_weights=None, config=config) + n_synapse_types=2, ring_buffer_sigma=5.0, spikes_per_second=100.0, + min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config) synaptic_manager._write_synaptic_matrix_and_master_population_table( spec, [post_vertex_slice], post_slice_index, post_vertex, post_vertex_slice, all_syn_block_sz, weight_scales, @@ -402,8 +405,9 @@ def test_set_synapse_dynamics(self): config = conf_loader.load_config( AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths) synaptic_manager = SynapticManager( - n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, min_weights=None, config=config) + n_synapse_types=2, ring_buffer_sigma=5.0, spikes_per_second=100.0, + min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config) static = SynapseDynamicsStatic() stdp = SynapseDynamicsSTDP( @@ -494,8 +498,9 @@ def test_set_synapse_dynamics(self): # Try starting again to get a couple more combinations synaptic_manager = SynapticManager( - n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, min_weights=None, config=config) + n_synapse_types=2, 
ring_buffer_sigma=5.0, spikes_per_second=100.0, + min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config) # STDP followed by structural STDP should result in Structural STDP synaptic_manager.synapse_dynamics = stdp @@ -515,8 +520,9 @@ def test_set_synapse_dynamics(self): # One more time! synaptic_manager = SynapticManager( - n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, min_weights=None, config=config) + n_synapse_types=2, ring_buffer_sigma=5.0, spikes_per_second=100.0, + min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config) # Static followed by static structural should result in static # structural @@ -551,8 +557,9 @@ def test_set_synapse_dynamics(self): # OK, just one more, honest synaptic_manager = SynapticManager( - n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, min_weights=None, config=config) + n_synapse_types=2, ring_buffer_sigma=5.0, spikes_per_second=100.0, + min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config) synaptic_manager.synapse_dynamics = static_struct synaptic_manager.synapse_dynamics = stdp_struct diff --git a/unittests/test_populations/test_vertex.py b/unittests/test_populations/test_vertex.py index 6b863bb375..07308dc067 100644 --- a/unittests/test_populations/test_vertex.py +++ b/unittests/test_populations/test_vertex.py @@ -116,8 +116,9 @@ def __init__(self): super(MockNeuron, self).__init__( n_neurons=5, label="Mock", constraints=None, max_atoms_per_core=None, spikes_per_second=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, - neuron_impl=foo_bar.model, pynn_model=foo_bar, min_weights=None) + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, + neuron_impl=foo_bar.model, pynn_model=foo_bar) def test_initializable(): From ad128f869850747016d5d484746ae94273e6ecf3 Mon Sep 17 00:00:00 2001 From: Andrew 
Rowley Date: Wed, 20 May 2020 10:19:04 +0100 Subject: [PATCH 055/198] Need to pass the parameters! --- .../external_device_lif_control_vertex.py | 1 + 1 file changed, 1 insertion(+) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py index 95ef4a3450..3b16fc6a3b 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py @@ -109,6 +109,7 @@ def __init__( super(ExternalDeviceLifControlVertex, self).__init__( len(devices), label, constraints, max_atoms_per_core, spikes_per_second, ring_buffer_sigma, min_weights, + weight_random_sigma, max_stdp_spike_delta, incoming_spike_buffer_size, neuron_impl, pynn_model) def routing_key_partition_atom_mapping(self, routing_info, partition): From d4132ff10f1f0c86d7ae7ae7838d341c1238fcfd Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 28 Jul 2020 17:10:24 +0100 Subject: [PATCH 056/198] flake8 line lengths, unused imports --- .../pyNN/models/neuron/synaptic_manager.py | 206 +----------------- 1 file changed, 3 insertions(+), 203 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 4b9ac23363..549ecad6f3 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -20,8 +20,7 @@ from data_specification.enums import DataType from spinn_front_end_common.utilities.helpful_functions import ( locate_memory_region_for_placement, read_config) -from spinn_front_end_common.utilities.constants import ( - BYTES_PER_WORD, MICRO_TO_SECOND_CONVERSION) +from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spynnaker.pyNN.models.neuron.generator_data import GeneratorData from spynnaker.pyNN.models.neural_projections.connectors import ( 
AbstractGenerateConnectorOnMachine) @@ -37,8 +36,8 @@ MasterPopTableAsBinarySearch) from spynnaker.pyNN.exceptions import SynapticConfigurationException from spynnaker.pyNN.models.neuron.synapse_dynamics import SynapseDynamicsSTDP -from spinn_front_end_common.utilities.utility_objs.provenance_data_item import ( - ProvenanceDataItem) +from spinn_front_end_common.utilities.utility_objs\ + .provenance_data_item import ProvenanceDataItem TIME_STAMP_BYTES = BYTES_PER_WORD @@ -561,205 +560,6 @@ def _reserve_memory_regions( size=synapse_structural_dynamics_sz, label='synapseDynamicsStructuralParams') -# @staticmethod -# def _ring_buffer_expected_upper_bound( -# weight_mean, weight_std_dev, spikes_per_second, -# machine_timestep, n_synapses_in, sigma): -# """ Provides expected upper bound on accumulated values in a ring\ -# buffer element. -# -# Requires an assessment of maximum Poisson input rate. -# -# Assumes knowledge of mean and SD of weight distribution, fan-in\ -# and timestep. -# -# All arguments should be assumed real values except n_synapses_in\ -# which will be an integer. -# -# :param float weight_mean: Mean of weight distribution (in either nA or\ -# microSiemens as required) -# :param float weight_std_dev: SD of weight distribution -# :param float spikes_per_second: Maximum expected Poisson rate in Hz -# :param int machine_timestep: in us -# :param int n_synapses_in: No of connected synapses -# :param float sigma: How many SD above the mean to go for upper bound;\ -# a good starting choice is 5.0. Given length of simulation we can\ -# set this for approximate number of saturation events. 
-# :rtype: float -# """ -# # E[ number of spikes ] in a timestep -# steps_per_second = MICRO_TO_SECOND_CONVERSION / machine_timestep -# average_spikes_per_timestep = ( -# float(n_synapses_in * spikes_per_second) / steps_per_second) -# -# # Exact variance contribution from inherent Poisson variation -# poisson_variance = average_spikes_per_timestep * (weight_mean ** 2) -# -# # Upper end of range for Poisson summation required below -# # upper_bound needs to be an integer -# upper_bound = int(round(average_spikes_per_timestep + -# POSSION_SIGMA_SUMMATION_LIMIT * -# math.sqrt(average_spikes_per_timestep))) -# -# # Closed-form exact solution for summation that gives the variance -# # contributed by weight distribution variation when modulated by -# # Poisson PDF. Requires scipy.special for gamma and incomplete gamma -# # functions. Beware: incomplete gamma doesn't work the same as -# # Mathematica because (1) it's regularised and needs a further -# # multiplication and (2) it's actually the complement that is needed -# # i.e. 
'gammaincc'] -# -# weight_variance = 0.0 -# -# if weight_std_dev > 0: -# # pylint: disable=no-member -# lngamma = special.gammaln(1 + upper_bound) -# gammai = special.gammaincc( -# 1 + upper_bound, average_spikes_per_timestep) -# -# big_ratio = (math.log(average_spikes_per_timestep) * upper_bound - -# lngamma) -# -# if -701.0 < big_ratio < 701.0 and big_ratio != 0.0: -# log_weight_variance = ( -# -average_spikes_per_timestep + -# math.log(average_spikes_per_timestep) + -# 2.0 * math.log(weight_std_dev) + -# math.log(math.exp(average_spikes_per_timestep) * gammai - -# math.exp(big_ratio))) -# weight_variance = math.exp(log_weight_variance) -# -# # upper bound calculation -> mean + n * SD -# return ((average_spikes_per_timestep * weight_mean) + -# (sigma * math.sqrt(poisson_variance + weight_variance))) -# -# def _get_ring_buffer_to_input_left_shifts( -# self, application_vertex, application_graph, machine_timestep, -# weight_scale): -# """ Get the scaling of the ring buffer to provide as much accuracy as\ -# possible without too much overflow -# -# :param .ApplicationVertex application_vertex: -# :param .ApplicationGraph application_graph: -# :param int machine_timestep: -# :param float weight_scale: -# :rtype: list(int) -# """ -# weight_scale_squared = weight_scale * weight_scale -# n_synapse_types = self.__n_synapse_types -# running_totals = [RunningStats() for _ in range(n_synapse_types)] -# delay_running_totals = [RunningStats() for _ in range(n_synapse_types)] -# total_weights = numpy.zeros(n_synapse_types) -# biggest_weight = numpy.zeros(n_synapse_types) -# weights_signed = False -# rate_stats = [RunningStats() for _ in range(n_synapse_types)] -# steps_per_second = MICRO_TO_SECOND_CONVERSION / machine_timestep -# -# for app_edge in application_graph.get_edges_ending_at_vertex( -# application_vertex): -# if isinstance(app_edge, ProjectionApplicationEdge): -# for synapse_info in app_edge.synapse_information: -# synapse_type = synapse_info.synapse_type -# 
synapse_dynamics = synapse_info.synapse_dynamics -# connector = synapse_info.connector -# -# weight_mean = ( -# synapse_dynamics.get_weight_mean( -# connector, synapse_info) * weight_scale) -# n_connections = \ -# connector.get_n_connections_to_post_vertex_maximum( -# synapse_info) -# weight_variance = synapse_dynamics.get_weight_variance( -# connector, synapse_info.weights) * weight_scale_squared -# running_totals[synapse_type].add_items( -# weight_mean, weight_variance, n_connections) -# -# delay_variance = synapse_dynamics.get_delay_variance( -# connector, synapse_info.delays) -# delay_running_totals[synapse_type].add_items( -# 0.0, delay_variance, n_connections) -# -# weight_max = (synapse_dynamics.get_weight_maximum( -# connector, synapse_info) * weight_scale) -# biggest_weight[synapse_type] = max( -# biggest_weight[synapse_type], weight_max) -# -# spikes_per_tick = max( -# 1.0, self.__spikes_per_second / steps_per_second) -# spikes_per_second = self.__spikes_per_second -# if isinstance(app_edge.pre_vertex, -# SpikeSourcePoissonVertex): -# rate = app_edge.pre_vertex.max_rate -# # If non-zero rate then use it; otherwise keep default -# if rate != 0: -# spikes_per_second = rate -# if hasattr(spikes_per_second, "__getitem__"): -# spikes_per_second = numpy.max(spikes_per_second) -# elif isinstance(spikes_per_second, RandomDistribution): -# spikes_per_second = get_maximum_probable_value( -# spikes_per_second, app_edge.pre_vertex.n_atoms) -# prob = 1.0 - ( -# (1.0 / 100.0) / app_edge.pre_vertex.n_atoms) -# spikes_per_tick = spikes_per_second / steps_per_second -# spikes_per_tick = scipy.stats.poisson.ppf( -# prob, spikes_per_tick) -# rate_stats[synapse_type].add_items( -# spikes_per_second, 0, n_connections) -# total_weights[synapse_type] += spikes_per_tick * ( -# weight_max * n_connections) -# -# if synapse_dynamics.are_weights_signed(): -# weights_signed = True -# -# max_weights = numpy.zeros(n_synapse_types) -# for synapse_type in range(n_synapse_types): -# if 
delay_running_totals[synapse_type].variance == 0.0: -# max_weights[synapse_type] = max(total_weights[synapse_type], -# biggest_weight[synapse_type]) -# else: -# stats = running_totals[synapse_type] -# rates = rate_stats[synapse_type] -# max_weights[synapse_type] = min( -# self._ring_buffer_expected_upper_bound( -# stats.mean, stats.standard_deviation, rates.mean, -# machine_timestep, stats.n_items, -# self.__ring_buffer_sigma), -# total_weights[synapse_type]) -# max_weights[synapse_type] = max( -# max_weights[synapse_type], biggest_weight[synapse_type]) -# -# # Convert these to powers; we could use int.bit_length() for this if -# # they were integers, but they aren't... -# max_weight_powers = ( -# 0 if w <= 0 else int(math.ceil(max(0, math.log(w, 2)))) -# for w in max_weights) -# -# # If 2^max_weight_power equals the max weight, we have to add another -# # power, as range is 0 - (just under 2^max_weight_power)! -# max_weight_powers = ( -# w + 1 if (2 ** w) <= a else w -# for w, a in zip(max_weight_powers, max_weights)) -# -# # If we have synapse dynamics that uses signed weights, -# # Add another bit of shift to prevent overflows -# if weights_signed: -# max_weight_powers = (m + 1 for m in max_weight_powers) -# -# return list(max_weight_powers) -# -# @staticmethod -# def __get_weight_scale(ring_buffer_to_input_left_shift): -# """ Return the amount to scale the weights by to convert them from \ -# floating point values to 16-bit fixed point numbers which can be \ -# shifted left by ring_buffer_to_input_left_shift to produce an\ -# s1615 fixed point number -# -# :param int ring_buffer_to_input_left_shift: -# :rtype: float -# """ -# return float(math.pow(2, 16 - (ring_buffer_to_input_left_shift + 1))) -# -# >>>>>>> refs/remotes/origin/master def _write_synapse_parameters( self, spec, min_weights, weight_scale): """Get the ring buffer shifts and scaling factors. 
From 20d210a6eac17de034eeb615ed9076ea858982af Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 30 Jul 2020 15:59:12 +0100 Subject: [PATCH 057/198] Updates to fix integration tests, particularly for kernel connectors - also fixes for structural plasticity --- .../connectors/abstract_connector.py | 8 +++- .../connectors/from_list_connector.py | 13 +++++- .../connectors/kernel_connector.py | 12 +++++- .../neural_projections/synapse_information.py | 8 ++++ .../pyNN/models/neuron/synaptic_manager.py | 43 ++++++++++++++----- spynnaker/pyNN/utilities/utility_calls.py | 29 +++++++++++++ 6 files changed, 99 insertions(+), 14 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index f3e2b6003a..a11d0eceff 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -294,7 +294,13 @@ def get_weight_minimum(self, weights, weight_random_sigma): elif numpy.isscalar(weights): return abs(weights) elif hasattr(weights, "__getitem__"): - return numpy.amax(numpy.abs(weights)) + # Have to assume here that the list of weights that has been + # provided has different (non-zero) values in it. 
In order to + # represent these correctly, it's the greatest common divisor + # across the array of weights that we need + non_zero_weights = numpy.abs(weights)[ + numpy.nonzero(numpy.abs(weights))] + return utility_calls.float_gcd_of_array(non_zero_weights) raise Exception("Unrecognised weight format") def _get_weight_maximum(self, weights, n_connections): diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index 19ff0e70d3..d6a21a236e 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -80,6 +80,16 @@ def __init__(self, conn_list, safe=True, callback=None, verbose=False, # Call the conn_list setter, as this sets the internal values self.conn_list = conn_list + @overrides(AbstractConnector.set_projection_information) + def set_projection_information(self, machine_time_step, synapse_info): + AbstractConnector.set_projection_information( + self, machine_time_step, synapse_info) + # now we want to tell the synapse_info about weights and delays + if self.__weights is not None: + synapse_info.weights = self.__weights.flatten() + if self.__delays is not None: + synapse_info.delays = self.__delays + @overrides(AbstractConnector.get_delay_maximum) def get_delay_maximum(self, synapse_info): if self.__delays is None: @@ -225,7 +235,8 @@ def get_weight_minimum(self, weights, weight_random_sigma): if self.__weights is None: return super(FromListConnector, self).get_weight_minimum( weights, weight_random_sigma) - return numpy.amin(numpy.abs(self.__weights)) + return super(FromListConnector, self).get_weight_minimum( + self.__weights, weight_random_sigma) @overrides(AbstractConnector.create_synaptic_block) def create_synaptic_block( diff --git a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py 
b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py index 94c4373008..c165fb83d9 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py @@ -297,6 +297,16 @@ def __compute_statistics( numpy.array(all_pre_ids, dtype='uint32'), numpy.array(all_delays), numpy.array(all_weights)) + @overrides(AbstractConnector.set_projection_information) + def set_projection_information(self, machine_time_step, synapse_info): + AbstractConnector.set_projection_information( + self, machine_time_step, synapse_info) + # now we want to tell the synapse_info about weights and delays + if self._krn_weights is not None: + synapse_info.weights = self._krn_weights.flatten() + if self._krn_delays is not None: + synapse_info.delays = self._krn_delays + @overrides(AbstractConnector.get_delay_maximum) def get_delay_maximum(self, synapse_info): # I think this is overestimated, but not by much @@ -342,7 +352,7 @@ def get_weight_maximum(self, synapse_info): def get_weight_minimum(self, weights, weight_random_sigma): # Use the kernel weights if user has supplied them if self._krn_weights is not None: - return self.get_weight_minimum( + return super(KernelConnector, self).get_weight_minimum( self._krn_weights, weight_random_sigma) return super(KernelConnector, self).get_weight_minimum( diff --git a/spynnaker/pyNN/models/neural_projections/synapse_information.py b/spynnaker/pyNN/models/neural_projections/synapse_information.py index d41264f61c..a7f35cf012 100644 --- a/spynnaker/pyNN/models/neural_projections/synapse_information.py +++ b/spynnaker/pyNN/models/neural_projections/synapse_information.py @@ -158,6 +158,10 @@ def weights(self): """ return self.__weights + @weights.setter + def weights(self, weights): + self.__weights = weights + @property def delays(self): """ The total synaptic delays (if any) @@ -166,6 +170,10 @@ def delays(self): """ return self.__delays + 
@delays.setter + def delays(self, delays): + self.__delays = delays + def may_generate_on_machine(self): """ Do we describe a collection of synapses whose synaptic matrix may be generated on SpiNNaker instead of needing to be calculated in diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 549ecad6f3..f0f94f7fb5 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -35,7 +35,8 @@ from spynnaker.pyNN.models.neuron.master_pop_table import ( MasterPopTableAsBinarySearch) from spynnaker.pyNN.exceptions import SynapticConfigurationException -from spynnaker.pyNN.models.neuron.synapse_dynamics import SynapseDynamicsSTDP +from spynnaker.pyNN.models.neuron.synapse_dynamics import ( + SynapseDynamicsSTDP, SynapseDynamicsStructuralStatic) from spinn_front_end_common.utilities.utility_objs\ .provenance_data_item import ProvenanceDataItem @@ -1003,7 +1004,8 @@ def __get_closest_weight(self, value): """ if abs(value) < 1.0: return DataType.S1615.closest_representable_value(value) - return 1 / (DataType.S1615.closest_representable_value(1 / value)) + return 1 / ( + DataType.S1615.closest_representable_value_above(1 / value)) def _calculate_min_weights( self, application_vertex, application_graph, weight_scale): @@ -1030,6 +1032,13 @@ def _calculate_min_weights( if min_delta is not None and min_delta != 0: min_weights[synapse_type] = min( min_weights[synapse_type], min_delta) + elif isinstance(synapse_dynamics, + SynapseDynamicsStructuralStatic): + weight_min = synapse_dynamics.initial_weight + weight_min *= weight_scale + if weight_min != 0: + min_weights[synapse_type] = min( + min_weights[synapse_type], weight_min) # Convert values to their closest representable value to ensure # that division works for the minimum value @@ -1053,15 +1062,27 @@ def __check_weights(self, min_weights, app_graph, app_vertex): for app_edge in 
app_graph.get_edges_ending_at_vertex(app_vertex): if isinstance(app_edge, ProjectionApplicationEdge): for synapse_info in app_edge.synapse_information: - weight = synapse_info.weights - if numpy.isscalar(weight): - synapse_type = synapse_info.synapse_type - r_weight = weight / min_weights[synapse_type] - r_weight = DataType.UINT16.closest_representable_value( - r_weight) * min_weights[synapse_type] - if weight != r_weight: - self.__weight_provenance[weight, r_weight].append( - (app_edge, synapse_info)) + weights = synapse_info.weights + synapse_type = synapse_info.synapse_type + min_weight = min_weights[synapse_type] + if numpy.isscalar(weights): + self.__check_weight( + min_weight, weights, app_edge, synapse_info) + elif hasattr(weights, "__getitem__"): + for w in weights: + self.__check_weight( + min_weight, w, app_edge, synapse_info) + + def __check_weight(self, min_weight, weight, app_edge, synapse_info): + """ Warn the user about a weight that can't be represented properly + where possible + """ + r_weight = weight / min_weight + r_weight = DataType.UINT16.closest_representable_value( + r_weight) * min_weight + if weight != r_weight: + self.__weight_provenance[weight, r_weight].append( + (app_edge, synapse_info)) def _get_min_weights( self, application_vertex, application_graph, weight_scale): diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index 4cc881fffa..b38ef83c77 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -299,3 +299,32 @@ def get_n_bits(n_values): if n_values == 1: return 1 return int(math.ceil(math.log(n_values, 2))) + + +def float_gcd(a, b): + """ Floating point gcd of two values + """ + if (a < b) : + return float_gcd(b, a) + + # base case + if (abs(b) < 0.001) : + return a + else : + return (float_gcd(b, a - math.floor(a / b) * b)) + + +def float_gcd_of_array(input): + """ Work out the floating point gcd of an array of numbers + + :param 
numpy.float(array) input: the input array + :return: the floating point gcd of the array + :rtype: float + """ + gcd = float_gcd(input[0], input[1]) + + for i in range(2, len(input)): + gcd = float_gcd(gcd, input[i]) + + return gcd + From be89efd33e7f2f3e79d18de9fa1c85d85a2dd97a Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 30 Jul 2020 17:04:16 +0100 Subject: [PATCH 058/198] Deal with array length of 1 --- spynnaker/pyNN/utilities/utility_calls.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index b38ef83c77..b34c336660 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -321,6 +321,9 @@ def float_gcd_of_array(input): :return: the floating point gcd of the array :rtype: float """ + if len(input) == 1: + return input[0] + gcd = float_gcd(input[0], input[1]) for i in range(2, len(input)): From 979891a37958d5ee95c752e05f232023edee1333 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 30 Jul 2020 17:13:25 +0100 Subject: [PATCH 059/198] flake8 whitespaces --- spynnaker/pyNN/utilities/utility_calls.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index b34c336660..5b7a5aadd1 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -304,13 +304,13 @@ def get_n_bits(n_values): def float_gcd(a, b): """ Floating point gcd of two values """ - if (a < b) : + if (a < b): return float_gcd(b, a) # base case - if (abs(b) < 0.001) : + if (abs(b) < 0.001): return a - else : + else: return (float_gcd(b, a - math.floor(a / b) * b)) @@ -330,4 +330,3 @@ def float_gcd_of_array(input): gcd = float_gcd(gcd, input[i]) return gcd - From 459ff56dd73a6cb7f5c9e03f39829e719b823291 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 31 Jul 2020 10:27:19 +0100 Subject: [PATCH 
060/198] Check representable weights properly when weight_scale isn't 1 --- .../pyNN/models/neuron/synaptic_manager.py | 20 +++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index f0f94f7fb5..9aae320551 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -1052,10 +1052,11 @@ def _calculate_min_weights( for m in min_weights] self.__check_weights( - min_weights, application_graph, application_vertex) + min_weights, weight_scale, application_graph, application_vertex) return min_weights - def __check_weights(self, min_weights, app_graph, app_vertex): + def __check_weights( + self, min_weights, weight_scale, app_graph, app_vertex): """ Warn the user about weights that can't be represented properly where possible """ @@ -1067,19 +1068,22 @@ def __check_weights(self, min_weights, app_graph, app_vertex): min_weight = min_weights[synapse_type] if numpy.isscalar(weights): self.__check_weight( - min_weight, weights, app_edge, synapse_info) + min_weight, weights, weight_scale, app_edge, + synapse_info) elif hasattr(weights, "__getitem__"): for w in weights: self.__check_weight( - min_weight, w, app_edge, synapse_info) + min_weight, w, weight_scale, app_edge, + synapse_info) - def __check_weight(self, min_weight, weight, app_edge, synapse_info): + def __check_weight( + self, min_weight, weight, weight_scale, app_edge, synapse_info): """ Warn the user about a weight that can't be represented properly where possible """ - r_weight = weight / min_weight - r_weight = DataType.UINT16.closest_representable_value( - r_weight) * min_weight + r_weight = weight * weight_scale / min_weight + r_weight = (DataType.UINT16.closest_representable_value( + r_weight) * min_weight) / weight_scale if weight != r_weight: self.__weight_provenance[weight, r_weight].append( (app_edge, synapse_info)) From 
780d513489edab1d800c5ae864f8b7eb548e984b Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 31 Jul 2020 11:51:33 +0100 Subject: [PATCH 061/198] Also need to use (float) gcd in STDP cases for better representation --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 9aae320551..f5773606a7 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -31,7 +31,7 @@ from spynnaker.pyNN.models.neuron.synapse_io import SynapseIORowBased from spynnaker.pyNN.models.utility_models.delays import DelayExtensionVertex from spynnaker.pyNN.utilities.constants import POPULATION_BASED_REGIONS -from spynnaker.pyNN.utilities.utility_calls import get_n_bits +from spynnaker.pyNN.utilities.utility_calls import get_n_bits, float_gcd from spynnaker.pyNN.models.neuron.master_pop_table import ( MasterPopTableAsBinarySearch) from spynnaker.pyNN.exceptions import SynapticConfigurationException @@ -1029,6 +1029,8 @@ def _calculate_min_weights( min_delta = synapse_dynamics.get_weight_min_delta( self.__max_stdp_spike_delta) min_delta *= weight_scale + # This also depends on the earlier calculated minimum + min_delta = float_gcd(min_delta, weight_min) if min_delta is not None and min_delta != 0: min_weights[synapse_type] = min( min_weights[synapse_type], min_delta) From 3802b285e475b58ba7b17a68ac41a281b75b3155 Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Mon, 3 Aug 2020 10:57:51 +0100 Subject: [PATCH 062/198] tagged IMPLICIT WEIGHT SCALING with a comment --- .../src/neuron/input_types/input_type_conductance.h | 2 +- .../pyNN/models/neuron/input_types/input_type_conductance.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/neuron/input_types/input_type_conductance.h 
b/neural_modelling/src/neuron/input_types/input_type_conductance.h index 59f9d16afb..b81ab5aa9e 100644 --- a/neural_modelling/src/neuron/input_types/input_type_conductance.h +++ b/neural_modelling/src/neuron/input_types/input_type_conductance.h @@ -44,7 +44,7 @@ static inline input_t* input_type_get_input_value( input_t* value, input_type_pointer_t input_type, uint16_t num_receptors) { use(input_type); for (int i = 0; i < num_receptors; i++) { - value[i] = value[i] >> 5; + value[i] = value[i] >> 5; // IMPLICIT WEIGHT SCALING -- the default in main branch is >> 10 (2**10) } return &value[0]; } diff --git a/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py b/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py index d2d0827b3e..c320505609 100644 --- a/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py +++ b/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py @@ -76,7 +76,7 @@ def update_values(self, values, parameters, state_variables): @overrides(AbstractInputType.get_global_weight_scale) def get_global_weight_scale(self): - return float(2**5) + return float(2**5) # IMPLICIT WEIGHT SCALING -- the default in main branch is 2**10 @property def e_rev_E(self): From 7f11fbfdae12f849f8efd50114216e3092925c5c Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Wed, 12 Aug 2020 15:37:07 +0100 Subject: [PATCH 063/198] round to nearest wherever I can --- .../input_types/input_type_conductance.h | 19 ++++++++++++++----- .../src/neuron/models/neuron_model_lif_impl.c | 11 +++++++++-- .../synapse_types_exponential_impl.h | 13 +++++++++++-- 3 files changed, 34 insertions(+), 9 deletions(-) diff --git a/neural_modelling/src/neuron/input_types/input_type_conductance.h b/neural_modelling/src/neuron/input_types/input_type_conductance.h index b81ab5aa9e..5bf1873aab 100644 --- a/neural_modelling/src/neuron/input_types/input_type_conductance.h +++ b/neural_modelling/src/neuron/input_types/input_type_conductance.h @@ -31,6 +31,7 
@@ #endif #include "input_type.h" +#include "round.h" typedef struct input_type_t { // reversal voltage - Excitatory [mV] @@ -44,7 +45,7 @@ static inline input_t* input_type_get_input_value( input_t* value, input_type_pointer_t input_type, uint16_t num_receptors) { use(input_type); for (int i = 0; i < num_receptors; i++) { - value[i] = value[i] >> 5; // IMPLICIT WEIGHT SCALING -- the default in main branch is >> 10 (2**10) + value[i] = value[i] >> 5; } return &value[0]; } @@ -53,8 +54,12 @@ static inline void input_type_convert_excitatory_input_to_current( input_t* exc_input, input_type_pointer_t input_type, state_t membrane_voltage) { for (int i=0; i < NUM_EXCITATORY_RECEPTORS; i++) { - exc_input[i] = exc_input[i] * - (input_type->V_rev_E - membrane_voltage); + // accum = accum * (accum - accum) +// exc_input[i] = exc_input[i] * +// (input_type->V_rev_E - membrane_voltage); + // RTN accum + exc_input[i] = MULT_ROUND_NEAREST_ACCUM(exc_input[i], + (input_type->V_rev_E - membrane_voltage)); } } @@ -62,8 +67,12 @@ static inline void input_type_convert_inhibitory_input_to_current( input_t* inh_input, input_type_pointer_t input_type, state_t membrane_voltage) { for (int i=0; i < NUM_INHIBITORY_RECEPTORS; i++) { - inh_input[i] = -inh_input[i] * - (input_type->V_rev_I - membrane_voltage); + // accum = accum * (accum - accum) +// inh_input[i] = -inh_input[i] * +// (input_type->V_rev_I - membrane_voltage); + // RTN accum + inh_input[i] = MULT_ROUND_NEAREST_ACCUM(-inh_input[i], + (input_type->V_rev_I - membrane_voltage)); } } diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.c b/neural_modelling/src/neuron/models/neuron_model_lif_impl.c index 3bac73de40..45051b9c49 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.c @@ -16,16 +16,23 @@ */ #include "neuron_model_lif_impl.h" +#include "round.h" #include // simple Leaky I&F ODE static inline void lif_neuron_closed_form( 
neuron_pointer_t neuron, REAL V_prev, input_t input_this_timestep) { - REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; + // accum = accum * accum + accum +// REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; + REAL alpha = MULT_ROUND_NEAREST_ACCUM ( + input_this_timestep, neuron->R_membrane) + neuron->V_rest; // update membrane voltage - neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); + // accum - (accum * (accum - accum)) +// neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); + neuron->V_membrane = alpha - MULT_ROUND_NEAREST_ACCUM( + neuron->exp_TC, (alpha - V_prev)); } void neuron_model_set_global_neuron_params( diff --git a/neural_modelling/src/neuron/synapse_types/synapse_types_exponential_impl.h b/neural_modelling/src/neuron/synapse_types/synapse_types_exponential_impl.h index 37ac9b0146..5ee7dbe6c1 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_types_exponential_impl.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_types_exponential_impl.h @@ -41,6 +41,7 @@ #include #include #include "synapse_types.h" +#include "round.h" //--------------------------------------- @@ -77,9 +78,14 @@ typedef enum input_buffer_regions { //! \param[in] parameter: the pointer to the parameters to use //! \return nothing static inline void exp_shaping(exp_params_t* exp_params) { + // RTN // decay value according to decay constant +// exp_params->synaptic_input_value = +// decay_s1615(exp_params->synaptic_input_value, +// exp_params->decay); + exp_params->synaptic_input_value = - decay_s1615(exp_params->synaptic_input_value, + MULT_ROUND_NEAREST_ACCUM(exp_params->synaptic_input_value, exp_params->decay); } @@ -95,8 +101,11 @@ static inline void synapse_types_shape_input( //! \param[in] input the inputs to add. //! 
\return None static inline void add_input_exp(exp_params_t* exp_params, input_t input) { + // RTN +// exp_params->synaptic_input_value = exp_params->synaptic_input_value + +// decay_s1615(input, exp_params->init); exp_params->synaptic_input_value = exp_params->synaptic_input_value + - decay_s1615(input, exp_params->init); + MULT_ROUND_NEAREST_ACCUM(input, exp_params->init); } //! \brief adds the inputs for a give timer period to a given neuron that is From c06ecdddc5ce4c3deed1b05c83534e19826a7a80 Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Wed, 12 Aug 2020 16:02:43 +0100 Subject: [PATCH 064/198] exponential time constant is now an unsigned long fract --- neural_modelling/src/neuron/models/neuron_model_lif_impl.c | 2 +- neural_modelling/src/neuron/models/neuron_model_lif_impl.h | 2 +- .../neuron_models/neuron_model_leaky_integrate_and_fire.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.c b/neural_modelling/src/neuron/models/neuron_model_lif_impl.c index 45051b9c49..ff050889e0 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.c @@ -29,7 +29,7 @@ static inline void lif_neuron_closed_form( input_this_timestep, neuron->R_membrane) + neuron->V_rest; // update membrane voltage - // accum - (accum * (accum - accum)) + // accum - (ufract * (accum - accum)) // neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); neuron->V_membrane = alpha - MULT_ROUND_NEAREST_ACCUM( neuron->exp_TC, (alpha - V_prev)); diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index 73c122a8a9..3c07c282df 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -35,7 +35,7 @@ typedef struct neuron_t { // 'fixed' computation parameter - time constant 
multiplier for // closed-form solution // exp(-(machine time step in ms)/(R * C)) [.] - REAL exp_TC; + UFRACT exp_TC; // offset current [nA] REAL I_offset; diff --git a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_leaky_integrate_and_fire.py b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_leaky_integrate_and_fire.py index 851f5c1c9c..3c5d18e069 100644 --- a/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_leaky_integrate_and_fire.py +++ b/spynnaker/pyNN/models/neuron/neuron_models/neuron_model_leaky_integrate_and_fire.py @@ -54,7 +54,7 @@ def __init__( [DataType.S1615, # v DataType.S1615, # v_rest DataType.S1615, # r_membrane (= tau_m / cm) - DataType.S1615, # exp_tc (= e^(-ts / tau_m)) + DataType.U032, # exp_tc (= e^(-ts / tau_m)) DataType.S1615, # i_offset DataType.INT32, # count_refrac DataType.S1615, # v_reset From 2aa4f945aa46e0f2a12e0c5d93db2ffc6058904e Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Fri, 14 Aug 2020 09:51:06 +0100 Subject: [PATCH 065/198] potentially fixed v recording --- .../src/neuron/implementations/neuron_impl_standard.h | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h index 583d61a8a6..d043828b07 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h @@ -225,6 +225,10 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, &additional_input_array[neuron_index]; synapse_param_pointer_t synapse_type = &neuron_synapse_shaping_params[neuron_index]; + // Get the voltage + state_t voltage = neuron_model_get_membrane_voltage(neuron); + // Record the voltage + recorded_variable_values[V_RECORDING_INDEX] = voltage; // Store whether the neuron has spiked bool spike = false; @@ -232,8 +236,6 @@ static bool neuron_impl_do_timestep_update(index_t 
neuron_index, // Loop however many times requested for (uint32_t i = n_steps_per_timestep; i > 0; i--) { - // Get the voltage - state_t voltage = neuron_model_get_membrane_voltage(neuron); // Get the exc and inh values from the synapses input_t* exc_value = synapse_types_get_excitatory_input(synapse_type); @@ -258,7 +260,6 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, // Call functions to get the input values to be recorded if (i == n_steps_per_timestep) { - recorded_variable_values[V_RECORDING_INDEX] = voltage; recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; } From 129a45ebc2a3454e2376b1202630828b179d8a4b Mon Sep 17 00:00:00 2001 From: Petrut Bogdan Date: Mon, 17 Aug 2020 11:43:38 +0100 Subject: [PATCH 066/198] correct recording of v --- .../src/neuron/implementations/neuron_impl_standard.h | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h index d043828b07..97ae54a6ad 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h @@ -225,17 +225,14 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, &additional_input_array[neuron_index]; synapse_param_pointer_t synapse_type = &neuron_synapse_shaping_params[neuron_index]; - // Get the voltage - state_t voltage = neuron_model_get_membrane_voltage(neuron); - // Record the voltage - recorded_variable_values[V_RECORDING_INDEX] = voltage; // Store whether the neuron has spiked bool spike = false; // Loop however many times requested for (uint32_t i = n_steps_per_timestep; i > 0; i--) { - + // Get the voltage + state_t voltage = neuron_model_get_membrane_voltage(neuron); // Get the exc and inh values from the synapses input_t* exc_value = 
synapse_types_get_excitatory_input(synapse_type); @@ -262,6 +259,8 @@ static bool neuron_impl_do_timestep_update(index_t neuron_index, if (i == n_steps_per_timestep) { recorded_variable_values[GSYN_EXCITATORY_RECORDING_INDEX] = total_exc; recorded_variable_values[GSYN_INHIBITORY_RECORDING_INDEX] = total_inh; + // Record the voltage + recorded_variable_values[V_RECORDING_INDEX] = voltage; } // Call functions to convert exc_input and inh_input to current From 01a55b4cc91bb4fefbe082b16e851a856da8a7c5 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 20 Aug 2020 16:44:29 +0100 Subject: [PATCH 067/198] Again Python 2 appears to be doing something weird --- spynnaker/pyNN/utilities/utility_calls.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index 5b7a5aadd1..f6f1a50ff2 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -304,6 +304,11 @@ def get_n_bits(n_values): def float_gcd(a, b): """ Floating point gcd of two values """ + # Using absolute values for cases where a user has supplied a negative + # weight appears necessary for Python 2.7 + a = abs(a) + b = abs(b) + if (a < b): return float_gcd(b, a) From b3da07b386f6dc019975bea59c6e0d9624acab69 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 1 Sep 2020 16:44:04 +0100 Subject: [PATCH 068/198] Got the merge slightly wrong... 
--- .../src/neuron/plasticity/synapse_dynamics_static_impl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c index 4caabb5207..9faaec05c4 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c @@ -39,7 +39,7 @@ static uint32_t synapse_type_mask; bool synapse_dynamics_initialise( UNUSED address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - UNUSED uint32_t *ring_buffer_to_input_buffer_left_shifts) { + UNUSED REAL *min_weights) { uint32_t n_neurons_power_2 = n_neurons; uint32_t log_n_neurons = 1; if (n_neurons != 1) { From 16bad5d5b211b37a2922c0769e478b7a4e99fae4 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 1 Sep 2020 16:45:39 +0100 Subject: [PATCH 069/198] Wrong argument again... --- .../src/neuron/plasticity/synapse_dynamics_static_impl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c index 9faaec05c4..35554fd095 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c @@ -86,7 +86,7 @@ input_t synapse_dynamics_get_intrinsic_bias( void synapse_dynamics_print_plastic_synapses( UNUSED address_t plastic_region_address, UNUSED address_t fixed_region_address, - UNUSED uint32_t *ring_buffer_to_input_left_shifts) { + UNUSED REAL *min_weights) { } uint32_t synapse_dynamics_get_plastic_pre_synaptic_events(void) { From baf36a1e81f95a6d690ed18848b5c717599baafe Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 17 Sep 2020 09:58:48 +0100 Subject: [PATCH 070/198] Use a better tolerance for float gcd, and give constant a name --- 
spynnaker/pyNN/utilities/utility_calls.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index 4f5ea7f657..ea225f487b 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -40,6 +40,8 @@ ARBITRARY_Y = 13031301 MARS_C_MAX = 698769068 +FLOAT_GCD_TOLERANCE = 0.00001 + STATS_BY_NAME = { 'binomial': RandomStatsBinomialImpl(), 'gamma': RandomStatsGammaImpl(), @@ -339,7 +341,7 @@ def float_gcd(a, b): return float_gcd(b, a) # base case - if (abs(b) < 0.001): + if (abs(b) < FLOAT_GCD_TOLERANCE): return a else: return (float_gcd(b, a - math.floor(a / b) * b)) From 4627b955ae7311b099df09e6b2ef0f664532b2a1 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 17 Sep 2020 10:16:06 +0100 Subject: [PATCH 071/198] flake8 unused import --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 232ada85c1..8ba6fcb5f1 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -17,7 +17,7 @@ import struct import numpy import sys -from scipy import special # @UnresolvedImport + from data_specification.enums import DataType from spinn_front_end_common.utilities.helpful_functions import ( locate_memory_region_for_placement, read_config) From 687f791bc3702fa79c82808a8a791cd39e5ae99b Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 18 Sep 2020 09:26:00 +0100 Subject: [PATCH 072/198] Get closest representable weight before doing gcd, edit tolerance --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 ++ spynnaker/pyNN/utilities/utility_calls.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py 
b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 8ba6fcb5f1..fcff3f55dc 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -1036,6 +1036,7 @@ def _calculate_min_weights( connector = synapse_info.connector weight_min = connector.get_weight_minimum( synapse_info.weights, self.__weight_random_sigma) + weight_min = self.__get_closest_weight(weight_min) weight_min *= weight_scale if weight_min != 0: min_weights[synapse_type] = min( @@ -1045,6 +1046,7 @@ def _calculate_min_weights( if isinstance(synapse_dynamics, SynapseDynamicsSTDP): min_delta = synapse_dynamics.get_weight_min_delta( self.__max_stdp_spike_delta) + min_delta = self.__get_closest_weight(min_delta) min_delta *= weight_scale # This also depends on the earlier calculated minimum min_delta = float_gcd(min_delta, weight_min) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index ea225f487b..47bb661d82 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -40,7 +40,7 @@ ARBITRARY_Y = 13031301 MARS_C_MAX = 698769068 -FLOAT_GCD_TOLERANCE = 0.00001 +FLOAT_GCD_TOLERANCE = 0.0001 STATS_BY_NAME = { 'binomial': RandomStatsBinomialImpl(), From b817ead54a6dae7d168a78fe29775c344aab8af0 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 18 Sep 2020 10:46:05 +0100 Subject: [PATCH 073/198] Fiddle with tolerance again --- spynnaker/pyNN/utilities/utility_calls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index 47bb661d82..b4a0b3bce2 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -40,7 +40,7 @@ ARBITRARY_Y = 13031301 MARS_C_MAX = 698769068 -FLOAT_GCD_TOLERANCE = 0.0001 +FLOAT_GCD_TOLERANCE = 0.0002 STATS_BY_NAME = { 'binomial': RandomStatsBinomialImpl(), From 
9e5ac2aed153480cca8d248c99532f9a87270a79 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 6 Oct 2020 10:10:04 +0100 Subject: [PATCH 074/198] Avoid dividing by zero... --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 5ce4c2a2f0..3983def401 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -1036,7 +1036,8 @@ def _calculate_min_weights( connector = synapse_info.connector weight_min = connector.get_weight_minimum( synapse_info.weights, self.__weight_random_sigma) - weight_min = self.__get_closest_weight(weight_min) + if weight_min != 0: + weight_min = self.__get_closest_weight(weight_min) weight_min *= weight_scale if weight_min != 0: min_weights[synapse_type] = min( From 15be8d3b6ad61a22baaeeb99cf896388ce90d559 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 15 Oct 2020 15:11:32 +0100 Subject: [PATCH 075/198] flake8 sort imports out --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index a4ad3d68de..11e45190b9 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -17,7 +17,7 @@ import numpy import sys -from collections import defaultdict, namedtuple +from collections import defaultdict from six import itervalues from spinn_utilities.progress_bar import ProgressBar @@ -30,14 +30,12 @@ .provenance_data_item import ProvenanceDataItem from spynnaker.pyNN.models.neural_projections import ProjectionMachineEdge -from spynnaker.pyNN.models.abstract_models import AbstractMaxSpikes from spynnaker.pyNN.models.neuron.synapse_io import SynapseIORowBased from 
spynnaker.pyNN.models.neuron.synapse_dynamics import ( SynapseDynamicsSTDP, SynapseDynamicsStructuralStatic) -from spynnaker.pyNN.utilities.constants import ( - POPULATION_BASED_REGIONS, POSSION_SIGMA_SUMMATION_LIMIT) +from spynnaker.pyNN.utilities.constants import POPULATION_BASED_REGIONS from spynnaker.pyNN.utilities.utility_calls import get_n_bits, float_gcd -from spynnaker.pyNN.utilities.running_stats import RunningStats +from spynnaker.pyNN.exceptions import SynapticConfigurationException from .synapse_dynamics import ( AbstractSynapseDynamics, AbstractSynapseDynamicsStructural) @@ -545,7 +543,7 @@ def __check_weights( :param ~.MachineVertex machine_vertex: The machine vertex """ for machine_edge in machine_graph.get_edges_ending_at_vertex( - machine_vertex): + machine_vertex): if isinstance(machine_edge, ProjectionMachineEdge): for synapse_info in machine_edge.synapse_information: weights = synapse_info.weights From 454762420b6a3729999fbe874b88066af9025f7a Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 15 Oct 2020 15:13:39 +0100 Subject: [PATCH 076/198] Missing arguments in unittest functions --- .../model_tests/neuron/test_synaptic_manager.py | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index 0fe8e2dcc1..cc2a6c2678 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -319,7 +319,8 @@ def test_set_synapse_dynamics(): AbstractSpiNNakerCommon.CONFIG_FILE_NAME, default_config_paths) synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, config=config, drop_late_spikes=True) + spikes_per_second=100.0, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config, drop_late_spikes=True) static = SynapseDynamicsStatic() stdp = SynapseDynamicsSTDP( @@ -411,7 +412,8 
@@ def test_set_synapse_dynamics(): # Try starting again to get a couple more combinations synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, config=config, drop_late_spikes=True) + spikes_per_second=100.0, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config, drop_late_spikes=True) # STDP followed by structural STDP should result in Structural STDP synaptic_manager.synapse_dynamics = stdp @@ -432,7 +434,8 @@ def test_set_synapse_dynamics(): # One more time! synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, config=config, drop_late_spikes=True) + spikes_per_second=100.0, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config, drop_late_spikes=True) # Static followed by static structural should result in static # structural @@ -468,7 +471,8 @@ def test_set_synapse_dynamics(): # OK, just one more, honest synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, config=config, drop_late_spikes=True) + spikes_per_second=100.0, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config, drop_late_spikes=True) synaptic_manager.synapse_dynamics = static_struct synaptic_manager.synapse_dynamics = stdp_struct @@ -601,7 +605,8 @@ def test_pop_based_master_pop_table_standard( spec = DataSpecificationGenerator(io.FileIO(temp_spec, "wb"), None) synaptic_manager = SynapticManager( n_synapse_types=2, ring_buffer_sigma=5.0, - spikes_per_second=100.0, config=config, drop_late_spikes=True) + spikes_per_second=100.0, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, config=config, drop_late_spikes=True) synaptic_manager.write_data_spec( spec, post_app_vertex, post_vertex_slice, post_mac_vertex, mac_graph, app_graph, routing_info, 1.0, 1.0) From 0b67f242c80de59606736d33e8cfae666e013f67 Mon Sep 17 00:00:00 2001 From: 
Andrew Gait Date: Fri, 16 Oct 2020 09:19:58 +0100 Subject: [PATCH 077/198] Update to work properly for cases with multiple edges into same vertex --- .../connectors/abstract_connector.py | 2 + .../pyNN/models/neuron/synaptic_manager.py | 54 +++++++------------ .../neuron/test_synaptic_manager.py | 12 +++-- 3 files changed, 29 insertions(+), 39 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 31a228ce65..7a825c7dbe 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -300,6 +300,8 @@ def get_weight_minimum(self, weights, weight_random_sigma): # across the array of weights that we need non_zero_weights = numpy.abs(weights)[ numpy.nonzero(numpy.abs(weights))] + if len(non_zero_weights) == 0: + return 0.0 return utility_calls.float_gcd_of_array(non_zero_weights) raise Exception("Unrecognised weight format") diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 11e45190b9..90b83be558 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -29,7 +29,7 @@ from spinn_front_end_common.utilities.utility_objs\ .provenance_data_item import ProvenanceDataItem -from spynnaker.pyNN.models.neural_projections import ProjectionMachineEdge +from spynnaker.pyNN.models.neural_projections import ProjectionApplicationEdge from spynnaker.pyNN.models.neuron.synapse_io import SynapseIORowBased from spynnaker.pyNN.models.neuron.synapse_dynamics import ( SynapseDynamicsSTDP, SynapseDynamicsStructuralStatic) @@ -436,27 +436,6 @@ def _reserve_memory_regions( size=synapse_structural_dynamics_sz, label='synapseDynamicsStructuralParams') -# def _write_synapse_parameters( -# self, spec, min_weights, weight_scale): -# """Get 
the ring buffer shifts and scaling factors. -# :param ~.DataSpecificationGenerator spec: The data spec to reserve in -# :param ~numpy.ndarray min_weights: Minimum weights per synapse type -# :param float weight_scale: The weight scale value to use when writing -# :rtype: ~numpy.ndarray -# """ -# spec.switch_write_focus(self._synapse_params_region) -# -# # write the bool for deleting packets that were too late for a timer -# spec.write_value(int(self.__drop_late_spikes)) -# -# # Write the minimum weights -# for w in min_weights: -# spec.write_value(w, data_type=DataType.S1615) -# -# # Return the weight scaling factors -# return numpy.array([(1 / w) * weight_scale if w != 0 else 0 -# for w in min_weights]) - def __get_closest_weight(self, value): """ Get the best representation of the weight so that both weight and 1 / w work @@ -482,10 +461,11 @@ def _calculate_min_weights( synapse_map = dict() for machine_edge in machine_graph.get_edges_ending_at_vertex( machine_vertex): - if isinstance(machine_edge, ProjectionMachineEdge): - for synapse_info in machine_edge.synapse_information: + app_edge = machine_edge.app_edge + if isinstance(app_edge, ProjectionApplicationEdge): + for synapse_info in app_edge.synapse_information: # Per synapse info we need any one of the edges - synapse_map[synapse_info] = machine_edge + synapse_map[synapse_info] = app_edge for synapse_info in synapse_map: synapse_type = synapse_info.synapse_type @@ -498,6 +478,7 @@ def _calculate_min_weights( weight_min = self.__get_closest_weight(weight_min) weight_min *= weight_scale if weight_min != 0: + weight_min = float_gcd(min_weights[synapse_type], weight_min) min_weights[synapse_type] = min( min_weights[synapse_type], weight_min) @@ -515,6 +496,8 @@ def _calculate_min_weights( weight_min = synapse_dynamics.initial_weight weight_min *= weight_scale if weight_min != 0: + weight_min = float_gcd(min_weights[synapse_type], + weight_min) min_weights[synapse_type] = min( min_weights[synapse_type], 
weight_min) @@ -531,6 +514,7 @@ def _calculate_min_weights( self.__check_weights( min_weights, weight_scale, machine_graph, machine_vertex) + return min_weights def __check_weights( @@ -544,23 +528,24 @@ def __check_weights( """ for machine_edge in machine_graph.get_edges_ending_at_vertex( machine_vertex): - if isinstance(machine_edge, ProjectionMachineEdge): - for synapse_info in machine_edge.synapse_information: + app_edge = machine_edge.app_edge + if isinstance(app_edge, ProjectionApplicationEdge): + for synapse_info in app_edge.synapse_information: weights = synapse_info.weights synapse_type = synapse_info.synapse_type min_weight = min_weights[synapse_type] if numpy.isscalar(weights): self.__check_weight( - min_weight, weights, weight_scale, machine_edge, - synapse_info) + min_weight, weights, weight_scale, + app_edge, synapse_info) elif hasattr(weights, "__getitem__"): for w in weights: self.__check_weight( - min_weight, w, weight_scale, machine_edge, - synapse_info) + min_weight, w, weight_scale, + app_edge, synapse_info) def __check_weight( - self, min_weight, weight, weight_scale, machine_edge, + self, min_weight, weight, weight_scale, app_edge, synapse_info): """ Warn the user about a weight that can't be represented properly where possible @@ -575,10 +560,9 @@ def __check_weight( r_weight) * min_weight) / weight_scale if weight != r_weight: self.__weight_provenance[weight, r_weight].append( - (machine_edge, synapse_info)) + (app_edge, synapse_info)) - def _get_min_weights( - self, machine_vertex, machine_graph, weight_scale): + def _get_min_weights(self, machine_vertex, machine_graph, weight_scale): if self.__min_weights is None: self.__min_weights = self._calculate_min_weights( machine_vertex, machine_graph, weight_scale) diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index cc2a6c2678..473614e6d7 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ 
b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -275,7 +275,8 @@ def test_write_data_spec(): # Check that all the connections have the right weight and delay assert len(connections_1) == post_vertex_slice.n_atoms - assert all([conn["weight"] == 1.5 for conn in connections_1]) + assert all([numpy.isclose(conn["weight"], 1.5, atol=0.001) + for conn in connections_1]) assert all([conn["delay"] == 1.0 for conn in connections_1]) connections_2 = synaptic_manager.get_connections_from_machine( @@ -284,7 +285,8 @@ def test_write_data_spec(): # Check that all the connections have the right weight and delay assert len(connections_2) == post_vertex_slice.n_atoms - assert all([conn["weight"] == 2.5 for conn in connections_2]) + assert all([numpy.isclose(conn["weight"], 2.5, atol=0.001) + for conn in connections_2]) assert all([conn["delay"] == 2.0 for conn in connections_2]) connections_3 = synaptic_manager.get_connections_from_machine( @@ -294,7 +296,8 @@ def test_write_data_spec(): # Check that all the connections have the right weight and delay assert len(connections_3) == \ post_vertex_slice.n_atoms * pre_vertex_slice.n_atoms - assert all([conn["weight"] == 4.5 for conn in connections_3]) + assert all([numpy.isclose(conn["weight"], 4.5, atol=0.001) + for conn in connections_3]) assert all([conn["delay"] == 4.0 for conn in connections_3]) connections_4 = synaptic_manager.get_connections_from_machine( @@ -304,7 +307,8 @@ def test_write_data_spec(): assert len(connections_4) == len(from_list_list) list_weights = [values[2] for values in from_list_list] list_delays = [values[3] for values in from_list_list] - assert all(list_weights == connections_4["weight"]) + assert numpy.allclose( + list_weights, connections_4["weight"], atol=0.001) assert all(list_delays == connections_4["delay"]) finally: shutil.rmtree(report_folder, ignore_errors=True) From ce0f6030fef635ae14dd1889509dda0454c4e023 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 20 Oct 2020 11:50:38 
+0100 Subject: [PATCH 078/198] Smarter multiplication needed... --- neural_modelling/src/neuron/synapses.h | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/synapses.h b/neural_modelling/src/neuron/synapses.h index c4c6d4ce32..b41463cb2d 100644 --- a/neural_modelling/src/neuron/synapses.h +++ b/neural_modelling/src/neuron/synapses.h @@ -57,16 +57,20 @@ static inline index_t synapses_get_ring_buffer_index_combined( //! \brief Converts a weight stored in a synapse row to an input //! \param[in] weight: the weight to convert in synapse-row form -//! \param[in] left_shift: the shift to use when decoding +//! \param[in] min_weight: the minimum weight to use in the conversion //! \return the actual input weight for the model static inline input_t synapses_convert_weight_to_input( weight_t weight, REAL min_weight) { - return weight * min_weight; + // Simply doing weight * min_weight adds unnecessary compiler instructions + uint64_t mw = (uint64_t) bitsk(min_weight); + uint64_t w = (uint64_t) (weight); + + return kbits((int_k_t) (mw * w)); } //! \brief Print the weight of a synapse //! \param[in] weight: the weight to print in synapse-row form -//! \param[in] left_shift: the shift to use when decoding +//! 
\param[in] min_weight: the minimum weight to use in the conversion static inline void synapses_print_weight( weight_t weight, REAL min_weight) { if (weight != 0) { From 7f8098ae5c41624da85b36764afa83de77ea24b3 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 20 Oct 2020 14:32:59 +0100 Subject: [PATCH 079/198] Update to fix STDP examples --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 5 ++--- spynnaker/pyNN/utilities/utility_calls.py | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 90b83be558..e1bdaeb1c7 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -17,6 +17,8 @@ import numpy import sys +from __future__ import division + from collections import defaultdict from six import itervalues @@ -474,8 +476,6 @@ def _calculate_min_weights( weight_min = connector.get_weight_minimum( synapse_info.weights, self.__weight_random_sigma) - if weight_min != 0: - weight_min = self.__get_closest_weight(weight_min) weight_min *= weight_scale if weight_min != 0: weight_min = float_gcd(min_weights[synapse_type], weight_min) @@ -485,7 +485,6 @@ def _calculate_min_weights( if isinstance(synapse_dynamics, SynapseDynamicsSTDP): min_delta = synapse_dynamics.get_weight_min_delta( self.__max_stdp_spike_delta) - min_delta = self.__get_closest_weight(min_delta) min_delta *= weight_scale # This also depends on the earlier calculated minimum min_delta = float_gcd(min_delta, weight_min) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index b4a0b3bce2..ea225f487b 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -40,7 +40,7 @@ ARBITRARY_Y = 13031301 MARS_C_MAX = 698769068 -FLOAT_GCD_TOLERANCE = 0.0002 +FLOAT_GCD_TOLERANCE = 0.00001 STATS_BY_NAME = { 'binomial': RandomStatsBinomialImpl(), 
From 04782bc5a433cb87dbcaf5338928987bdaa91949 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 20 Oct 2020 14:38:21 +0100 Subject: [PATCH 080/198] Seriously... --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index e1bdaeb1c7..799c036bd1 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -13,12 +13,12 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +from __future__ import division + import struct import numpy import sys -from __future__ import division - from collections import defaultdict from six import itervalues From 156a8e01dc1a256c6813f8e15de21589db447652 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 20 Oct 2020 14:56:09 +0100 Subject: [PATCH 081/198] Another one to stop python 2.7 complaining --- spynnaker/pyNN/utilities/utility_calls.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index ea225f487b..3ae4b355bf 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -16,6 +16,8 @@ """ utility class containing simple helper methods """ +from __future__ import division + import os import logging import math From 4397eefb2ea986280b054f4999d312bea863d30a Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 21 Oct 2020 16:44:54 +0100 Subject: [PATCH 082/198] Calculation should happen after checking None / zero --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 799c036bd1..c912081877 100644 --- 
a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -486,9 +486,9 @@ def _calculate_min_weights( min_delta = synapse_dynamics.get_weight_min_delta( self.__max_stdp_spike_delta) min_delta *= weight_scale - # This also depends on the earlier calculated minimum - min_delta = float_gcd(min_delta, weight_min) if min_delta is not None and min_delta != 0: + # This also depends on the earlier calculated minimum + min_delta = float_gcd(min_delta, weight_min) min_weights[synapse_type] = min( min_weights[synapse_type], min_delta) elif isinstance(synapse_dynamics, SynapseDynamicsStructuralStatic): From 4dc06593e64bc093ba7acae21313a19f50cf43fa Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 15 Jan 2021 10:22:01 +0000 Subject: [PATCH 083/198] flake8 unused import --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 1 - 1 file changed, 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index cd478141ad..d53fa6753c 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -32,7 +32,6 @@ .provenance_data_item import ProvenanceDataItem from spynnaker.pyNN.models.neural_projections import ProjectionApplicationEdge -from spynnaker.pyNN.models.abstract_models import AbstractMaxSpikes from spynnaker.pyNN.models.neuron.synapse_io import SynapseIORowBased from spynnaker.pyNN.models.neuron.synapse_dynamics import ( SynapseDynamicsSTDP, SynapseDynamicsStructuralStatic) From e182a4271bef4e456a2113d755c4fd5afeb56065 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 15 Jan 2021 10:32:33 +0000 Subject: [PATCH 084/198] missing argument --- .../external_device_lif_control_vertex.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py 
b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py index 0abc485eac..2a999e36df 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py @@ -54,7 +54,7 @@ def __init__( pynn_model, translator=None, spikes_per_second=None, label=None, ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, max_stdp_spike_delta=None, incoming_spike_buffer_size=None, - drop_late_spikes=None, constraints=None): + drop_late_spikes=None, constraints=None, splitter=None): """ :param list(AbstractMulticastControllableDevice) devices: The AbstractMulticastControllableDevice instances to be controlled @@ -115,7 +115,7 @@ def __init__( spikes_per_second, ring_buffer_sigma, min_weights, weight_random_sigma, max_stdp_spike_delta, incoming_spike_buffer_size, neuron_impl, pynn_model, - drop_late_spikes) + drop_late_spikes, splitter) def routing_key_partition_atom_mapping(self, routing_info, partition): # pylint: disable=arguments-differ From 2a3915468f3337657113787450995a9fd3e337fe Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 15 Jan 2021 12:03:26 +0000 Subject: [PATCH 085/198] Attempt to bypass python2 issue --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index d53fa6753c..81730ae606 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -483,7 +483,7 @@ def _calculate_min_weights( weight_min = connector.get_weight_minimum( synapse_info.weights, self.__weight_random_sigma) weight_min *= weight_scale - if weight_min != 0: + if weight_min != 0 or not numpy.isnan(weight_min): weight_min = float_gcd(min_weights[synapse_type], weight_min) min_weights[synapse_type] = min( min_weights[synapse_type], 
weight_min) From 7501669d3675e03c774072c8cb4092e05049135b Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 15 Jan 2021 12:13:23 +0000 Subject: [PATCH 086/198] Helps if you actually write what you meant to... and, not or... --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 81730ae606..6636591720 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -483,7 +483,7 @@ def _calculate_min_weights( weight_min = connector.get_weight_minimum( synapse_info.weights, self.__weight_random_sigma) weight_min *= weight_scale - if weight_min != 0 or not numpy.isnan(weight_min): + if weight_min != 0 and not numpy.isnan(weight_min): weight_min = float_gcd(min_weights[synapse_type], weight_min) min_weights[synapse_type] = min( min_weights[synapse_type], weight_min) From 898651eadc7892d1e76cfbbb18c465bbbd1755ae Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 19 Jan 2021 16:25:39 +0000 Subject: [PATCH 087/198] Fix multiplicative STDP rule --- .../weight_multiplicative_impl.c | 6 ++-- .../weight_multiplicative_impl.h | 28 ++++++++++--------- .../weight_dependence_multiplicative.py | 6 ++-- spynnaker/pyNN/utilities/utility_calls.py | 2 +- 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c index 5fc610e323..c4dabd1eb9 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c @@ -30,8 +30,8 @@ plasticity_weight_region_data_t *plasticity_weight_region_data; typedef struct { int32_t min_weight; int32_t 
max_weight; - int32_t a2_plus; - int32_t a2_minus; + REAL a2_plus; + REAL a2_minus; } multiplicative_config_t; //--------------------------------------- @@ -60,7 +60,7 @@ address_t weight_initialise( dtcm_copy[s].a2_plus = config->a2_plus; dtcm_copy[s].a2_minus = config->a2_minus; - log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d", + log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%k, A2-:%k", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus); } diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h index f31ff0b81d..6393468e6f 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h @@ -35,8 +35,8 @@ typedef struct { int32_t min_weight; //!< Minimum weight int32_t max_weight; //!< Maximum weight - int32_t a2_plus; //!< Amount to move weight on potentiation - int32_t a2_minus; //!< Amount to move weight on depression + REAL a2_plus; //!< Amount to move weight on potentiation + REAL a2_minus; //!< Amount to move weight on depression } plasticity_weight_region_data_t; //! 
The current state data for the rule @@ -83,15 +83,16 @@ static inline weight_state_t weight_get_initial( static inline weight_state_t weight_one_term_apply_depression( weight_state_t state, int32_t depression) { // Calculate scale - // **NOTE** this calculation must be done at runtime-defined weight - // fixed-point format - int32_t scale = maths_fixed_mul16( - state.weight - state.weight_region->min_weight, - state.weight_region->a2_minus, 0); + int32_t scale = mulik((state.weight - state.weight_region->min_weight), + state.weight_region->a2_minus); // Multiply scale by depression and subtract // **NOTE** using standard STDP fixed-point format handles format conversion state.weight -= STDP_FIXED_MUL_16X16(scale, depression); + + log_debug("weight, min_weight (dep), scale, depression %d %d %d %d", + state.weight, state.weight_region->min_weight, scale, depression); + return state; } //--------------------------------------- @@ -102,15 +103,16 @@ static inline weight_state_t weight_one_term_apply_depression( static inline weight_state_t weight_one_term_apply_potentiation( weight_state_t state, int32_t potentiation) { // Calculate scale - // **NOTE** this calculation must be done at runtime-defined weight - // fixed-point format - int32_t scale = maths_fixed_mul16( - state.weight_region->max_weight - state.weight, - state.weight_region->a2_plus, 0); + int32_t scale = mulik((state.weight_region->max_weight - state.weight), + state.weight_region->a2_plus); // Multiply scale by potentiation and add // **NOTE** using standard STDP fixed-point format handles format conversion state.weight += STDP_FIXED_MUL_16X16(scale, potentiation); + + log_debug("weight, max_weight (pot), scale, potentiation: %d %d %d %d", + state.weight, state.weight_region->max_weight, scale, potentiation); + return state; } //--------------------------------------- @@ -120,7 +122,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( * \return The new weight. 
*/ static inline weight_t weight_get_final(weight_state_t new_state) { - log_debug("\tnew_weight:%d\n", new_state.weight); + log_info("\tnew_weight:%d\n", new_state.weight); return (weight_t) new_state.weight; } diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py index d369abeb48..d45dc2d784 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py @@ -97,10 +97,8 @@ def write_parameters( spec.write_value( data=int(round(self.__w_max * w)), data_type=DataType.INT32) - spec.write_value( - data=int(round(self.A_plus * w)), data_type=DataType.INT32) - spec.write_value( - data=int(round(self.A_minus * w)), data_type=DataType.INT32) + spec.write_value(data=self.A_plus, data_type=DataType.S1615) + spec.write_value(data=self.A_minus, data_type=DataType.S1615) @property def weight_maximum(self): diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index e96c4a0b76..8e5ff7c6ec 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -43,7 +43,7 @@ ARBITRARY_Y = 13031301 MARS_C_MAX = 698769068 -FLOAT_GCD_TOLERANCE = 0.00001 +FLOAT_GCD_TOLERANCE = 0.0001 STATS_BY_NAME = { 'binomial': RandomStatsBinomialImpl(), From d44b9e752f1cf1e6058906ce73364f1301269cb8 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 19 Jan 2021 17:19:26 +0000 Subject: [PATCH 088/198] Try this tolerance instead... ? 
--- spynnaker/pyNN/utilities/utility_calls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index 8e5ff7c6ec..a6922087e9 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -43,7 +43,7 @@ ARBITRARY_Y = 13031301 MARS_C_MAX = 698769068 -FLOAT_GCD_TOLERANCE = 0.0001 +FLOAT_GCD_TOLERANCE = 0.0002 STATS_BY_NAME = { 'binomial': RandomStatsBinomialImpl(), From 87f490047ceed2e386ae681c754c50b2aacdcf42 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 5 Feb 2021 17:09:27 +0000 Subject: [PATCH 089/198] missed these when merging --- .../test_using_virtual_board/test_from_file_connector.py | 8 ++++---- .../test_using_virtual_board/test_from_list_connector.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/unittests/test_using_virtual_board/test_from_file_connector.py b/unittests/test_using_virtual_board/test_from_file_connector.py index d42c861aef..02022bb3c1 100644 --- a/unittests/test_using_virtual_board/test_from_file_connector.py +++ b/unittests/test_using_virtual_board/test_from_file_connector.py @@ -41,13 +41,13 @@ def check_weights( self.assertEqual(from_as[0], source) self.assertEqual(from_as[1], dest) if w_index: - self.assertAlmostEqual(from_as[w_index], weight, 4) + self.assertAlmostEqual(from_as[w_index], weight, 3) else: - self.assertEqual(WEIGHT, weight) + self.assertAlmostEqual(WEIGHT, weight, 3) if d_index: - self.assertAlmostEqual(from_as[d_index], delay, 4) + self.assertAlmostEqual(from_as[d_index], delay, 3) else: - self.assertEqual(DELAY, delay) + self.assertAlmostEqual(DELAY, delay, 3) as_index += 1 while as_index < len(aslist): from_as = aslist[as_index] diff --git a/unittests/test_using_virtual_board/test_from_list_connector.py b/unittests/test_using_virtual_board/test_from_list_connector.py index e0a36993ec..61439fd644 100644 --- 
a/unittests/test_using_virtual_board/test_from_list_connector.py +++ b/unittests/test_using_virtual_board/test_from_list_connector.py @@ -38,13 +38,13 @@ def check_weights( self.assertEqual(from_as[0], source) self.assertEqual(from_as[1], dest) if w_index: - self.assertAlmostEqual(from_as[w_index], weight, 4) + self.assertAlmostEqual(from_as[w_index], weight, 3) else: - self.assertEqual(WEIGHT, weight) + self.assertAlmostEqual(WEIGHT, weight, 3) if d_index: - self.assertAlmostEqual(from_as[d_index], delay, 4) + self.assertAlmostEqual(from_as[d_index], delay, 3) else: - self.assertEqual(DELAY, delay) + self.assertAlmostEqual(DELAY, delay, 3) as_index += 1 while as_index < len(aslist): from_as = aslist[as_index] From 18d3268a2223ee02a7344941e5af3119478f4ea5 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 1 Mar 2021 09:23:13 +0000 Subject: [PATCH 090/198] flake8 imports --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 11 ++--------- spynnaker/pyNN/utilities/utility_calls.py | 1 + 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 3d3035c3a4..3b2a6ac18a 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -13,32 +13,25 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-import struct -import math import sys - import numpy -from scipy import special # @UnresolvedImport from collections import defaultdict from spinn_utilities.progress_bar import ProgressBar from data_specification.enums import DataType from spinn_front_end_common.utilities.helpful_functions import read_config -from spinn_front_end_common.utilities.constants import ( - BYTES_PER_WORD, MICRO_TO_SECOND_CONVERSION) +from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spinn_front_end_common.utilities.utility_objs\ .provenance_data_item import ProvenanceDataItem from spynnaker.pyNN.models.neural_projections import ProjectionApplicationEdge from spynnaker.pyNN.models.neuron.synapse_io import SynapseIORowBased from spynnaker.pyNN.models.neuron.synapse_dynamics import ( SynapseDynamicsSTDP, SynapseDynamicsStructuralStatic) -from spynnaker.pyNN.utilities.constants import ( - POPULATION_BASED_REGIONS, POSSION_SIGMA_SUMMATION_LIMIT) +from spynnaker.pyNN.utilities.constants import POPULATION_BASED_REGIONS from spynnaker.pyNN.utilities.utility_calls import get_n_bits, float_gcd from spynnaker.pyNN.exceptions import SynapticConfigurationException -from spynnaker.pyNN.utilities.running_stats import RunningStats from .synapse_dynamics import ( AbstractSynapseDynamics, AbstractSynapseDynamicsStructural) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index 8656912a20..2eb8a85882 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -380,6 +380,7 @@ def float_gcd_of_array(input): return gcd + def moved_in_v6(old_location, new_location): """ Warns the users that they are using an old import. 
From cb2a3cedc37d4784fb3d4670287d9bcfc416c47c Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 1 Mar 2021 14:17:30 +0000 Subject: [PATCH 091/198] Further tidying up --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 3b2a6ac18a..b5f809a32c 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -15,7 +15,6 @@ import sys import numpy - from collections import defaultdict from spinn_utilities.progress_bar import ProgressBar @@ -25,6 +24,7 @@ from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spinn_front_end_common.utilities.utility_objs\ .provenance_data_item import ProvenanceDataItem + from spynnaker.pyNN.models.neural_projections import ProjectionApplicationEdge from spynnaker.pyNN.models.neuron.synapse_io import SynapseIORowBased from spynnaker.pyNN.models.neuron.synapse_dynamics import ( From 68fd70de57004dffdfcd782314fd2e8739631262 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 13 Apr 2021 11:33:16 +0100 Subject: [PATCH 092/198] Don't need to check weight if using distance-dependent; fix test --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 15 ++++++++------- .../test_distance_dependent_weights_and_delays.py | 2 +- 2 files changed, 9 insertions(+), 8 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 7c0414061f..75635187e1 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -530,15 +530,16 @@ def __check_weights( weights = synapse_info.weights synapse_type = synapse_info.synapse_type min_weight = min_weights[synapse_type] - if numpy.isscalar(weights): - self.__check_weight( - min_weight, weights, weight_scale, - app_edge, synapse_info) - elif 
hasattr(weights, "__getitem__"): - for w in weights: + if not isinstance(weights, str): + if numpy.isscalar(weights): self.__check_weight( - min_weight, w, weight_scale, + min_weight, weights, weight_scale, app_edge, synapse_info) + elif hasattr(weights, "__getitem__"): + for w in weights: + self.__check_weight( + min_weight, w, weight_scale, + app_edge, synapse_info) def __check_weight( self, min_weight, weight, weight_scale, app_edge, diff --git a/spynnaker_integration_tests/test_grid_based_connectors/test_distance_dependent_weights_and_delays.py b/spynnaker_integration_tests/test_grid_based_connectors/test_distance_dependent_weights_and_delays.py index a5ea731607..c6efd5c159 100644 --- a/spynnaker_integration_tests/test_grid_based_connectors/test_distance_dependent_weights_and_delays.py +++ b/spynnaker_integration_tests/test_grid_based_connectors/test_distance_dependent_weights_and_delays.py @@ -119,7 +119,7 @@ def check_exc_weights(self, exc_weights_delays): def check_inh_weights(self, inh_weights_delays): for conn in inh_weights_delays: # weights are constant - self.assertEqual(1.5, conn[2]) + self.assertAlmostEqual(1.5, conn[2], places=3) source_pos = self.POSITIONS[conn[0]] target_pos = self.POSITIONS[conn[1]] dist = math.sqrt((source_pos[0]-target_pos[0])**2 + From 3aa4ef50b253d354aa658de35b13bdcd40cff663 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 29 Apr 2021 17:27:05 +0100 Subject: [PATCH 093/198] Missed a removed config --- spynnaker/pyNN/models/neuron/abstract_population_vertex.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index a4c8d06608..168ae3582d 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -162,7 +162,7 @@ def __init__( self.__synapse_manager = SynapticManager( self.__neuron_impl.get_n_synapse_types(), 
ring_buffer_sigma, spikes_per_second, min_weights, weight_random_sigma, - max_stdp_spike_delta, config, drop_late_spikes) + max_stdp_spike_delta, drop_late_spikes) # bool for if state has changed. self.__change_requires_mapping = True From 03c2d340fcea3f5e831ef99defca1396ad839be9 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 29 Apr 2021 17:31:32 +0100 Subject: [PATCH 094/198] Actually import function --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 4543ce545e..6a27676449 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -21,7 +21,7 @@ from data_specification.enums import DataType from spinn_utilities.config_holder import ( - get_config_float, get_config_int, get_config_bool) + get_config_float, get_config_int, get_config_bool, get_config_str_list) from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spinn_front_end_common.utilities.utility_objs\ .provenance_data_item import ProvenanceDataItem From f16aee3fc7a1da0db7c1470407ff4e018f2f5c79 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 29 Apr 2021 17:42:48 +0100 Subject: [PATCH 095/198] Use get_config_str instead? 
--- spynnaker/pyNN/models/neuron/synaptic_manager.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 6a27676449..6b8efcf929 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -21,7 +21,7 @@ from data_specification.enums import DataType from spinn_utilities.config_holder import ( - get_config_float, get_config_int, get_config_bool, get_config_str_list) + get_config_float, get_config_int, get_config_bool, get_config_str) from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spinn_front_end_common.utilities.utility_objs\ .provenance_data_item import ProvenanceDataItem @@ -174,8 +174,7 @@ def __init__( # meaning "auto calculate"; the number of weights needs to match # the number of synapse types if self.__min_weights is None: - config_min_weights = get_config_str_list( - "Simulation", "min_weights") + config_min_weights = get_config_str("Simulation", "min_weights") if config_min_weights is not None: self.__min_weights = [float(v) for v in config_min_weights.split(',')] From d57a79f53646f58d871946b0ec13544dcc76bd9e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 29 Apr 2021 17:48:44 +0100 Subject: [PATCH 096/198] Switch to get_config_float for these parameters --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 6b8efcf929..b763dea150 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -189,10 +189,10 @@ def __init__( # Read the other minimum weight configuration parameters if self.__weight_random_sigma is None: - self.__weight_random_sigma = config.getfloat( + self.__weight_random_sigma = get_config_float( 
"Simulation", "weight_random_sigma") if self.__max_stdp_spike_delta is None: - self.__max_stdp_spike_delta = config.getfloat( + self.__max_stdp_spike_delta = get_config_float( "Simulation", "max_stdp_spike_delta") # Get drop_late_spikes from config if not set From 40e36105957f2087771ffb175ca22543d539fe73 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 10 Jun 2021 09:20:30 +0100 Subject: [PATCH 097/198] Unneeded argument --- .../neural_projections/connectors/from_list_connector.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py index 75750ef5f7..9a5744edb7 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/from_list_connector.py @@ -87,9 +87,8 @@ def __init__(self, conn_list, safe=True, verbose=False, column_names=None, self.conn_list = conn_list @overrides(AbstractConnector.set_projection_information) - def set_projection_information(self, machine_time_step, synapse_info): - AbstractConnector.set_projection_information( - self, machine_time_step, synapse_info) + def set_projection_information(self, synapse_info): + AbstractConnector.set_projection_information(self, synapse_info) # now we want to tell the synapse_info about weights and delays if self.__weights is not None: synapse_info.weights = self.__weights.flatten() From e641aef2607d7cc9e4cdfcf51e569c378b1c81cb Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 10 Jun 2021 09:43:54 +0100 Subject: [PATCH 098/198] Argument left in here too somehow; removed --- .../models/neural_projections/connectors/kernel_connector.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py index 
d110819ad5..e22faec763 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/kernel_connector.py @@ -310,9 +310,8 @@ def __compute_statistics( numpy.array(all_delays), numpy.array(all_weights)) @overrides(AbstractConnector.set_projection_information) - def set_projection_information(self, machine_time_step, synapse_info): - AbstractConnector.set_projection_information( - self, machine_time_step, synapse_info) + def set_projection_information(self, synapse_info): + AbstractConnector.set_projection_information(self, synapse_info) # now we want to tell the synapse_info about weights and delays if self._krn_weights is not None: synapse_info.weights = self._krn_weights.flatten() From 9c6d00a79392d035fe8dc86f0c0d9892868d4a19 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 10 Jun 2021 09:51:04 +0100 Subject: [PATCH 099/198] flake8 unused imports; replace get_simulator --- .../timing_dependence_pfister_spike_triplet.py | 2 +- .../timing_dependence/timing_dependence_spike_nearest_pair.py | 2 +- .../stdp/timing_dependence/timing_dependence_spike_pair.py | 2 +- .../stdp/timing_dependence/timing_dependence_vogels_2011.py | 2 +- spynnaker/pyNN/models/neuron/synaptic_manager.py | 2 -- .../test_struct_pl/test_structural_shared.py | 2 +- 6 files changed, 5 insertions(+), 7 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py index 66b2491a79..3fc7ac6898 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_pfister_spike_triplet.py @@ -193,7 +193,7 @@ def get_parameter_names(self): @overrides(AbstractTimingDependence.minimum_delta) def minimum_delta(self, 
max_stdp_spike_delta): - ts = get_simulator().machine_time_step / 1000.0 + ts = machine_time_step_ms() # The minimums for potentiation min_decayed_r1 = get_min_lut_value(self.__tau_plus_data) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py index bf5fd883a5..d839974187 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_nearest_pair.py @@ -157,7 +157,7 @@ def get_parameter_names(self): @overrides(AbstractTimingDependence.minimum_delta) def minimum_delta(self, max_stdp_spike_delta): - ts = get_simulator().machine_time_step / 1000.0 + ts = machine_time_step_ms() return [ get_min_lut_value(self.__tau_plus_data, ts, max_stdp_spike_delta), get_min_lut_value(self.__tau_minus_data, ts, max_stdp_spike_delta)] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py index 00e404aa1c..c08dedb6ee 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_spike_pair.py @@ -157,7 +157,7 @@ def get_parameter_names(self): @overrides(AbstractTimingDependence.minimum_delta) def minimum_delta(self, max_stdp_spike_delta): - ts = get_simulator().machine_time_step / 1000.0 + ts = machine_time_step_ms() return [ get_min_lut_value(self.__tau_plus_data, ts, max_stdp_spike_delta), get_min_lut_value(self.__tau_minus_data, ts, max_stdp_spike_delta)] diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py 
b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py index 11a032d207..1b3af739a6 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/timing_dependence/timing_dependence_vogels_2011.py @@ -160,6 +160,6 @@ def get_parameter_names(self): @overrides(AbstractTimingDependence.minimum_delta) def minimum_delta(self, max_stdp_spike_delta): - ts = get_simulator().machine_time_step / 1000.0 + ts = machine_time_step_ms() min_tau = get_min_lut_value(self.__tau_data, ts, max_stdp_spike_delta) return [min_tau - self.__alpha, min_tau] diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 56e5dd062e..2878f408ea 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -25,8 +25,6 @@ from spinn_front_end_common.utilities.constants import BYTES_PER_WORD from spinn_front_end_common.utilities.utility_objs\ .provenance_data_item import ProvenanceDataItem -from spinn_front_end_common.utilities.globals_variables import ( - machine_time_step) from spynnaker.pyNN.models.neural_projections import ProjectionApplicationEdge from spynnaker.pyNN.models.neuron.synapse_io import SynapseIORowBased from spynnaker.pyNN.models.neuron.synapse_dynamics import ( diff --git a/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py b/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py index 866ca892d5..1f1fb9705c 100644 --- a/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py +++ b/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py @@ -119,4 +119,4 @@ def test_structural_shared(self): if __name__ == "__main__": - structural_shared() \ No newline at end of file + structural_shared() From 6f4f681622c28ca0dfdbf98edbafad8ab2e93c22 Mon Sep 17 00:00:00 2001 From: Andrew 
Gait Date: Fri, 9 Jul 2021 10:54:32 +0100 Subject: [PATCH 100/198] Use correct arguments in create_vertex --- .../model_tests/neuron/test_synaptic_manager.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index ea4cef540a..6d5bfc0c93 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -272,7 +272,8 @@ def test_set_synapse_dynamics(): post_app_model = IFCurrExpBase() post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", constraints=None, spikes_per_second=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, n_steps_per_timestep=1, drop_late_spikes=True, splitter=None) static = SynapseDynamicsStatic() @@ -365,7 +366,8 @@ def test_set_synapse_dynamics(): # Try starting again to get a couple more combinations post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", constraints=None, spikes_per_second=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, n_steps_per_timestep=1, drop_late_spikes=True, splitter=None) # STDP followed by structural STDP should result in Structural STDP @@ -387,7 +389,8 @@ def test_set_synapse_dynamics(): # One more time! 
post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", constraints=None, spikes_per_second=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, n_steps_per_timestep=1, drop_late_spikes=True, splitter=None) # Static followed by static structural should result in static @@ -424,7 +427,8 @@ def test_set_synapse_dynamics(): # OK, just one more, honest post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", constraints=None, spikes_per_second=None, - ring_buffer_sigma=None, incoming_spike_buffer_size=None, + ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None, incoming_spike_buffer_size=None, n_steps_per_timestep=1, drop_late_spikes=True, splitter=None) post_app_vertex.synapse_dynamics = static_struct post_app_vertex.synapse_dynamics = stdp_struct From d00063f820bf1b45920d7565dee1da788a1ee6ed Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 9 Jul 2021 11:04:53 +0100 Subject: [PATCH 101/198] doc fix --- .../pyNN/models/neuron/abstract_population_vertex.py | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index f6c468e9df..11830a5b70 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -942,11 +942,6 @@ def get_weight_scales(self, min_weights): [(1 / w) * weight_scale if w != 0 else 0 for w in min_weights]) return self.__weight_scales - # def reset_min_weights(self): - # if self.__min_weights_auto: - # self.__min_weights = None - # self.__weight_scales = None - @overrides(AbstractAcceptsIncomingSynapses.get_connections_from_machine) def get_connections_from_machine( self, transceiver, placements, app_edge, 
synapse_info): @@ -1291,9 +1286,7 @@ def incoming_projections(self): def get_local_provenance_data(self): """ Get provenance data for weights - :param str label: The label of the vertex - :param list synapse_names: List of the names of the synapses - :return: A list of provenance items + :rtype: list(~ProvenanceDataItem) """ prov_items = list() synapse_names = list(self.__neuron_impl.get_synapse_targets()) From dcfa1184ca79c4d0e550089676e2bf5c1e16445c Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 9 Jul 2021 11:28:06 +0100 Subject: [PATCH 102/198] try this doc fix instead --- .../pyNN/models/neuron/abstract_population_vertex.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 11830a5b70..bde8a11702 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -1174,7 +1174,6 @@ def get_neuron_variable_sdram(self, vertex_slice): return self.__neuron_recorder.get_variable_sdram_usage(vertex_slice) def get_synapse_variable_sdram(self, vertex_slice): - """ Get the amount of SDRAM per timestep used by synapse parts :param ~pacman.model.graphs.common.Slice vertex_slice: @@ -1189,7 +1188,6 @@ def get_synapse_variable_sdram(self, vertex_slice): return self.__synapse_recorder.get_variable_sdram_usage(vertex_slice) def get_neuron_constant_sdram(self, vertex_slice, neuron_regions): - """ Get the amount of fixed SDRAM used by neuron parts :param ~pacman.model.graphs.common.Slice vertex_slice: @@ -1284,9 +1282,11 @@ def incoming_projections(self): @overrides(AbstractProvidesLocalProvenanceData.get_local_provenance_data) def get_local_provenance_data(self): - """ Get provenance data for weights + """ Get provenance data items relating to weight representations - :rtype: list(~ProvenanceDataItem) + :return: the provenance items + :rtype: + 
iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) """ prov_items = list() synapse_names = list(self.__neuron_impl.get_synapse_targets()) From 3728d61d38b23767b6a956f08eda29fa8b69ac93 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 9 Jul 2021 11:53:50 +0100 Subject: [PATCH 103/198] It might be these docs that are wrong instead --- .../neuron/abstract_population_vertex.py | 25 +++++++++++++++---- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index bde8a11702..c24d371972 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -818,6 +818,7 @@ def reset_to_first_timestep(self): def __get_closest_weight(self, value): """ Get the best representation of the weight so that both weight and 1 / w work + :param float value: value to get the closest weight of """ if abs(value) < 1.0: @@ -825,10 +826,14 @@ def __get_closest_weight(self, value): return 1 / ( DataType.S1615.closest_representable_value_above(1 / value)) - def _calculate_min_weights(self, incoming_projections): + def __calculate_min_weights(self, incoming_projections): """ Calculate the minimum weights required to best represent all the - possible weights coming into the specified machine vertex - :param list(~.Projection) incoming_projections: a list of incoming proj + possible weights coming into this vertex + + :param list(~.Projection) incoming_projections: incoming proj to vertex + + :return: list of minimum weights + :rtype: list(float) """ # Initialise to a maximum value min_weights = [sys.maxsize for _ in range( @@ -888,6 +893,7 @@ def __check_weights( self, min_weights, weight_scale, incoming_projections): """ Warn the user about weights that can't be represented properly where possible + :param ~numpy.ndarray min_weights: Minimum weights per synapse type 
:param float weight_scale: The weight_scale from the synapse input_type :param list(~.Projection) incoming_projections: A list of incoming proj @@ -911,6 +917,7 @@ def __check_weight( synapse_info): """ Warn the user about a weight that can't be represented properly where possible + :param float min_weight: Minimum weight value :param float weight: weight value being checked :param float weight_scale: The weight_scale from the synapse input_type @@ -925,8 +932,16 @@ def __check_weight( (projection, synapse_info)) def get_min_weights(self, incoming_projections): + """ Calculate the minimum weights required to best represent all the + possible weights coming into this vertex + + :param list(~.Projection) incoming_projections: incoming proj to vertex + + :return: list of minimum weights + :rtype: list(float) + """ if self.__min_weights is None: - self.__min_weights = self._calculate_min_weights( + self.__min_weights = self.__calculate_min_weights( incoming_projections) return self.__min_weights @@ -1284,7 +1299,7 @@ def incoming_projections(self): def get_local_provenance_data(self): """ Get provenance data items relating to weight representations - :return: the provenance items + :return: a list of the provenance data items :rtype: iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) """ From 4cd4d9f0e8974f140bc80f5354a34af33d415908 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 9 Jul 2021 12:01:58 +0100 Subject: [PATCH 104/198] I guess this doc isn't actually necessary... 
--- spynnaker/pyNN/models/neuron/abstract_population_vertex.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index c24d371972..e17e4f9be3 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -1297,14 +1297,9 @@ def incoming_projections(self): @overrides(AbstractProvidesLocalProvenanceData.get_local_provenance_data) def get_local_provenance_data(self): - """ Get provenance data items relating to weight representations - - :return: a list of the provenance data items - :rtype: - iterable(~spinn_front_end_common.utilities.utility_objs.ProvenanceDataItem) - """ prov_items = list() synapse_names = list(self.__neuron_impl.get_synapse_targets()) + # Record the min weight used for each synapse type for i, weight in enumerate(self.__min_weights): prov_items.append(ProvenanceDataItem( From 1f5f197ebe6c2de0db42db12039773e600d0381e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 29 Jul 2021 17:20:36 +0100 Subject: [PATCH 105/198] These tolerances / constants seem to strike the right balance for now --- spynnaker/pyNN/spynnaker.cfg | 2 +- spynnaker/pyNN/utilities/utility_calls.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/spynnaker.cfg b/spynnaker/pyNN/spynnaker.cfg index 287970692e..90b4c1cec0 100644 --- a/spynnaker/pyNN/spynnaker.cfg +++ b/spynnaker/pyNN/spynnaker.cfg @@ -39,7 +39,7 @@ min_weights = None # Expected maximum time in ms between spikes for STDP. This is used in the # minimum weight calculation. It is ignored if the minimum weights are # specified. 
-max_stdp_spike_delta = 10 +max_stdp_spike_delta = 20 # Number of standard deviations from the mean to account for in the calculation # of the minimum weight when a random weight is specified diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index 6a9cb3b7d2..270a09401b 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -45,7 +45,7 @@ ARBITRARY_Y = 13031301 MARS_C_MAX = 698769068 -FLOAT_GCD_TOLERANCE = 0.00005 +FLOAT_GCD_TOLERANCE = 0.0002 STATS_BY_NAME = { 'binomial': RandomStatsBinomialImpl(), From 6a579ca864b6b9e44c2c3809a33061aeb043406a Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 29 Jul 2021 18:01:18 +0100 Subject: [PATCH 106/198] Reset min_weights after connection cache is cleared --- .../pyNN/models/neuron/abstract_population_vertex.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index e17e4f9be3..67ad90bae6 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -696,7 +696,13 @@ def clear_connection_cache(self): for post_vertex in self.machine_vertices: if isinstance(post_vertex, HasSynapses): post_vertex.clear_connection_cache() - # post_vertex._app_vertex.reset_min_weights() + post_vertex._app_vertex.reset_min_weights() + + def reset_min_weights(self): + """ Reset min_weights if set to auto-calculate + """ + if self.__min_weights_auto: + self.__min_weights = None @overrides(AbstractProvidesOutgoingPartitionConstraints. 
get_outgoing_partition_constraints) From 27736f8edd3134e9b7b3193ada8dc9918c6f8d88 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 30 Jul 2021 10:49:40 +0100 Subject: [PATCH 107/198] Only reset min_weights if required to do so --- spynnaker/pyNN/models/neuron/abstract_population_vertex.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 67ad90bae6..98fb1b931c 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -696,9 +696,10 @@ def clear_connection_cache(self): for post_vertex in self.machine_vertices: if isinstance(post_vertex, HasSynapses): post_vertex.clear_connection_cache() - post_vertex._app_vertex.reset_min_weights() + if self.__change_requires_mapping: + self.__reset_min_weights() - def reset_min_weights(self): + def __reset_min_weights(self): """ Reset min_weights if set to auto-calculate """ if self.__min_weights_auto: From 1421414d2a0d1d218594d82a5c68d6764e2b1199 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 11 Oct 2021 16:34:58 +0100 Subject: [PATCH 108/198] Do the minimum weight calculation for RandomDistributions better --- .../neural_projections/connectors/abstract_connector.py | 7 ++++++- .../pyNN/models/neuron/abstract_population_vertex.py | 9 +++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index d12790f9ea..9706e6d835 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -335,16 +335,21 @@ def get_weight_minimum(self, weights, weight_random_sigma, synapse_info): if isinstance(weights, RandomDistribution): mean_weight 
= utility_calls.get_mean(weights) weight_sd = math.sqrt(utility_calls.get_variance(weights)) - min_weight = mean_weight - (weight_sd * weight_random_sigma) if mean_weight < 0: + min_weight = mean_weight + (weight_sd * weight_random_sigma) + if min_weight > 0: + min_weight = -min_weight high = utility_calls.high(weights) if high is None: return abs(min_weight) return abs(max(min_weight, high)) else: + min_weight = mean_weight - (weight_sd * weight_random_sigma) low = utility_calls.low(weights) if low is None: return abs(min_weight) + if min_weight < 0: + min_weight = abs(min_weight) return abs(min(min_weight, low)) elif isinstance(weights, str): diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 98fb1b931c..22e3dca90f 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -857,9 +857,14 @@ def __calculate_min_weights(self, incoming_projections): weight_min = connector.get_weight_minimum( synapse_info.weights, self.__weight_random_sigma, synapse_info) + + if weight_min == 0: + weight_min = DataType.S1615.decode_from_int(1) weight_min *= weight_scale - if weight_min != 0 and not numpy.isnan(weight_min): - weight_min = float_gcd(min_weights[synapse_type], weight_min) + if not numpy.isnan(weight_min): + if min_weights[synapse_type] != sys.maxsize: + weight_min = float_gcd( + min_weights[synapse_type], weight_min) min_weights[synapse_type] = min( min_weights[synapse_type], weight_min) From 130f2b87fd72ea2d19c3a2a7e8f364cf6276d42e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 12 Oct 2021 12:02:44 +0100 Subject: [PATCH 109/198] Use the maximum weight if necessary in the minimum weight calculation --- .../neuron/abstract_population_vertex.py | 19 +++++++++++++++++-- .../abstract_weight_dependence.py | 8 ++++++++ .../weight_dependence_additive.py | 9 +++++++++ 
.../weight_dependence_additive_triplet.py | 5 +++++ .../weight_dependence_multiplicative.py | 9 +++++++++ .../abstract_synapse_dynamics.py | 11 +++++++++++ .../synapse_dynamics/synapse_dynamics_stdp.py | 8 ++++++++ .../synapse_dynamics_structural_static.py | 6 ++++++ .../synapse_dynamics_structural_stdp.py | 6 ++++++ spynnaker/pyNN/spynnaker.cfg | 2 +- 10 files changed, 80 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 22e3dca90f..99451cb55f 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -855,8 +855,8 @@ def __calculate_min_weights(self, incoming_projections): connector = synapse_info.connector synapse_dynamics = synapse_info.synapse_dynamics - weight_min = connector.get_weight_minimum( - synapse_info.weights, self.__weight_random_sigma, synapse_info) + weight_min = synapse_dynamics.get_weight_minimum( + connector, self.__weight_random_sigma, synapse_info) if weight_min == 0: weight_min = DataType.S1615.decode_from_int(1) @@ -897,6 +897,21 @@ def __calculate_min_weights(self, incoming_projections): min_weights = [m if m > 0 else DataType.S1615.decode_from_int(1) for m in min_weights] + # Now check that the maximum weight isn't too big + for proj in incoming_projections: + synapse_info = proj._synapse_information + synapse_type = synapse_info.synapse_type + connector = synapse_info.connector + synapse_dynamics = synapse_info.synapse_dynamics + + weight_max = synapse_dynamics.get_weight_maximum( + connector, synapse_info) + + weight_scale_limit = float(DataType.S1615.scale) + if weight_scale_limit * min_weights[synapse_type] < weight_max: + max_weight = self.__get_closest_weight(weight_max) + min_weights[synapse_type] = max_weight / weight_scale_limit + self.__check_weights(min_weights, weight_scale, incoming_projections) return min_weights diff --git 
a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py index f808929802..9f0e696cd0 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/abstract_weight_dependence.py @@ -82,6 +82,14 @@ def weight_maximum(self): :rtype: float """ + @abstractproperty + def weight_minimum(self): + """ The minimum weight that will ever be set in a synapse as a result\ + of this rule + + :rtype: float + """ + @abstractmethod def weight_change_minimum(self, min_delta): """ The minimum non-zero change in weight that will occur diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py index ffcb4234ca..f78a4cf125 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py @@ -115,6 +115,15 @@ def weight_maximum(self): """ return self.__w_max + @property + def weight_minimum(self): + """ The minimum weight that will ever be set in a synapse as a result\ + of this rule + + :rtype: float + """ + return self.__w_min + @overrides(AbstractWeightDependence.weight_change_minimum) def weight_change_minimum(self, min_delta): pot, dep = min_delta diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py index af2b86b98f..21eb84062b 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py +++ 
b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py @@ -149,6 +149,11 @@ def write_parameters( def weight_maximum(self): return self.__w_max + @property + @overrides(AbstractWeightDependence.weight_minimum) + def weight_minimum(self): + return self.__w_min + @overrides(AbstractWeightDependence.weight_change_minimum) def weight_change_minimum(self, min_delta): pot, dep = min_delta diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py index dd793227fc..2fab98a2e2 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_multiplicative.py @@ -110,6 +110,15 @@ def weight_maximum(self): """ return self.__w_max + @property + def weight_minimum(self): + """ The minimum weight that will ever be set in a synapse as a result\ + of this rule + + :rtype: float + """ + return self.__w_min + @overrides(AbstractWeightDependence.weight_change_minimum) def weight_change_minimum(self, min_delta): pot, dep = min_delta diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index 7f0790583a..925cb0fa38 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -179,6 +179,17 @@ def get_weight_maximum(self, connector, synapse_info): # pylint: disable=too-many-arguments return connector.get_weight_maximum(synapse_info) + def get_weight_minimum(self, connector, weight_random_sigma, synapse_info): + """ Get the minimum weight for the synapses + + :param AbstractConnector connector: + :param float weight_random_sigma: + :param 
SynapseInformation synapse_info: + """ + # pylint: disable=too-many-arguments + return connector.get_weight_minimum( + synapse_info.weights, weight_random_sigma, synapse_info) + def get_weight_variance(self, connector, weights, synapse_info): """ Get the variance in weight for the synapses diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 7524e92fa1..e6339ff21c 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -474,6 +474,14 @@ def get_weight_maximum(self, connector, synapse_info): # the weight dependence return max(w_max, self.__weight_dependence.weight_maximum) + @overrides(AbstractPlasticSynapseDynamics.get_weight_minimum) + def get_weight_minimum(self, connector, weight_random_sigma, synapse_info): + w_min = super().get_weight_minimum( + connector, weight_random_sigma, synapse_info) + # The minimum weight is the largest that it could be set to from + # the weight dependence + return min(w_min, self.__weight_dependence.weight_minimum) + @overrides(AbstractSynapseDynamics.get_provenance_data) def get_provenance_data(self, pre_population_label, post_population_label): yield from self.__timing_dependence.get_provenance_data( diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py index 45507bc475..c041cf149a 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py @@ -267,6 +267,12 @@ def get_weight_maximum(self, connector, synapse_info): w_m = super().get_weight_maximum(connector, synapse_info) return max(w_m, self.__initial_weight) + @overrides(SynapseDynamicsStatic.get_weight_minimum) + 
def get_weight_minimum(self, connector, weight_random_sigma, synapse_info): + w_min = super().get_weight_minimum( + connector, weight_random_sigma, synapse_info) + return min(w_min, self.__initial_weight) + @overrides(SynapseDynamicsStatic.get_delay_maximum) def get_delay_maximum(self, connector, synapse_info): d_m = super().get_delay_maximum(connector, synapse_info) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py index 814e96b5b9..c678638de2 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_stdp.py @@ -267,6 +267,12 @@ def get_weight_maximum(self, connector, synapse_info): w_max = super().get_weight_maximum(connector, synapse_info) return max(w_max, self.__initial_weight) + @overrides(SynapseDynamicsSTDP.get_weight_minimum) + def get_weight_minimum(self, connector, weight_random_sigma, synapse_info): + w_min = super().get_weight_minimum( + connector, weight_random_sigma, synapse_info) + return min(w_min, self.__initial_weight) + @overrides(SynapseDynamicsSTDP.get_delay_maximum) def get_delay_maximum(self, connector, synapse_info): d_m = super().get_delay_maximum(connector, synapse_info) diff --git a/spynnaker/pyNN/spynnaker.cfg b/spynnaker/pyNN/spynnaker.cfg index 90b4c1cec0..70e677e453 100644 --- a/spynnaker/pyNN/spynnaker.cfg +++ b/spynnaker/pyNN/spynnaker.cfg @@ -39,7 +39,7 @@ min_weights = None # Expected maximum time in ms between spikes for STDP. This is used in the # minimum weight calculation. It is ignored if the minimum weights are # specified. 
-max_stdp_spike_delta = 20 +max_stdp_spike_delta = 50 # Number of standard deviations from the mean to account for in the calculation # of the minimum weight when a random weight is specified From f4d2bcdbb1cde67c4ff095d39702cb565d545922 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 18 Oct 2021 10:41:14 +0100 Subject: [PATCH 110/198] Better choice of tolerance for microcircuit that still works with STDP --- spynnaker/pyNN/utilities/utility_calls.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index 270a09401b..c70b34d44d 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -45,7 +45,7 @@ ARBITRARY_Y = 13031301 MARS_C_MAX = 698769068 -FLOAT_GCD_TOLERANCE = 0.0002 +FLOAT_GCD_TOLERANCE = 0.00001 STATS_BY_NAME = { 'binomial': RandomStatsBinomialImpl(), From 587b489f340560757aac515a28edbf41a8d5f6e7 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 18 Oct 2021 12:57:40 +0100 Subject: [PATCH 111/198] Tolerance shouldn't be lower than smallest representable value --- spynnaker/pyNN/utilities/utility_calls.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index c70b34d44d..307e9e58d1 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -24,6 +24,7 @@ from scipy.stats import binom from spinn_utilities.log import FormatAdapter from spinn_utilities.safe_eval import SafeEval +from data_specification.enums.data_type import DataType from spinn_front_end_common.utilities.exceptions import ConfigurationException from spynnaker.pyNN.utilities.random_stats import ( RandomStatsExponentialImpl, RandomStatsGammaImpl, RandomStatsLogNormalImpl, @@ -45,7 +46,7 @@ ARBITRARY_Y = 13031301 MARS_C_MAX = 698769068 -FLOAT_GCD_TOLERANCE = 0.00001 +FLOAT_GCD_TOLERANCE = 
DataType.S1615.decode_from_int(1) STATS_BY_NAME = { 'binomial': RandomStatsBinomialImpl(), From b8a6581b634d4b39a69ebcc06814404768e1e0b7 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 28 Oct 2021 10:34:29 +0100 Subject: [PATCH 112/198] Remove print statement --- spynnaker/pyNN/models/neuron/abstract_population_vertex.py | 1 - 1 file changed, 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 640322a78d..8fc1e5a88b 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -1326,7 +1326,6 @@ def get_local_provenance_data(self): with ProvenanceWriter() as db: for i, weight in enumerate(self.__min_weights): - print("i, weight ", self.label, synapse_names[i], weight) db.insert_app_vertex( self.label, synapse_names[i], "min_weight", From fa1e9096e1f22e95737a88f9506a92ad00750593 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 10 Nov 2021 13:32:56 +0000 Subject: [PATCH 113/198] Fix STDP weight rules; works for current-based neurons... 
--- .../src/neuron/c_main_synapse_common.h | 2 +- neural_modelling/src/neuron/neuron.c | 2 +- .../stdp/synapse_dynamics_stdp_common.h | 6 ++- ...dynamics_stdp_izhikevich_neuromodulation.c | 37 +++++++++++-------- .../stdp/synapse_dynamics_stdp_mad_impl.c | 6 ++- .../stdp/weight_dependence/weight.h | 3 +- .../weight_additive_one_term_impl.c | 18 +++++++-- .../weight_additive_one_term_impl.h | 12 +++++- .../weight_additive_two_term_impl.c | 18 +++++++-- .../weight_additive_two_term_impl.h | 11 +++++- .../weight_multiplicative_impl.c | 18 +++++++-- .../weight_multiplicative_impl.h | 13 +++++-- .../src/neuron/plasticity/synapse_dynamics.h | 3 +- .../plasticity/synapse_dynamics_static_impl.c | 2 +- neural_modelling/src/neuron/synapses.c | 9 ++++- .../neuron/abstract_population_vertex.py | 1 + .../test_STDP_nearest_pair_multiplicative.py | 4 +- .../test_STDP_pair_multiplicative.py | 2 +- 18 files changed, 123 insertions(+), 44 deletions(-) diff --git a/neural_modelling/src/neuron/c_main_synapse_common.h b/neural_modelling/src/neuron/c_main_synapse_common.h index 0803f5aac8..f2f745e0c6 100644 --- a/neural_modelling/src/neuron/c_main_synapse_common.h +++ b/neural_modelling/src/neuron/c_main_synapse_common.h @@ -129,7 +129,7 @@ static inline bool initialise_synapse_regions( // Set up the synapse dynamics if (!synapse_dynamics_initialise( data_specification_get_region(regions.synapse_dynamics, ds_regions), - n_neurons, n_synapse_types)) { + n_neurons, n_synapse_types, min_weights)) { return false; } diff --git a/neural_modelling/src/neuron/neuron.c b/neural_modelling/src/neuron/neuron.c index 962fc88a47..1026ed0c34 100644 --- a/neural_modelling/src/neuron/neuron.c +++ b/neural_modelling/src/neuron/neuron.c @@ -120,7 +120,7 @@ bool neuron_initialise( n_synapse_types = params->n_synapse_types; // Set up ring buffer left shifts - uint32_t min_weights_bytes = n_synapse_types * sizeof(uint32_t); + uint32_t min_weights_bytes = n_synapse_types * sizeof(REAL); min_weights = 
spin1_malloc(min_weights_bytes); if (min_weights == NULL) { log_error("Not enough memory to allocate min_weights"); diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h index 07f256594e..1cc864ac01 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h @@ -112,7 +112,8 @@ static uint32_t plastic_saturation_count = 0; #endif static inline bool synapse_dynamics_stdp_init( - address_t *address, stdp_params *params, uint32_t n_synapse_types) { + address_t *address, stdp_params *params, uint32_t n_synapse_types, + REAL *min_weights) { // Load parameters stdp_params *sdram_params = (stdp_params *) *address; @@ -126,7 +127,8 @@ static inline bool synapse_dynamics_stdp_init( } // Load weight dependence data - address_t weight_result = weight_initialise(weight_region_address, n_synapse_types); + address_t weight_result = weight_initialise( + weight_region_address, n_synapse_types, min_weights); if (weight_result == NULL) { return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c index baab421035..beba4af3a1 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c @@ -34,6 +34,7 @@ typedef struct neuromodulated_synapse_t { typedef struct nm_update_state_t { accum weight; + REAL min_weight; update_state_t eligibility_state; } nm_update_state_t; @@ -74,7 +75,7 @@ static int16_lut *tau_c_lookup; static int16_lut *tau_d_lookup; -//static uint32_t *nm_weight_shift; +static REAL *nm_min_weight; #define DECAY_LOOKUP_TAU_C(time) \ 
maths_lut_exponential_decay(time, tau_c_lookup) @@ -83,9 +84,13 @@ static int16_lut *tau_d_lookup; static inline nm_update_state_t get_nm_update_state( neuromodulated_synapse_t synapse, index_t synapse_type) { - accum s1615_weight = kbits(synapse.weight); + uint64_t mw = (uint64_t) bitsk(nm_min_weight[synapse_type]); + uint64_t w = (uint64_t) (synapse.weight); + + accum s1615_weight = kbits((int_k_t) mw * w); nm_update_state_t update_state = { .weight=s1615_weight, + .min_weight=nm_min_weight[synapse_type], .eligibility_state=synapse_structure_get_update_state( synapse.eligibility_synapse, synapse_type) }; @@ -99,7 +104,7 @@ static inline nm_final_state_t get_nm_final_state( update_state.weight = kbits(MIN(bitsk(update_state.weight), bitsk(nm_params.max_weight))); nm_final_state_t final_state = { - .weight=(weight_t) (bitsk(update_state.weight)), + .weight=(weight_t) (bitsk(update_state.weight) / bitsk(update_state.min_weight)), .final_state=synapse_structure_get_final_state( update_state.eligibility_state) }; @@ -271,9 +276,11 @@ static inline nm_final_state_t izhikevich_neuromodulation_plasticity_update_syna } bool synapse_dynamics_initialise( - address_t address, uint32_t n_neurons, uint32_t n_synapse_types) { + address_t address, uint32_t n_neurons, uint32_t n_synapse_types, + REAL *min_weights) { - if (!synapse_dynamics_stdp_init(&address, ¶ms, n_synapse_types)) { + if (!synapse_dynamics_stdp_init( + &address, ¶ms, n_synapse_types, min_weights)) { return false; } @@ -295,16 +302,16 @@ bool synapse_dynamics_initialise( tau_c_lookup = maths_copy_int16_lut(&lut_address); tau_d_lookup = maths_copy_int16_lut(&lut_address); -// // Store weight shifts -// nm_weight_shift = spin1_malloc(sizeof(uint32_t) * n_synapse_types); -// if (nm_weight_shift == NULL) { -// log_error("Could not initialise weight region data"); -// return NULL; -// } -// for (uint32_t s = 0; s < n_synapse_types; s++) { -// nm_weight_shift[s] = ring_buffer_to_input_buffer_left_shifts[s]; -// 
log_info("Weight shift %u = %u", s, nm_weight_shift[s]); -// } + // Store min weights + nm_min_weight = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (nm_min_weight == NULL) { + log_error("Could not initialise min weight region data"); + return NULL; + } + for (uint32_t s = 0; s < n_synapse_types; s++) { + nm_min_weight[s] = min_weights[s]; + log_info("Min weight %u = %k", s, nm_min_weight[s]); + } return true; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c index 7abd363519..c1789bc647 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c @@ -102,9 +102,11 @@ static inline final_state_t plasticity_update_synapse( } bool synapse_dynamics_initialise( - address_t address, uint32_t n_neurons, uint32_t n_synapse_types) { + address_t address, uint32_t n_neurons, uint32_t n_synapse_types, + REAL *min_weights) { - if (!synapse_dynamics_stdp_init(&address, ¶ms, n_synapse_types)) { + if (!synapse_dynamics_stdp_init( + &address, ¶ms, n_synapse_types, min_weights)) { return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h index ff09cdfabd..19020ec760 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h @@ -45,7 +45,8 @@ * \param[in] min_weights: The value of the weight of the LSB of the weight * \return the end of the weight region as an absolute SDRAM memory address. */ -address_t weight_initialise(address_t address, uint32_t n_synapse_types); +address_t weight_initialise( + address_t address, uint32_t n_synapse_types, REAL *min_weights); /*! * \brief Gets the initial weight state. 
diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c index 0167d07112..8e8c587ca6 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c @@ -25,6 +25,9 @@ //! Global plasticity parameter data plasticity_weight_region_data_t *plasticity_weight_region_data; +//! Plasticity min_weight array, in DTCM +REAL *min_weight; + //! \brief How the configuration data for additive_one_term is laid out in //! SDRAM. The layout is an array of these. typedef struct { @@ -37,7 +40,8 @@ typedef struct { //--------------------------------------- // Functions //--------------------------------------- -address_t weight_initialise(address_t address, uint32_t n_synapse_types) { +address_t weight_initialise( + address_t address, uint32_t n_synapse_types, REAL *min_weights) { log_debug("weight_initialise: starting"); log_debug("\tSTDP additive one-term weight dependence"); @@ -52,15 +56,23 @@ address_t weight_initialise(address_t address, uint32_t n_synapse_types) { return NULL; } + min_weight = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight == NULL) { + log_error("Could not initialise weight region data"); + return NULL; + } + for (uint32_t s = 0; s < n_synapse_types; s++, config++) { dtcm_copy[s].min_weight = config->min_weight; dtcm_copy[s].max_weight = config->max_weight; dtcm_copy[s].a2_plus = config->a2_plus; dtcm_copy[s].a2_minus = config->a2_minus; - log_debug("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k", + min_weight[s] = min_weights[s]; + + log_info("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k min_weight %k", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, - dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus); + 
dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, min_weight[s]); } log_debug("weight_initialise: completed successfully"); diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h index 4d32922610..541a85155b 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h @@ -43,6 +43,8 @@ typedef struct { typedef struct { accum weight; //!< The starting weight + REAL min_weight; //!< Min weight + //! Reference to the configuration data const plasticity_weight_region_data_t *weight_region; } weight_state_t; @@ -60,10 +62,16 @@ typedef struct { */ static inline weight_state_t weight_get_initial(weight_t weight, index_t synapse_type) { extern plasticity_weight_region_data_t *plasticity_weight_region_data; + extern REAL *min_weight; + + uint64_t mw = (uint64_t) bitsk(min_weight[synapse_type]); + uint64_t w = (uint64_t) (weight); + + accum s1615_weight = kbits((int_k_t) mw * w); - accum s1615_weight = kbits(weight); return (weight_state_t) { .weight = s1615_weight, + .min_weight = min_weight[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -99,7 +107,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( * \return The new weight. 
*/ static inline weight_t weight_get_final(weight_state_t state) { - return (weight_t) (bitsk(state.weight)); + return (weight_t) (bitsk(state.weight) / bitsk(state.min_weight)); } static inline void weight_decay(weight_state_t *state, int32_t decay) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c index 6363e97bdd..0450044763 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c @@ -36,10 +36,14 @@ typedef struct { accum a3_minus; } additive_two_term_config_t; +//! Plasticity min_weight array, in DTCM +REAL *min_weight; + //--------------------------------------- // Functions //--------------------------------------- -address_t weight_initialise(address_t address, uint32_t n_synapse_types) { +address_t weight_initialise( + address_t address, uint32_t n_synapse_types, REAL *min_weights) { log_debug("weight_initialise: starting"); log_debug("\tSTDP additive two-term weight dependance"); @@ -56,6 +60,12 @@ address_t weight_initialise(address_t address, uint32_t n_synapse_types) { return NULL; } + min_weight = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight == NULL) { + log_error("Could not initialise weight region data"); + return NULL; + } + for (uint32_t s = 0; s < n_synapse_types; s++, config++) { dtcm_copy[s].min_weight = config->min_weight; dtcm_copy[s].max_weight = config->max_weight; @@ -64,11 +74,13 @@ address_t weight_initialise(address_t address, uint32_t n_synapse_types) { dtcm_copy[s].a3_plus = config->a3_plus; dtcm_copy[s].a3_minus = config->a3_minus; - log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d," + min_weight[s] = min_weights[s]; + + log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, 
A2-:%d, min_weight %k" " A3+:%d, A3-:%d", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, - dtcm_copy[s].a3_plus, dtcm_copy[s].a3_minus); + dtcm_copy[s].a3_plus, dtcm_copy[s].a3_minus, min_weight[s]); } log_debug("weight_initialise: completed successfully"); diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h index c0530875d2..ffcf82c9f0 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h @@ -45,6 +45,8 @@ typedef struct plasticity_weight_region_data_two_term_t { typedef struct weight_state_t { accum weight; //!< The weight + REAL min_weight; //!< The min_weight + //! Reference to the configuration data const plasticity_weight_region_data_t *weight_region; } weight_state_t; @@ -62,11 +64,16 @@ typedef struct weight_state_t { */ static inline weight_state_t weight_get_initial(weight_t weight, index_t synapse_type) { extern plasticity_weight_region_data_t *plasticity_weight_region_data; + extern REAL *min_weight; + + uint64_t mw = (uint64_t) bitsk(min_weight[synapse_type]); + uint64_t w = (uint64_t) (weight); - accum s1615_weight = kbits(weight); + accum s1615_weight = kbits((int_k_t) mw * w); return (weight_state_t) { .weight = s1615_weight, + .min_weight = min_weight[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -106,7 +113,7 @@ static inline weight_state_t weight_two_term_apply_potentiation( * \return The new weight. 
*/ static inline weight_t weight_get_final(weight_state_t state) { - return (weight_t) (bitsk(state.weight)); + return (weight_t) (bitsk(state.weight) / bitsk(state.min_weight)); } static inline void weight_decay(weight_state_t *state, int32_t decay) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c index 2bb99ccb9c..c9f02343c3 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c @@ -25,6 +25,9 @@ //! Global plasticity parameter data array, in DTCM plasticity_weight_region_data_t *plasticity_weight_region_data; +//! Plasticity min_weight array, in DTCM +REAL *min_weight; + //! \brief How the configuration data for multiplicative is laid out in SDRAM. //! The layout is an array of these. typedef struct { @@ -38,7 +41,8 @@ typedef struct { // Functions //--------------------------------------- -address_t weight_initialise(address_t address, uint32_t n_synapse_types) { +address_t weight_initialise( + address_t address, uint32_t n_synapse_types, REAL *min_weights) { log_debug("weight_initialise: starting"); log_debug("\tSTDP multiplicative weight dependence"); @@ -51,6 +55,12 @@ address_t weight_initialise(address_t address, uint32_t n_synapse_types) { return NULL; } + min_weight = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight == NULL) { + log_error("Could not initialise weight region data"); + return NULL; + } + multiplicative_config_t *config = (multiplicative_config_t *) address; for (uint32_t s = 0; s < n_synapse_types; s++, config++) { // Copy parameters @@ -59,9 +69,11 @@ address_t weight_initialise(address_t address, uint32_t n_synapse_types) { dtcm_copy[s].a2_plus = config->a2_plus; dtcm_copy[s].a2_minus = config->a2_minus; - log_debug("\tSynapse type 
%u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d", + min_weight[s] = min_weights[s]; + + log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d, min_weight %k", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, - dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus); + dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, min_weight[s]); } log_debug("weight_initialise: completed successfully"); diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h index 6d45987b18..66859fe428 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h @@ -43,6 +43,8 @@ typedef struct { typedef struct { accum weight; //!< The current weight + REAL min_weight; //!< The min weight + //! Reference to the configuration data const plasticity_weight_region_data_t *weight_region; } weight_state_t; @@ -67,11 +69,16 @@ extern plasticity_weight_region_data_t *plasticity_weight_region_data; static inline weight_state_t weight_get_initial( weight_t weight, index_t synapse_type) { extern plasticity_weight_region_data_t *plasticity_weight_region_data; + extern REAL *min_weight; + + uint64_t mw = (uint64_t) bitsk(min_weight[synapse_type]); + uint64_t w = (uint64_t) (weight); - accum s1615_weight = kbits(weight); + accum s1615_weight = kbits((int_k_t) mw * w); - return (weight_state_t) { + return (weight_state_t) { .weight = s1615_weight, + .min_weight = min_weight[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -118,7 +125,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( */ static inline weight_t weight_get_final(weight_state_t state) { // log_info("\tnew_weight:%d\n", state.weight); - return (weight_t) (bitsk(state.weight)); + return (weight_t) 
(bitsk(state.weight) / bitsk(state.min_weight)); } static inline void weight_decay(weight_state_t *state, int32_t decay) { diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h index 7dd309f057..d623beac21 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h @@ -33,7 +33,8 @@ //! How to interpret the values from the ring buffers //! \return Whether the initialisation succeeded. bool synapse_dynamics_initialise( - address_t address, uint32_t n_neurons, uint32_t n_synapse_types); + address_t address, uint32_t n_neurons, uint32_t n_synapse_types, + REAL *min_weights); //! \brief Process the dynamics of the synapses //! \param[in,out] plastic_region_data: Where the plastic data is diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c index 8bc84807a6..41fa832886 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c @@ -29,7 +29,7 @@ bool synapse_dynamics_initialise( UNUSED address_t address, UNUSED uint32_t n_neurons, - UNUSED uint32_t n_synapse_types) { + UNUSED uint32_t n_synapse_types, UNUSED REAL *min_weights) { return true; } diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index be09e6f8ee..b3676bd95c 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -275,15 +275,20 @@ bool synapses_initialise( uint32_t log_max_delay = params->log_max_delay; // Set up min_weights - min_weights = spin1_malloc(n_synapse_types * sizeof(REAL)); + uint32_t min_weights_bytes = n_synapse_types * sizeof(REAL); + min_weights = spin1_malloc(min_weights_bytes); if (min_weights == NULL) { log_error("Not enough memory to allocate min 
weights"); return false; } // read in min_weights - spin1_memcpy(min_weights, synapse_params_address, n_synapse_types * sizeof(REAL)); + spin1_memcpy(min_weights, params->min_weights, min_weights_bytes); *min_weights_out = min_weights; + for (uint32_t s = 0; s < n_synapse_types; s++) { + log_info("synapse initialise, min_weights_out[%u] = %k %k", + s, min_weights_out[s], min_weights[s]); + } log_debug("synapses_initialise: completed successfully"); print_synapse_parameters(); diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 362dd3a674..86d7114b84 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -907,6 +907,7 @@ def __calculate_min_weights(self, incoming_projections): weight_max = synapse_dynamics.get_weight_maximum( connector, synapse_info) + weight_max *= weight_scale weight_scale_limit = float(DataType.S1615.scale) if weight_scale_limit * min_weights[synapse_type] < weight_max: diff --git a/spynnaker_integration_tests/test_stdp/test_STDP_nearest_pair_multiplicative.py b/spynnaker_integration_tests/test_stdp/test_STDP_nearest_pair_multiplicative.py index ef7df82460..7aff960f72 100644 --- a/spynnaker_integration_tests/test_stdp/test_STDP_nearest_pair_multiplicative.py +++ b/spynnaker_integration_tests/test_stdp/test_STDP_nearest_pair_multiplicative.py @@ -19,7 +19,7 @@ import unittest -class TestSTDPNearestPairAdditive(BaseTestCase): +class TestSTDPNearestPairMultiplicative(BaseTestCase): def potentiation_and_depression(self): p.setup(1) @@ -137,6 +137,8 @@ def potentiation_and_depression(self): target_spikes = [1014, 1032, 1053] self.assertListEqual(list(post_spikes), target_spikes) + print("weights, new_weight_exact: ", weights[0], new_weight_exact) + self.assertTrue(numpy.allclose( weights[0], new_weight_exact, atol=0.001)) diff --git 
a/spynnaker_integration_tests/test_stdp/test_STDP_pair_multiplicative.py b/spynnaker_integration_tests/test_stdp/test_STDP_pair_multiplicative.py index 2a99c6d9fd..0d3688a021 100644 --- a/spynnaker_integration_tests/test_stdp/test_STDP_pair_multiplicative.py +++ b/spynnaker_integration_tests/test_stdp/test_STDP_pair_multiplicative.py @@ -171,7 +171,7 @@ def potentiation_and_depression(): assert(numpy.allclose(weights, new_weight_exact, rtol=0.001)) -class TestSTDPPairAdditive(BaseTestCase): +class TestSTDPPairMultiplicative(BaseTestCase): def test_potentiation_and_depression(self): self.runsafe(potentiation_and_depression) From 9d64eb24208c4a4a381a0f6de1f0d2e69934a387 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Dec 2021 13:36:53 +0000 Subject: [PATCH 114/198] moved function to header but not the associated header... --- neural_modelling/src/neuron/models/neuron_model_lif_impl.c | 1 - neural_modelling/src/neuron/models/neuron_model_lif_impl.h | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.c b/neural_modelling/src/neuron/models/neuron_model_lif_impl.c index 535336ac1c..cc048303c8 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.c +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.c @@ -18,7 +18,6 @@ //! \file //! \brief Leaky Integrate and Fire neuron implementation #include "neuron_model_lif_impl.h" -#include "round.h" #include diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index 793bd2f2d2..9ca47867ee 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -21,6 +21,7 @@ #define _NEURON_MODEL_LIF_CURR_IMPL_H_ #include "neuron_model.h" +#include "round.h" ///////////////////////////////////////////////////////////// //! 
definition for LIF neuron parameters From f2aac78789360e9a4e1728c56ac32a1e2b9cd34a Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Dec 2021 13:44:46 +0000 Subject: [PATCH 115/198] Move IF_cond_alpha from spynnaker8 repo --- spynnaker8/__init__.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/spynnaker8/__init__.py b/spynnaker8/__init__.py index ac082264d5..3d1345f2c9 100644 --- a/spynnaker8/__init__.py +++ b/spynnaker8/__init__.py @@ -85,6 +85,9 @@ from spynnaker.pyNN.models.neuron.builds.if_curr_alpha import ( IFCurrAlpha as IF_curr_alpha) # noinspection PyUnresolvedReferences +from spynnaker.pyNN.models.neuron.builds.if_cond_alpha import \ + IFCondAlpha as IF_cond_alpha +# noinspection PyUnresolvedReferences from spynnaker.pyNN.models.neuron.builds.if_curr_delta import ( IFCurrDelta as IF_curr_delta) # noinspection PyUnresolvedReferences @@ -139,8 +142,8 @@ 'LastNeuronSelection', 'RandomSelection', 'DistanceDependentFormation', 'RandomByWeightElimination', # neuron stuff - 'IF_cond_exp', 'IF_curr_exp', "IF_curr_alpha", "IF_curr_delta", - 'Izhikevich', 'SpikeSourceArray', 'SpikeSourcePoisson', + 'IF_cond_exp', 'IF_curr_exp', "IF_curr_alpha", "IF_cond_alpha", + "IF_curr_delta", 'Izhikevich', 'SpikeSourceArray', 'SpikeSourcePoisson', # pops 'Assembly', 'Population', 'PopulationView', # projection From bcc4809150a537272049a45bed690830b68feb29 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Dec 2021 14:50:58 +0000 Subject: [PATCH 116/198] spinncer single cell experiment running --- spynnaker/pyNN/abstract_spinnaker_common.py | 40 +++++++++---------- .../external_device_lif_control.py | 4 +- .../neuron/abstract_population_vertex.py | 1 + .../neuron/abstract_pynn_neuron_model.py | 2 +- 4 files changed, 24 insertions(+), 23 deletions(-) diff --git a/spynnaker/pyNN/abstract_spinnaker_common.py b/spynnaker/pyNN/abstract_spinnaker_common.py index ed575a8004..fc4b05c875 100644 --- a/spynnaker/pyNN/abstract_spinnaker_common.py +++ 
b/spynnaker/pyNN/abstract_spinnaker_common.py @@ -165,26 +165,26 @@ def _set_up_timings(self, timestep, min_delay, time_scale_factor): else: self.__min_delay = self.machine_time_step_ms - # Sort out the maximum delay - natively_supported_delay_for_models = \ - constants.MAX_SUPPORTED_DELAY_TICS - delay_extension_max_supported_delay = ( - constants.MAX_DELAY_BLOCKS * - constants.MAX_TIMER_TICS_SUPPORTED_PER_BLOCK) - max_delay_tics_supported = \ - natively_supported_delay_for_models + \ - delay_extension_max_supported_delay - if (max_delay is not None and max_delay * 1000.0 > - max_delay_tics_supported * machine_time_step): - raise ConfigurationException( - "Pacman does not support max delays above {} ms with the " - "current machine time step".format( - max_delay_tics_supported * self.machine_time_step_ms)) - if max_delay is not None: - self.__max_delay = max_delay - else: - self.__max_delay = ( - max_delay_tics_supported * self.machine_time_step_ms) + # # Sort out the maximum delay + # natively_supported_delay_for_models = \ + # constants.MAX_SUPPORTED_DELAY_TICS + # delay_extension_max_supported_delay = ( + # constants.MAX_DELAY_BLOCKS * + # constants.MAX_TIMER_TICS_SUPPORTED_PER_BLOCK) + # max_delay_tics_supported = \ + # natively_supported_delay_for_models + \ + # delay_extension_max_supported_delay + # if (max_delay is not None and max_delay * 1000.0 > + # max_delay_tics_supported * machine_time_step): + # raise ConfigurationException( + # "Pacman does not support max delays above {} ms with the " + # "current machine time step".format( + # max_delay_tics_supported * self.machine_time_step_ms)) + # if max_delay is not None: + # self.__max_delay = max_delay + # else: + # self.__max_delay = ( + # max_delay_tics_supported * self.machine_time_step_ms) # Sort out the time scale factor if not user specified # (including config) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py 
b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py index ecc1c4aa40..59a170a87e 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py @@ -97,7 +97,7 @@ def __init__( def create_vertex( self, n_neurons, label, constraints, spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, - n_steps_per_timestep, drop_late_spikes, splitter): + n_steps_per_timestep, drop_late_spikes, splitter, rb_left_shifts): if n_neurons != len(self._devices): raise ConfigurationException( "Number of neurons does not match number of devices in {}" @@ -108,4 +108,4 @@ def create_vertex( self._devices, self._create_edges, max_atoms, self._model, self, self._translator, spikes_per_second, label, ring_buffer_sigma, incoming_spike_buffer_size, constraints, - drop_late_spikes, splitter) + drop_late_spikes, splitter, rb_left_shifts) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index f19f525fb4..9fb08d4809 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -860,6 +860,7 @@ def get_ring_buffer_shifts(self, incoming_projections): weights_signed = False rate_stats = [RunningStats() for _ in range(n_synapse_types)] steps_per_second = MICRO_TO_SECOND_CONVERSION / machine_time_step() + min_max_weight = numpy.ones(n_synapse_types) * 2 ** 32 for proj in incoming_projections: synapse_info = proj._synapse_information diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py index 9ae3506ba2..970842ce24 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py @@ -25,7 +25,7 @@ _population_parameters = { "spikes_per_second": None, "ring_buffer_sigma": 
None, "incoming_spike_buffer_size": None, "drop_late_spikes": None, - "splitter": None + "splitter": None, "rb_left_shifts": None } From db29ff890a88879dfeae4252aa614ea13bdb774f Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Dec 2021 15:11:03 +0000 Subject: [PATCH 117/198] add rb_left_shifts to tests --- .../model_tests/neuron/test_synaptic_manager.py | 12 ++++++++---- unittests/test_populations/test_vertex.py | 2 +- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index c4a5942a9f..c951b90a0b 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -216,7 +216,8 @@ def test_set_synapse_dynamics(): post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", constraints=None, spikes_per_second=None, ring_buffer_sigma=None, incoming_spike_buffer_size=None, - n_steps_per_timestep=1, drop_late_spikes=True, splitter=None) + n_steps_per_timestep=1, drop_late_spikes=True, splitter=None, + rb_left_shifts=None) static = SynapseDynamicsStatic() stdp = SynapseDynamicsSTDP( @@ -326,7 +327,8 @@ def test_set_synapse_dynamics(): post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", constraints=None, spikes_per_second=None, ring_buffer_sigma=None, incoming_spike_buffer_size=None, - n_steps_per_timestep=1, drop_late_spikes=True, splitter=None) + n_steps_per_timestep=1, drop_late_spikes=True, splitter=None, + rb_left_shifts=None) # STDP followed by structural STDP should result in Structural STDP post_app_vertex.synapse_dynamics = stdp @@ -348,7 +350,8 @@ def test_set_synapse_dynamics(): post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", constraints=None, spikes_per_second=None, ring_buffer_sigma=None, incoming_spike_buffer_size=None, - n_steps_per_timestep=1, drop_late_spikes=True, splitter=None) + 
n_steps_per_timestep=1, drop_late_spikes=True, splitter=None, + rb_left_shifts=None) # Static followed by static structural should result in static # structural @@ -385,7 +388,8 @@ def test_set_synapse_dynamics(): post_app_vertex = post_app_model.create_vertex( n_neurons=10, label="post", constraints=None, spikes_per_second=None, ring_buffer_sigma=None, incoming_spike_buffer_size=None, - n_steps_per_timestep=1, drop_late_spikes=True, splitter=None) + n_steps_per_timestep=1, drop_late_spikes=True, splitter=None, + rb_left_shifts=None) post_app_vertex.synapse_dynamics = static_struct post_app_vertex.synapse_dynamics = stdp_struct diff --git a/unittests/test_populations/test_vertex.py b/unittests/test_populations/test_vertex.py index d16aa4e153..9bc553cb68 100644 --- a/unittests/test_populations/test_vertex.py +++ b/unittests/test_populations/test_vertex.py @@ -112,7 +112,7 @@ def __init__(self): max_atoms_per_core=None, spikes_per_second=None, ring_buffer_sigma=None, incoming_spike_buffer_size=None, neuron_impl=foo_bar.model, pynn_model=foo_bar, - drop_late_spikes=True, splitter=None) + drop_late_spikes=True, splitter=None, rb_left_shifts=None) def test_initializable(): From 44d569ed275b5f680478bdef28f5242e7f0cceef Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Dec 2021 16:25:19 +0000 Subject: [PATCH 118/198] Can't take logs of something that could be zero --- spynnaker/pyNN/models/neuron/abstract_population_vertex.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 9fb08d4809..3efb4ac357 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -928,7 +928,12 @@ def get_ring_buffer_shifts(self, incoming_projections): max_weights[synapse_type] = max( max_weights[synapse_type], biggest_weight[synapse_type]) # This is to deal with 
very small weights that are floored to 0 - mmw = 2**math.floor(math.log(min_max_weight[synapse_type], 2)) + if min_max_weight[synapse_type] != 0: + mmw = 2**math.floor(math.log(min_max_weight[synapse_type], 2)) + else: + # if it is zero then can't take logs... + small = 1.0 / 65536.0 + mmw = 2**math.floor(math.log(small, 2)) max_weights[synapse_type] = min(mmw * 2 ** 15, max_weights[synapse_type]) From bb735e66748776da0aba00b8e9867c145eca2605 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Dec 2021 18:22:48 +0000 Subject: [PATCH 119/198] fix to pass tests for now, but this constant was 64 earlier on branch... --- .../splitter_components/abstract_spynnaker_splitter_delay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py index 121de78f0e..bf5e7ca369 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py @@ -28,7 +28,7 @@ class AbstractSpynnakerSplitterDelay(object, metaclass=AbstractBase): __slots__ = [] # max delays supported by a slice split machine vertex - MAX_SUPPORTED_DELAY_TICS = 64 + MAX_SUPPORTED_DELAY_TICS = 16 # should this be 64? 
def max_support_delay(self): """ From 2a7083047e6076f837c1fffde0c80c5a15de0b0e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Dec 2021 18:28:21 +0000 Subject: [PATCH 120/198] flake8 --- .../pyNN/models/neuron/abstract_pynn_neuron_model_standard.py | 1 + .../pyNN/models/neuron/input_types/input_type_conductance.py | 3 ++- spynnaker/pyNN/utilities/constants.py | 3 +-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py index 48cbf09e18..9878389b0d 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py @@ -21,6 +21,7 @@ AbstractPyNNNeuronModel.default_population_parameters) _population_parameters["n_steps_per_timestep"] = 1 + class AbstractPyNNNeuronModelStandard(AbstractPyNNNeuronModel): """ A neuron model that follows the sPyNNaker standard composed model \ pattern for point neurons. 
diff --git a/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py b/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py index ae64e2a551..73ebb8f48c 100644 --- a/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py +++ b/spynnaker/pyNN/models/neuron/input_types/input_type_conductance.py @@ -86,7 +86,8 @@ def update_values(self, values, parameters, state_variables): @overrides(AbstractInputType.get_global_weight_scale) def get_global_weight_scale(self): - return float(2**5) # IMPLICIT WEIGHT SCALING -- the default in main branch is 2**10 + # IMPLICIT WEIGHT SCALING -- the default in main branch is 2**10 + return float(2**5) @property def e_rev_E(self): diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index 5740014c8d..b92ae89dc1 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -14,7 +14,6 @@ # along with this program. If not, see . -from enum import Enum import math from spinn_front_end_common.utilities.constants import ( BYTES_PER_WORD, BYTES_PER_KB) @@ -60,7 +59,7 @@ #: natively supported delays for all abstract_models MAX_SUPPORTED_DELAY_TICS = 64 MAX_DELAY_BLOCKS = 64 -DELAY_MASK = (1 << int(math.log2(MAX_SUPPORTED_DELAY_TICS))) -1 +DELAY_MASK = (1 << int(math.log2(MAX_SUPPORTED_DELAY_TICS))) - 1 MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 #: the minimum supported delay slot between two neurons From 8318cb9c52adbd1c1636513a402a560c3eaa9ed6 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Dec 2021 18:36:16 +0000 Subject: [PATCH 121/198] pylint caught a missed argument --- .../external_device_lif_control_vertex.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py index 778adcc78f..f04b38fcf1 100644 --- 
a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py @@ -49,7 +49,8 @@ def __init__( self, devices, create_edges, max_atoms_per_core, neuron_impl, pynn_model, translator=None, spikes_per_second=None, label=None, ring_buffer_sigma=None, incoming_spike_buffer_size=None, - drop_late_spikes=None, constraints=None, splitter=None): + drop_late_spikes=None, constraints=None, splitter=None, + rb_left_shifts=None): """ :param list(AbstractMulticastControllableDevice) devices: The AbstractMulticastControllableDevice instances to be controlled From 7b1789ff5cc5a97bd2ff967e9cb3c396bc7778f6 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Dec 2021 18:52:01 +0000 Subject: [PATCH 122/198] pylint, again I somehow missed this argument --- .../external_device_lif_control_vertex.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py index f04b38fcf1..ab5c2847a8 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py @@ -106,7 +106,8 @@ def __init__( super().__init__( len(devices), label, constraints, max_atoms_per_core, spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, - neuron_impl, pynn_model, drop_late_spikes, splitter) + neuron_impl, pynn_model, drop_late_spikes, splitter, + rb_left_shifts) def routing_key_partition_atom_mapping(self, routing_info, partition): # pylint: disable=arguments-differ From 51196ab4162a7ccc302a0d2e0fd936ff9c8a6052 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Dec 2021 19:00:35 +0000 Subject: [PATCH 123/198] Add copyright to spike_profiling.h --- neural_modelling/src/neuron/spike_profiling.h | 17 +++++++++++++++++ 1 file changed, 17 
insertions(+) diff --git a/neural_modelling/src/neuron/spike_profiling.h b/neural_modelling/src/neuron/spike_profiling.h index 7b21687caf..e8f2752ca3 100644 --- a/neural_modelling/src/neuron/spike_profiling.h +++ b/neural_modelling/src/neuron/spike_profiling.h @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2017-2021 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + #include typedef struct spike_holder_t { From 3d1f36c83c397f92fba581881497e0c606d69aa5 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 7 Dec 2021 12:30:44 +0000 Subject: [PATCH 124/198] Allow up to 64 time slots for delays, update test accordingly --- .../abstract_spynnaker_splitter_delay.py | 2 +- .../model_tests/neuron/test_synaptic_manager.py | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py index bf5e7ca369..07b5aff2e2 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py @@ -28,7 +28,7 @@ class AbstractSpynnakerSplitterDelay(object, metaclass=AbstractBase): __slots__ = [] # max delays supported by a slice split machine vertex - 
MAX_SUPPORTED_DELAY_TICS = 16 # should this be 64? + MAX_SUPPORTED_DELAY_TICS = 64 # should this be 64? def max_support_delay(self): """ diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index c951b90a0b..c9207062cc 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -399,26 +399,26 @@ def test_set_synapse_dynamics(): "neurons_per_core,expect_app_keys,max_delay", [ # Only undelayed, all edges exist (range(10), [], 1000, 100, True, None), - # Only delayed, all edges exist - ([], range(10), 1000, 100, True, 20), + # Only delayed, all edges exist (note max_delay=20 on master) + ([], range(10), 1000, 100, True, 100), # All undelayed and delayed edges exist - (range(10), range(10), 1000, 100, True, 20), + (range(10), range(10), 1000, 100, True, 100), # Only undelayed, some edges are filtered (app keys shouldn't work) ([0, 1, 2, 3, 4], [], 1000, 100, False, None), # Only delayed, some edges are filtered (app keys shouldn't work) - ([], [5, 6, 7, 8, 9], 1000, 100, False, 20), + ([], [5, 6, 7, 8, 9], 1000, 100, False, 100), # Both delayed and undelayed, some undelayed edges don't exist # (app keys work because undelayed aren't filtered) - ([3, 4, 5, 6, 7], range(10), 1000, 100, True, 20), + ([3, 4, 5, 6, 7], range(10), 1000, 100, True, 100), # Both delayed and undelayed, some delayed edges don't exist # (app keys work because all undelayed exist) - (range(10), [4, 5, 6, 7], 1000, 100, True, 20), + (range(10), [4, 5, 6, 7], 1000, 100, True, 100), # Should work but number of neurons don't work out (range(5), [], 10000, 2048, False, None), # Should work but number of cores doesn't work out (range(2000), [], 10000, 5, False, None), # Should work but number of neurons with delays don't work out - ([], range(4), 1024, 256, False, 144) + ([], range(4), 1024, 256, False, 576) # 144 on master ]) def 
test_pop_based_master_pop_table_standard( undelayed_indices_connected, delayed_indices_connected, From f7de591756dfc3a8cd2a29ca3e97fe7ef6f43f41 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 8 Dec 2021 15:08:26 +0000 Subject: [PATCH 125/198] move send_spike out of sub-cycle; fix peak for n_atoms=1 --- .../implementations/neuron_impl_standard.h | 42 +++++++++---------- .../neuron/population_machine_neurons.py | 6 ++- 2 files changed, 25 insertions(+), 23 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h index 30e1ceb604..a14337dfb2 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h @@ -266,6 +266,9 @@ static void neuron_impl_do_timestep_update( &additional_input_array[neuron_index]; synapse_param_t *the_synapse_type = &neuron_synapse_shaping_params[neuron_index]; + + bool spike = false; + // Loop however many times requested; do this in reverse for efficiency, // and because the index doesn't actually matter for (uint32_t i_step = n_steps_per_timestep; i_step > 0; i_step--) { @@ -328,6 +331,8 @@ static void neuron_impl_do_timestep_update( // If spike occurs, communicate to relevant parts of model if (spike_now) { + spike = true; + // Call relevant model-based functions // Tell the neuron model neuron_model_has_spiked(this_neuron); @@ -335,11 +340,11 @@ static void neuron_impl_do_timestep_update( // Tell the additional input additional_input_has_spiked(additional_inputs); - // Record the spike - neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, neuron_index); - - // Send the spike - send_spike(timer_count, time, neuron_index); +// // Record the spike +// neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, neuron_index); +// +// // Send the spike +// send_spike(timer_count, time, neuron_index); } // Shape the existing input according to the 
included rule @@ -349,6 +354,14 @@ static void neuron_impl_do_timestep_update( #if LOG_LEVEL >= LOG_DEBUG neuron_model_print_state_variables(this_neuron); #endif // LOG_LEVEL >= LOG_DEBUG + + if (spike) { + // Record the spike + neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, neuron_index); + + // Send the spike + send_spike(timer_count, time, neuron_index); + } } } @@ -364,8 +377,8 @@ static void neuron_impl_store_neuron_parameters( // Skip over the steps per timestep next += 1; - // Skip over the steps per timestep - next += 1; +// // Skip over the steps per timestep +// next += 1; if (sizeof(global_neuron_params_t)) { log_debug("writing neuron global parameters"); @@ -416,37 +429,22 @@ static void neuron_impl_store_neuron_parameters( void neuron_impl_print_inputs(uint32_t n_neurons) { bool empty = true; for (index_t i = 0; i < n_neurons; i++) { -<<<<<<< HEAD - empty = empty && (0 == bitsk( - synapse_types_get_excitatory_input(&neuron_synapse_shaping_params[i]) - - synapse_types_get_inhibitory_input(&neuron_synapse_shaping_params[i]))); -======= synapse_param_t *params = &neuron_synapse_shaping_params[i]; empty = empty && (0 == bitsk( synapse_types_get_excitatory_input(params) - synapse_types_get_inhibitory_input(params))); ->>>>>>> refs/remotes/origin/master } if (!empty) { log_debug("-------------------------------------\n"); for (index_t i = 0; i < n_neurons; i++) { -<<<<<<< HEAD - input_t input = - synapse_types_get_excitatory_input(&neuron_synapse_shaping_params[i]) - - synapse_types_get_inhibitory_input(&neuron_synapse_shaping_params[i]); - if (bitsk(input) != 0) { - log_debug("%3u: %12.6k (= ", i, input); - synapse_types_print_input(&neuron_synapse_shaping_params[i]); -======= synapse_param_t *params = &neuron_synapse_shaping_params[i]; input_t input = synapse_types_get_excitatory_input(params) - synapse_types_get_inhibitory_input(params); if (bitsk(input) != 0) { log_debug("%3u: %12.6k (= ", i, input); synapse_types_print_input(params); ->>>>>>> 
refs/remotes/origin/master log_debug(")\n"); } } diff --git a/spynnaker/pyNN/models/neuron/population_machine_neurons.py b/spynnaker/pyNN/models/neuron/population_machine_neurons.py index b7edf1dfb8..3811a7bab1 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_neurons.py +++ b/spynnaker/pyNN/models/neuron/population_machine_neurons.py @@ -195,7 +195,11 @@ def _write_neuron_parameters(self, spec, ring_buffer_shifts): # Write the number of neurons in the block: spec.write_value(data=n_atoms) - spec.write_value(data=2**get_n_bits(n_atoms)) + # Write the peak neurons (closest above power of 2 of n_atoms) + if (n_atoms == 1): + spec.write_value(data=n_atoms) + else: + spec.write_value(data=2**get_n_bits(n_atoms)) # Write the ring buffer data # This is only the synapse types that need a ring buffer i.e. not From 9714ba404c703b4d6fb433589522e8f2fedc5cb4 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 16 Dec 2021 11:35:34 +0000 Subject: [PATCH 126/198] Add structured provenance filename reference --- spynnaker8/spinnaker.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/spynnaker8/spinnaker.py b/spynnaker8/spinnaker.py index 8920adcc00..90f08343e4 100644 --- a/spynnaker8/spinnaker.py +++ b/spynnaker8/spinnaker.py @@ -52,6 +52,9 @@ def __init__( self.__segment_counter = 0 self.__recorders = set([]) + # Structured provenance_items + self.structured_provenance_filename = None + # main pynn interface inheritance pynn_control.BaseState.__init__(self) From 20130b59c41d8baf4fa4cef9ad8f81fd050d7518 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 20 May 2022 13:38:26 +0100 Subject: [PATCH 127/198] Switch to using less current source ITCM in larger binary cases --- .../Makefile | 2 +- .../Makefile | 2 +- .../Makefile | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile 
b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile index a6556cca29..83da67c914 100644 --- a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_stepnoisy_impl.h +CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h diff --git a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_random_distance_weight/Makefile b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_random_distance_weight/Makefile index 1a1dee5edc..4be5eeedd4 100644 --- a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_random_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_random_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_ac_only_impl.h +CURRENT_SOURCE_H = 
$(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h diff --git a/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile b/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile index 832dda59a8..fc9ace5af2 100644 --- a/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_noisy_only_impl.h +CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h From d841462c805c1c38933b03f8840eb596504caf35 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 23 May 2022 13:17:13 +0100 Subject: [PATCH 128/198] Switch some makefiles to step-only CS since DC is a subset of Step --- .../Makefile | 2 +- .../Makefile | 2 +- .../Makefile | 2 +- .../Makefile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile 
b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile index 83da67c914..1a0af504a9 100644 --- a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h +CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_step_only_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h diff --git a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_random_distance_weight/Makefile b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_random_distance_weight/Makefile index 4be5eeedd4..f4175c4cd8 100644 --- a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_random_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_random_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h +CURRENT_SOURCE_H = 
$(NEURON_DIR)/neuron/current_sources/current_source_step_only_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h diff --git a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile index 478cc2878c..0e5ccef965 100644 --- a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h +CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_step_only_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h diff --git a/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile b/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile index fc9ace5af2..382293593d 100644 --- a/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile +++ 
b/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h +CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_step_only_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h From 6d1f4725eb50d6cc9e9ecb04b851980ae1d05041 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 23 May 2022 13:45:32 +0100 Subject: [PATCH 129/198] Oops, too big --- .../Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile b/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile index 382293593d..fc9ace5af2 100644 --- a/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_step_only_impl.h +CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h NEURON_IMPL_H = 
$(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h From d194eb72ec09b427123d06e46bef9503ef459084 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 25 May 2022 13:22:12 +0100 Subject: [PATCH 130/198] Don't use config for min_weight; move to (pop) additional_parameters Reorder arguments so min_weight related parameters are at the end --- .../external_device_lif_control.py | 9 ++-- .../external_device_lif_control_vertex.py | 19 +++++---- .../neuron/abstract_population_vertex.py | 41 ++++++++----------- .../neuron/abstract_pynn_neuron_model.py | 18 ++++---- .../abstract_pynn_neuron_model_standard.py | 12 +++--- spynnaker/pyNN/spynnaker.cfg | 12 ------ 6 files changed, 47 insertions(+), 64 deletions(-) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py index 44c57dd4db..8c40e80bb4 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py @@ -96,9 +96,9 @@ def __init__( @overrides(AbstractPyNNNeuronModelStandard.create_vertex) def create_vertex( self, n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, min_weights, weight_random_sigma, - max_stdp_spike_delta, incoming_spike_buffer_size, - n_steps_per_timestep, drop_late_spikes, splitter): + ring_buffer_sigma, incoming_spike_buffer_size, + n_steps_per_timestep, drop_late_spikes, splitter, min_weights, + weight_random_sigma, max_stdp_spike_delta): if n_neurons != len(self._devices): raise ConfigurationException( "Number of neurons does not match number of devices in {}" @@ -108,6 +108,5 @@ def create_vertex( return ExternalDeviceLifControlVertex( self._devices, self._create_edges, max_atoms, self._model, self, self._translator, 
spikes_per_second, label, ring_buffer_sigma, - min_weights, weight_random_sigma, max_stdp_spike_delta, incoming_spike_buffer_size, constraints, drop_late_spikes, - splitter) + splitter, min_weights, weight_random_sigma, max_stdp_spike_delta) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py index 4c10cf1f7c..71033a642a 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control_vertex.py @@ -47,9 +47,10 @@ class ExternalDeviceLifControlVertex( def __init__( self, devices, create_edges, max_atoms_per_core, neuron_impl, pynn_model, translator=None, spikes_per_second=None, label=None, - ring_buffer_sigma=None, min_weights=None, weight_random_sigma=None, - max_stdp_spike_delta=None, incoming_spike_buffer_size=None, - drop_late_spikes=None, constraints=None, splitter=None): + ring_buffer_sigma=None, incoming_spike_buffer_size=None, + drop_late_spikes=None, constraints=None, splitter=None, + min_weights=None, weight_random_sigma=None, + max_stdp_spike_delta=None): """ :param list(AbstractMulticastControllableDevice) devices: The AbstractMulticastControllableDevice instances to be controlled @@ -67,14 +68,14 @@ def __init__( :param float spikes_per_second: :param str label: :param float ring_buffer_sigma: - :param list min_weights: - :param float weight_random_sigma: - :param float max_stdp_spike_delta: :param int incoming_spike_buffer_size: :param splitter: splitter from app to machine :type splitter: None or ~pacman.model.partitioner_splitters.abstract_splitters.AbstractSplitterCommon :param list(~pacman.model.constraints.AbstractConstraint) constraints: + :param list min_weights: + :param float weight_random_sigma: + :param float max_stdp_spike_delta: """ # pylint: disable=too-many-arguments, too-many-locals @@ -107,10 +108,10 @@ def __init__( 
super().__init__( len(devices), label, constraints, max_atoms_per_core, - spikes_per_second, ring_buffer_sigma, min_weights, - weight_random_sigma, max_stdp_spike_delta, + spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, neuron_impl, pynn_model, - drop_late_spikes, splitter) + drop_late_spikes, splitter, min_weights, + weight_random_sigma, max_stdp_spike_delta) def routing_key_partition_atom_mapping(self, routing_info, partition): # pylint: disable=arguments-differ diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 9f4a5040f2..a81dc06bdd 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -149,10 +149,10 @@ class AbstractPopulationVertex( def __init__( self, n_neurons, label, constraints, max_atoms_per_core, - spikes_per_second, ring_buffer_sigma, min_weights, - weight_random_sigma, max_stdp_spike_delta, + spikes_per_second, ring_buffer_sigma, incoming_spike_buffer_size, neuron_impl, pynn_model, - drop_late_spikes, splitter): + drop_late_spikes, splitter, min_weights, weight_random_sigma, + max_stdp_spike_delta): """ :param int n_neurons: The number of neurons in the population :param str label: The label on the population @@ -167,12 +167,6 @@ def __init__( size; a good starting choice is 5.0. Given length of simulation we can set this for approximate number of saturation events. :type ring_buffer_sigma: float or None - :param min_weights: minimum weight list - :type min_weights: float or None - :param weight_random_sigma: sigma value for ? - :type weight_random_sigma: float or None - :param max_stdp_spike_delta: delta - :type max_stdp_spike_delta: float or None :param incoming_spike_buffer_size: :type incoming_spike_buffer_size: int or None :param bool drop_late_spikes: control flag for dropping late packets. 
@@ -183,6 +177,12 @@ def __init__( :param splitter: splitter object :type splitter: None or ~pacman.model.partitioner_splitters.abstract_splitters.AbstractSplitterCommon + :param min_weights: minimum weight list + :type min_weights: float array or None + :param weight_random_sigma: sigma value when using random weights + :type weight_random_sigma: float or None + :param max_stdp_spike_delta: the maximum expected spike time difference + :type max_stdp_spike_delta: float or None """ # pylint: disable=too-many-arguments, too-many-locals @@ -268,30 +268,20 @@ def __init__( # meaning "auto calculate"; the number of weights needs to match # the number of synapse types self.__min_weights = min_weights - if self.__min_weights is None: - config_min_weights = get_config_str("Simulation", "min_weights") - if config_min_weights is not None: - self.__min_weights = [float(v) - for v in config_min_weights.split(',')] self.__min_weights_auto = True if self.__min_weights is not None: self.__min_weights_auto = False n_synapse_types = self.__neuron_impl.get_n_synapse_types() if len(self.__min_weights) != n_synapse_types: raise SynapticConfigurationException( - "The number of minimum weights provided ({}) does not" + "The number of minimum weights provided ({} - {}) does not" " match the number of synapse types ({})".format( - self.__min_weights, n_synapse_types)) + len(self.__min_weights), self.__min_weights, + n_synapse_types)) - # Read the other minimum weight configuration parameters + # Get the other minimum weight configuration parameters self.__weight_random_sigma = weight_random_sigma - if self.__weight_random_sigma is None: - self.__weight_random_sigma = get_config_float( - "Simulation", "weight_random_sigma") self.__max_stdp_spike_delta = max_stdp_spike_delta - if self.__max_stdp_spike_delta is None: - self.__max_stdp_spike_delta = get_config_float( - "Simulation", "max_stdp_spike_delta") # Store weight provenance information mapping from # (real weight, represented weight) 
-> projections @@ -1045,6 +1035,11 @@ def get_min_weights(self, incoming_projections): if self.__min_weights is None: self.__min_weights = self.__calculate_min_weights( incoming_projections) + else: + weight_scale = self.__neuron_impl.get_global_weight_scale() + self.__check_weights( + self.__min_weights, weight_scale, incoming_projections) + return self.__min_weights def get_weight_scales(self, min_weights): diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py index 972985b7c5..39c41d0544 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model.py @@ -24,9 +24,9 @@ _population_parameters = { "spikes_per_second": None, "ring_buffer_sigma": None, - "min_weights": None, "weight_random_sigma": None, - "max_stdp_spike_delta": None, "incoming_spike_buffer_size": None, - "drop_late_spikes": None, "splitter": None + "incoming_spike_buffer_size": None, "drop_late_spikes": None, + "splitter": None, "min_weights": None, "weight_random_sigma": 2, + "max_stdp_spike_delta": 50, } @@ -59,13 +59,13 @@ def get_max_atoms_per_core(cls): additional_arguments=_population_parameters.keys()) def create_vertex( self, n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, min_weights, weight_random_sigma, - max_stdp_spike_delta, incoming_spike_buffer_size, - drop_late_spikes, splitter): + ring_buffer_sigma, incoming_spike_buffer_size, + drop_late_spikes, splitter, min_weights, weight_random_sigma, + max_stdp_spike_delta): # pylint: disable=arguments-differ max_atoms = self.get_max_atoms_per_core() return AbstractPopulationVertex( n_neurons, label, constraints, max_atoms, spikes_per_second, - ring_buffer_sigma, min_weights, weight_random_sigma, - max_stdp_spike_delta, incoming_spike_buffer_size, - self.__model, self, drop_late_spikes, splitter) + ring_buffer_sigma, incoming_spike_buffer_size, + self.__model, self, 
drop_late_spikes, splitter, min_weights, + weight_random_sigma, max_stdp_spike_delta,) diff --git a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py index 0f5f4672e0..bc9b85ab83 100644 --- a/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py +++ b/spynnaker/pyNN/models/neuron/abstract_pynn_neuron_model_standard.py @@ -55,13 +55,13 @@ def __init__( additional_arguments={"n_steps_per_timestep"}) def create_vertex( self, n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, min_weights, weight_random_sigma, - max_stdp_spike_delta, incoming_spike_buffer_size, - n_steps_per_timestep, drop_late_spikes, splitter): + ring_buffer_sigma, incoming_spike_buffer_size, + n_steps_per_timestep, drop_late_spikes, splitter, min_weights, + weight_random_sigma, max_stdp_spike_delta): # pylint: disable=arguments-differ self._model.n_steps_per_timestep = n_steps_per_timestep return super().create_vertex( n_neurons, label, constraints, spikes_per_second, - ring_buffer_sigma, min_weights, weight_random_sigma, - max_stdp_spike_delta, incoming_spike_buffer_size, - drop_late_spikes, splitter) + ring_buffer_sigma, incoming_spike_buffer_size, + drop_late_spikes, splitter, min_weights, weight_random_sigma, + max_stdp_spike_delta) diff --git a/spynnaker/pyNN/spynnaker.cfg b/spynnaker/pyNN/spynnaker.cfg index db0a4c6204..2c5b7f85bc 100644 --- a/spynnaker/pyNN/spynnaker.cfg +++ b/spynnaker/pyNN/spynnaker.cfg @@ -34,18 +34,6 @@ one_to_one_connection_dtcm_max_bytes = 2048 # performance limiter to throw away packets not processed in a given time step drop_late_spikes = True -# Auto-compute or specify the minimum weights -min_weights = None - -# Expected maximum time in ms between spikes for STDP. This is used in the -# minimum weight calculation. It is ignored if the minimum weights are -# specified. 
-max_stdp_spike_delta = 50 - -# Number of standard deviations from the mean to account for in the calculation -# of the minimum weight when a random weight is specified -weight_random_sigma = 2 - # The overhead to add to the transfer clocks # when using a split synapse neuron model transfer_overhead_clocks = 200 From ab813dae92116da4644431583a934d033eafbe80 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 25 May 2022 14:03:39 +0100 Subject: [PATCH 131/198] remove unused import --- spynnaker/pyNN/models/neuron/abstract_population_vertex.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index a81dc06bdd..0c5dcd8db2 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -26,7 +26,7 @@ from pacman.model.constraints.key_allocator_constraints import ( ContiguousKeyRangeContraint) from spinn_utilities.config_holder import ( - get_config_int, get_config_float, get_config_bool, get_config_str) + get_config_int, get_config_float, get_config_bool) from pacman.model.resources import MultiRegionSDRAM From 7829121d2aa9c471fdaf43f0376a90644960e7fb Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 25 May 2022 14:09:07 +0100 Subject: [PATCH 132/198] Add doc back in --- .../models/neuron/synapse_dynamics/abstract_synapse_dynamics.py | 1 + 1 file changed, 1 insertion(+) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index 10cb131b8d..5a25b84beb 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -151,6 +151,7 @@ def get_delay_variance(self, connector, delays, synapse_info): :param AbstractConnector connector: :param 
SynapseInformation synapse_info: + :param ~numpy.ndarray delays: """ # pylint: disable=too-many-arguments return connector.get_delay_variance(delays, synapse_info) From bf997b6f3591f7bd00d4fc38c98af6c0e98329b2 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 25 May 2022 15:01:54 +0100 Subject: [PATCH 133/198] Simplify --- .../weight_dependence/weight_dependence_additive_triplet.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py index 06c5adf785..b888ffccfc 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive_triplet.py @@ -157,9 +157,7 @@ def weight_minimum(self): @overrides(AbstractWeightDependence.weight_change_minimum) def weight_change_minimum(self, min_delta): - pot, dep = min_delta - a2_plus, a3_plus = pot - a2_minus, a3_minus = dep + (a2_plus, a3_plus), (a2_minus, a3_minus) = min_delta min_pot = a2_plus * self.A_plus + a3_plus * self.__a3_plus min_dep = a2_minus * self.A_minus + a3_minus * self.__a3_minus return min(min_pot, min_dep) From 6d996015469a641267a5fba6a7e157fdfc10977b Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 26 May 2022 12:30:56 +0100 Subject: [PATCH 134/198] Move specific min weight calculations from vertex to synapse dynamics (Also removed unnecessary check that array exists) --- .../neuron/abstract_population_vertex.py | 39 +++++++------------ .../models/neuron/plasticity/stdp/common.py | 2 - .../abstract_synapse_dynamics.py | 15 +++++++ .../synapse_dynamics/synapse_dynamics_stdp.py | 32 ++++++++++----- .../synapse_dynamics_structural_static.py | 15 ++++++- 5 files changed, 64 insertions(+), 39 deletions(-) diff --git 
a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 0c5dcd8db2..b40b94d213 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -919,36 +919,23 @@ def __calculate_min_weights(self, incoming_projections): connector = synapse_info.connector synapse_dynamics = synapse_info.synapse_dynamics - weight_min = synapse_dynamics.get_weight_minimum( + conn_weight_min = synapse_dynamics.get_weight_minimum( connector, self.__weight_random_sigma, synapse_info) - if weight_min == 0: - weight_min = DataType.S1615.decode_from_int(1) - weight_min *= weight_scale - if not numpy.isnan(weight_min): + if conn_weight_min == 0: + conn_weight_min = DataType.S1615.decode_from_int(1) + conn_weight_min *= weight_scale + if not numpy.isnan(conn_weight_min): if min_weights[synapse_type] != sys.maxsize: - weight_min = float_gcd( - min_weights[synapse_type], weight_min) + conn_weight_min = float_gcd( + min_weights[synapse_type], conn_weight_min) min_weights[synapse_type] = min( - min_weights[synapse_type], weight_min) - - if isinstance(synapse_dynamics, SynapseDynamicsSTDP): - min_delta = synapse_dynamics.get_weight_min_delta( - self.__max_stdp_spike_delta) - min_delta *= weight_scale - if min_delta is not None and min_delta != 0: - # This also depends on the earlier calculated minimum - min_delta = float_gcd(min_delta, weight_min) - min_weights[synapse_type] = min( - min_weights[synapse_type], min_delta) - elif isinstance(synapse_dynamics, SynapseDynamicsStructuralStatic): - weight_min = synapse_dynamics.initial_weight - weight_min *= weight_scale - if weight_min != 0: - weight_min = float_gcd(min_weights[synapse_type], - weight_min) - min_weights[synapse_type] = min( - min_weights[synapse_type], weight_min) + min_weights[synapse_type], conn_weight_min) + + # Do any remaining calculations inside the synapse dynamics + min_weights = 
synapse_dynamics.calculate_min_weight( + min_weights, self.__max_stdp_spike_delta, weight_scale, + conn_weight_min, synapse_type) # Convert values to their closest representable value to ensure # that division works for the minimum value diff --git a/spynnaker/pyNN/models/neuron/plasticity/stdp/common.py b/spynnaker/pyNN/models/neuron/plasticity/stdp/common.py index d678b88e42..a7a62605f8 100644 --- a/spynnaker/pyNN/models/neuron/plasticity/stdp/common.py +++ b/spynnaker/pyNN/models/neuron/plasticity/stdp/common.py @@ -74,8 +74,6 @@ def get_min_lut_value( spike times in milliseconds :rtype: float """ - if not len(exp_lut_array): - return None values = exp_lut_array.view("uint16") # If there isn't a time step and a limit diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index 5a25b84beb..3a8ef5c965 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -244,3 +244,18 @@ def get_synapse_id_by_target(self, target): :rtype: int or None """ return None + + def calculate_min_weight(self, min_weights, max_stdp_spike_delta, + weight_scale, conn_weight_min, synapse_type): + """ Do any further calculations required to work out the minimum + weight value used on the machine. 
+ + :param list min_weights: the current minimum weights + :param int max_stdp_spike_delta: the max time between spikes + :param float weight_scale: the amount to scale the weights, from input + :param float conn_weight_min: the weight minimum from the connector + :param int synapse_type: the synapse ID for which to calculate the min + :rtype: list + """ + # By default no further calculation is required + return min_weights \ No newline at end of file diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 616cf7ab1e..702f3c034a 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -24,7 +24,7 @@ from spynnaker.pyNN.models.abstract_models import AbstractSettable from spynnaker.pyNN.exceptions import ( InvalidParameterType, SynapticConfigurationException) -from spynnaker.pyNN.utilities.utility_calls import get_n_bits +from spynnaker.pyNN.utilities.utility_calls import get_n_bits, float_gcd from .abstract_plastic_synapse_dynamics import AbstractPlasticSynapseDynamics from .abstract_synapse_dynamics_structural import ( AbstractSynapseDynamicsStructural) @@ -517,15 +517,6 @@ def get_weight_variance(self, connector, weights, synapse_info): # has to be given as no variance return 0.0 - def get_weight_min_delta(self, max_stdp_spike_delta): - """ Get the minimum non-zero weight change - - :param float max_stdp_spike_delta: The maximum expected time between - spikes in milliseconds - """ - return self.__weight_dependence.weight_change_minimum( - self.__timing_dependence.minimum_delta(max_stdp_spike_delta)) - @overrides(AbstractPlasticSynapseDynamics.get_weight_maximum) def get_weight_maximum(self, connector, synapse_info): w_max = super().get_weight_maximum(connector, synapse_info) @@ -541,6 +532,27 @@ def get_weight_minimum(self, connector, 
weight_random_sigma, synapse_info): # the weight dependence return min(w_min, self.__weight_dependence.weight_minimum) + def __get_weight_min_delta(self, max_stdp_spike_delta): + """ Get the minimum non-zero weight change + + :param float max_stdp_spike_delta: The maximum expected time between + spikes in milliseconds + """ + return self.__weight_dependence.weight_change_minimum( + self.__timing_dependence.minimum_delta(max_stdp_spike_delta)) + + @overrides(AbstractPlasticSynapseDynamics.calculate_min_weight) + def calculate_min_weight(self, min_weights, max_stdp_spike_delta, + weight_scale, conn_weight_min, synapse_type): + min_delta = self.__get_weight_min_delta(max_stdp_spike_delta) + min_delta *= weight_scale + if min_delta is not None and min_delta != 0: + # This also depends on the earlier calculated minimum + min_delta = float_gcd(min_delta, conn_weight_min) + min_weights[synapse_type] = min( + min_weights[synapse_type], min_delta) + return min_weights + @overrides(AbstractPlasticSynapseDynamics.get_parameter_names) def get_parameter_names(self): names = ['weight', 'delay'] diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py index 5082bd04e9..7a33879741 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_structural_static.py @@ -17,7 +17,8 @@ from pyNN.standardmodels.synapses import StaticSynapse from spinn_utilities.overrides import overrides from spynnaker.pyNN.exceptions import SynapticConfigurationException -from spynnaker.pyNN.utilities.utility_calls import create_mars_kiss_seeds +from spynnaker.pyNN.utilities.utility_calls import ( + create_mars_kiss_seeds, float_gcd) from .abstract_synapse_dynamics_structural import ( AbstractSynapseDynamicsStructural) from .synapse_dynamics_structural_common import ( @@ -287,6 
+288,18 @@ def get_delay_minimum(self, connector, synapse_info): def get_delay_variance(self, connector, delays, synapse_info): return 0.0 + @overrides(SynapseDynamicsStatic.calculate_min_weight) + def calculate_min_weight(self, min_weights, max_stdp_spike_delta, + weight_scale, conn_weight_min, synapse_type): + weight_min = self.__initial_weight + weight_min *= weight_scale + if weight_min != 0: + weight_min = float_gcd(min_weights[synapse_type], + weight_min) + min_weights[synapse_type] = min( + min_weights[synapse_type], weight_min) + return min_weights + @overrides(_Common.get_seeds) def get_seeds(self, app_vertex=None): if app_vertex: From c5f7a758d26f9fdeb98c8d04fecabb886f5a43f1 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 26 May 2022 12:51:05 +0100 Subject: [PATCH 135/198] Remove no longer used imports and add missing line at end of file --- spynnaker/pyNN/models/neuron/abstract_population_vertex.py | 3 +-- .../neuron/synapse_dynamics/abstract_synapse_dynamics.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index b40b94d213..965d2fe68f 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -60,8 +60,7 @@ SpynnakerRangeDictionary) from spynnaker.pyNN.utilities.utility_calls import float_gcd from spynnaker.pyNN.models.neuron.synapse_dynamics import ( - AbstractSynapseDynamics, AbstractSynapseDynamicsStructural, - SynapseDynamicsSTDP, SynapseDynamicsStructuralStatic) + AbstractSynapseDynamics, AbstractSynapseDynamicsStructural) from .synapse_io import get_max_row_info from .master_pop_table import MasterPopTableAsBinarySearch from .generator_data import GeneratorData diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py 
b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index 3a8ef5c965..abdcfddc63 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -258,4 +258,4 @@ def calculate_min_weight(self, min_weights, max_stdp_spike_delta, :rtype: list """ # By default no further calculation is required - return min_weights \ No newline at end of file + return min_weights From 6be8bb64ed5a40ef78c0130b69b645224623de76 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 26 May 2022 13:49:28 +0100 Subject: [PATCH 136/198] Remove commented out code --- neural_modelling/src/neuron/synapse_row.h | 16 ---------------- 1 file changed, 16 deletions(-) diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h index 14a94b06b0..b3ae66df52 100644 --- a/neural_modelling/src/neuron/synapse_row.h +++ b/neural_modelling/src/neuron/synapse_row.h @@ -232,22 +232,6 @@ static inline weight_t synapse_row_sparse_weight(uint32_t x) { return x >> (32 - SYNAPSE_WEIGHT_BITS); } -////! \brief Converts a weight stored in a synapse row to an input -////! \param[in] weight: the weight to convert in synapse-row form -////! \param[in] left_shift: the shift to use when decoding -////! \return the actual input weight for the model -//static inline input_t synapse_row_convert_weight_to_input( -// weight_t weight, uint32_t left_shift) { -// union { -// int_k_t input_type; -// s1615 output_type; -// } converter; -// -// converter.input_type = (int_k_t) (weight) << left_shift; -// -// return converter.output_type; -//} - //! \brief Converts a weight stored in a synapse row to an input //! \param[in] weight: the weight to convert in synapse-row form //! 
\param[in] min_weight: the minimum weight to use in the conversion From c17a073748ab937956abf79a15b9e9d0a7981520 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 26 May 2022 13:59:18 +0100 Subject: [PATCH 137/198] Test via tolerance rather than using adjustment from code function --- .../test_various/test_alltoone_with_large_weight.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py b/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py index 9e1fdad7ba..39e1cd4548 100644 --- a/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py +++ b/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py @@ -41,10 +41,9 @@ def do_run(self): sim.end() weight_sum = sum(weight[2] for weight in weight_list) - # 50.0 is not exactly representable so work out the actual value - weight_used = 1 / ( - DataType.S1615.closest_representable_value_above(1 / weights)) - self.assertAlmostEqual(weight_sum, sources * weight_used) + # 50.0 is not exactly representable so specify a relevant tolerance + self.assertAlmostEqual(weight_sum, sources * weights, + delta=sources*0.05) def test_run(self): self.runsafe(self.do_run) From a599cbe0f749659a5b68df37e9d3b58b289a3ee8 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 26 May 2022 14:47:02 +0100 Subject: [PATCH 138/198] Remove unused import --- .../test_various/test_alltoone_with_large_weight.py | 1 - 1 file changed, 1 deletion(-) diff --git a/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py b/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py index 39e1cd4548..fa312af05b 100644 --- a/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py +++ b/spynnaker_integration_tests/test_various/test_alltoone_with_large_weight.py @@ -15,7 +15,6 @@ # You should have received a copy of the GNU General Public License # 
along with this program. If not, see . -from data_specification.enums import DataType from spinnaker_testbase import BaseTestCase import pyNN.spiNNaker as sim From bad2c1a2f4c690d27e0cabb709a15e1b3ae5b734 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 11 Jul 2022 11:24:03 +0100 Subject: [PATCH 139/198] Missed in merge --- .../splitter_components/splitter_abstract_pop_vertex_fixed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py index f0a3de3454..c8ca8d801b 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py @@ -83,7 +83,7 @@ def create_machine_vertices(self, chip_counter): projections = app_vertex.incoming_projections constraints = get_remaining_constraints(app_vertex) min_weights = app_vertex.get_min_weights(projections) - weight_scales = app_vertex.get_weight_scales(ring_buffer_shifts) + weight_scales = app_vertex.get_weight_scales(min_weights) all_syn_block_sz = app_vertex.get_synapses_size( max_atoms_per_core, projections) structural_sz = app_vertex.get_structural_dynamics_size( From 471251ba8792628e1ef580f7dc891716a3fc5134 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 11 Jul 2022 11:29:31 +0100 Subject: [PATCH 140/198] reduce binary size --- .../Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile index 0e5ccef965..478cc2878c 100644 --- 
a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_step_only_impl.h +CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h From 7f654117db337d8baa046a60129fffbdb6743e94 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 11 Jul 2022 14:06:27 +0100 Subject: [PATCH 141/198] w_max no longer needed here --- .../test_struct_pl/test_structural_shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py b/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py index ce80334aeb..9a99acc555 100644 --- a/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py +++ b/spynnaker_integration_tests/test_struct_pl/test_structural_shared.py @@ -99,7 +99,7 @@ def structural_shared(): print(conns_4) w_final_1 = calculate_spike_pair_additive_stdp_weight( - pre_spikes, spikes_3[0], w_init_stdp, delay_init_stdp, w_max, + pre_spikes, spikes_3[0], w_init_stdp, delay_init_stdp, A_plus, A_minus, tau_plus, tau_minus) assert(len(conns) == 1) From 16422d957dfa469927b94abe1e8db4daae82bf2f Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 19 Jul 2022 14:11:13 +0100 Subject: [PATCH 142/198] Missed these 
prints --- .../splitter_abstract_pop_vertex_fixed.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py index c3db84718a..db5d6fe4bb 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py @@ -89,15 +89,14 @@ def create_machine_vertices(self, chip_counter): print("Using given values for RB left shifts.") ring_buffer_shifts = app_vertex.rb_left_shifts print("RB left shifts for {:20}".format(app_vertex.label), - "=", self.__ring_buffer_shifts) + "=", ring_buffer_shifts) print("-" * 80) else: print("=" * 80) print("Computing RB left shifts for", app_vertex.label) - ring_buffer_shifts = app_vertex.get_ring_buffer_shifts( - app_vertex.incoming_projections) + ring_buffer_shifts = app_vertex.get_ring_buffer_shifts(projections) print("RB left shifts for {:20}".format(app_vertex.label), - "=", self.__ring_buffer_shifts) + "=", ring_buffer_shifts) # ring_buffer_shifts = app_vertex.get_ring_buffer_shifts(projections) weight_scales = app_vertex.get_weight_scales(ring_buffer_shifts) From caf6b489ed0e47ae4b424bfc10e0caa0f0764608 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 19 Jul 2022 14:20:04 +0100 Subject: [PATCH 143/198] Move print functions under log debug --- neural_modelling/src/neuron/spike_profiling.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neural_modelling/src/neuron/spike_profiling.h b/neural_modelling/src/neuron/spike_profiling.h index e8f2752ca3..958dfd858b 100644 --- a/neural_modelling/src/neuron/spike_profiling.h +++ b/neural_modelling/src/neuron/spike_profiling.h @@ -82,6 +82,7 @@ static inline accum spike_profiling_get_spike_holder_as_accum( return x.acc; } +#if LOG_LEVEL >= LOG_DEBUG static inline 
void spike_profiling_print_spikes_from_spike_holder( struct spike_holder_t spikes_orig) { io_printf(IO_BUF, "Spikes from input: a %u, b %u, c %u, d %u \n", @@ -94,3 +95,4 @@ static inline void spike_profiling_print_spikes_from_int(int32_t output) { (output & 0xFF), (output >> 8 & 0xFF), (output >> 16 & 0xFF), (output >> 24 & 0xFF)); } +#endif From 2c95c1f03fab12b39bfa1728872db63ca4377204 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 19 Jul 2022 14:26:39 +0100 Subject: [PATCH 144/198] Not sure how this file came back again... --- .../push_bot_retina_viewer.py | 128 ------------------ 1 file changed, 128 deletions(-) delete mode 100644 spynnaker/pyNN/external_devices_models/push_bot/push_bot_parameters/push_bot_retina_viewer.py diff --git a/spynnaker/pyNN/external_devices_models/push_bot/push_bot_parameters/push_bot_retina_viewer.py b/spynnaker/pyNN/external_devices_models/push_bot/push_bot_parameters/push_bot_retina_viewer.py deleted file mode 100644 index 9f0a1d883f..0000000000 --- a/spynnaker/pyNN/external_devices_models/push_bot/push_bot_parameters/push_bot_retina_viewer.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright (c) 2017-2019 The University of Manchester -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -import math -import socket -from threading import Thread -import numpy - -# Value of brightest pixel to show -_DISPLAY_MAX = 33.0 -# How regularity to display frames -_FRAME_TIME_MS = 10 -# Time constant of pixel decay -_DECAY_TIME_CONSTANT_MS = 100 -_BUFFER_SIZE = 512 - - -class PushBotRetinaViewer(Thread): - def __init__( - self, resolution, port=0, display_max=_DISPLAY_MAX, - frame_time_ms=_FRAME_TIME_MS, - decay_time_constant_ms=_DECAY_TIME_CONSTANT_MS): - # pylint: disable=too-many-arguments - try: - from matplotlib import pyplot # NOQA - from matplotlib import animation # NOQA - self.__pyplot = pyplot - self.__animation = animation - except ImportError: - raise Exception("matplotlib must be installed to use this viewer") - - super(PushBotRetinaViewer, self).__init__(name="PushBotRetinaViewer") - self.__display_max = display_max - self.__frame_time_ms = frame_time_ms - self.__image = None - self.__ani = None - - # Open socket to receive UDP - self._init_socket(port) - - # Determine mask for coordinates - self.__coordinate_mask = \ - (1 << (2 * resolution.bits_per_coordinate)) - 1 - - # Set up the image - self.__image_data = numpy.zeros(resolution.pixels * resolution.pixels) - self.__image_data_view = self.__image_data.view() - self.__image_data_view.shape = (resolution.pixels, resolution.pixels) - - # Calculate decay proportion each frame - self.__decay_proportion = math.exp( - -float(self.__frame_time_ms) / float(decay_time_constant_ms)) - - def _init_socket(self, port): - """ Open socket to receive UDP. 
- """ - self.__spike_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - self.__spike_socket.bind(("0.0.0.0", port)) - self.__spike_socket.setblocking(False) - - self.__local_host, self.__local_port = \ - self.__spike_socket.getsockname() - - @property - def local_host(self): - return self.__local_host - - @property - def local_port(self): - return self.__local_port - - def __recv_data(self, size=_BUFFER_SIZE): - return self.__spike_socket.recv(size) - - def _close(self): - self.__spike_socket.close() - - def _parse_raw_data(self, raw_data): - # Slice off EIEIO header and timestamp, and convert to numpy - # array of uint32 - payload = numpy.fromstring(raw_data[6:], dtype="uint32") - - # Mask out x, y coordinates - payload &= self.__coordinate_mask - - # Increment these pixels - self.__image_data[payload] += 1.0 - - def _updatefig(self): - # Read all UDP messages received during last frame - while True: - try: - self._parse_raw_data(self.__recv_data()) - except socket.error: - # Stop reading - break - - # Decay image data - self.__image_data *= self.__decay_proportion - - # Set image data - self.__image.set_array(self.__image_data_view) - return [self.__image] - - def run(self): - # Create image plot of retina output - fig = self.__pyplot.figure() - self.__image = self.__pyplot.imshow( - self.__image_data_view, cmap="viridis", vmin=0.0, - vmax=self.__display_max) - - # Play animation - self.__ani = self.__animation.FuncAnimation( - fig, (lambda _frame: self._updatefig()), - interval=self.__frame_time_ms, blit=True) - self.__pyplot.show() From f8ec96656c6dd7c3b5374f419b1a0f8136386400 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 19 Jul 2022 14:53:50 +0100 Subject: [PATCH 145/198] Turn off largest binaries for now --- neural_modelling/makefiles/neuron/Makefile | 10 +++++----- .../src/neuron/current_sources/current_source.h | 2 -- 2 files changed, 5 insertions(+), 7 deletions(-) diff --git a/neural_modelling/makefiles/neuron/Makefile 
b/neural_modelling/makefiles/neuron/Makefile index 9d5ffdbce2..e5c1f06307 100644 --- a/neural_modelling/makefiles/neuron/Makefile +++ b/neural_modelling/makefiles/neuron/Makefile @@ -50,11 +50,11 @@ MODELS = IF_curr_exp \ IZK_cond_exp_stdp_izhikevich_neuromodulation_pair_additive \ IZK_cond_exp_stdp_izhikevich_neuromodulation_pair_multiplicative \ IF_curr_exp_stdp_izhikevich_neuromodulation_vogels_2011_additive \ - IF_curr_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight \ - IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight \ - IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight \ - IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight \ - IF_cond_exp_stdp_mad_pair_additive_structural_random_distance_weight + IF_curr_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight +# IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight \ +# IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight \ +# IF_cond_exp_stdp_mad_pair_additive_structural_random_distance_weight +# IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight \ all: for d in $(MODELS); do $(MAKE) -C $$d || exit $$?; done diff --git a/neural_modelling/src/neuron/current_sources/current_source.h b/neural_modelling/src/neuron/current_sources/current_source.h index 43af125046..d58ab0872d 100644 --- a/neural_modelling/src/neuron/current_sources/current_source.h +++ b/neural_modelling/src/neuron/current_sources/current_source.h @@ -60,7 +60,6 @@ static bool current_source_initialise(address_t cs_address, uint32_t n_neurons) // Avoid the loops if no current sources #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) - io_printf(IO_BUF, "no current sources defined \n"); return true; #else @@ -155,7 +154,6 @@ static bool current_source_load_parameters(address_t 
cs_address) { // Avoid the loops if no current sources #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) - io_printf(IO_BUF, "no current sources defined \n"); return true; #else From e51cfedde75c0f8001da4327083d46d731faa872 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 20 Jul 2022 09:32:42 +0100 Subject: [PATCH 146/198] Implement no current source; update Makefiles for large binaries --- .../Makefile | 2 +- .../Makefile | 2 +- .../Makefile | 2 +- neural_modelling/makefiles/neuron/Makefile | 10 ++++---- .../neuron/current_sources/current_source.h | 6 ++--- .../current_sources/current_source_none.h | 25 +++++++++++++++++++ .../current_source_none_impl.h | 25 +++++++++++++++++++ 7 files changed, 60 insertions(+), 12 deletions(-) create mode 100644 neural_modelling/src/neuron/current_sources/current_source_none.h create mode 100644 neural_modelling/src/neuron/current_sources/current_source_none_impl.h diff --git a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile index a6556cca29..83da67c914 100644 --- a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_stepnoisy_impl.h +CURRENT_SOURCE_H = 
$(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h diff --git a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile index d86d1de57c..70595029ae 100644 --- a/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_cond_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile @@ -17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_conductance.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h +CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_none_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h diff --git a/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile b/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile index 832dda59a8..fc9ace5af2 100644 --- a/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile +++ b/neural_modelling/makefiles/neuron/IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight/Makefile @@ 
-17,7 +17,7 @@ APP = $(notdir $(CURDIR)) NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_current.h -CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_noisy_only_impl.h +CURRENT_SOURCE_H = $(NEURON_DIR)/neuron/current_sources/current_source_dc_only_impl.h NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_exponential_impl.h diff --git a/neural_modelling/makefiles/neuron/Makefile b/neural_modelling/makefiles/neuron/Makefile index e5c1f06307..9d5ffdbce2 100644 --- a/neural_modelling/makefiles/neuron/Makefile +++ b/neural_modelling/makefiles/neuron/Makefile @@ -50,11 +50,11 @@ MODELS = IF_curr_exp \ IZK_cond_exp_stdp_izhikevich_neuromodulation_pair_additive \ IZK_cond_exp_stdp_izhikevich_neuromodulation_pair_multiplicative \ IF_curr_exp_stdp_izhikevich_neuromodulation_vogels_2011_additive \ - IF_curr_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight -# IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight \ -# IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight \ -# IF_cond_exp_stdp_mad_pair_additive_structural_random_distance_weight -# IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight \ + IF_curr_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight \ + IF_curr_exp_stdp_mad_pair_additive_structural_random_distance_weight \ + IF_cond_exp_stdp_mad_nearest_pair_additive_structural_last_neuron_distance_weight \ + IF_cond_exp_stdp_mad_pair_additive_structural_last_neuron_distance_weight \ + IF_cond_exp_stdp_mad_pair_additive_structural_random_distance_weight all: for d in $(MODELS); do $(MAKE) -C $$d || exit $$?; done diff --git 
a/neural_modelling/src/neuron/current_sources/current_source.h b/neural_modelling/src/neuron/current_sources/current_source.h index d58ab0872d..a18c35fa83 100644 --- a/neural_modelling/src/neuron/current_sources/current_source.h +++ b/neural_modelling/src/neuron/current_sources/current_source.h @@ -58,8 +58,7 @@ SOMETIMES_UNUSED // Marked unused as only used sometimes //! \return True if successful static bool current_source_initialise(address_t cs_address, uint32_t n_neurons) { // Avoid the loops if no current sources - #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ - !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) + #if defined(_CURRENT_SOURCE_NONE_H_) return true; #else @@ -152,8 +151,7 @@ SOMETIMES_UNUSED // Marked unused as only used sometimes //! \return True if successful static bool current_source_load_parameters(address_t cs_address) { // Avoid the loops if no current sources - #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ - !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) + #if defined(_CURRENT_SOURCE_NONE_H_) return true; #else diff --git a/neural_modelling/src/neuron/current_sources/current_source_none.h b/neural_modelling/src/neuron/current_sources/current_source_none.h new file mode 100644 index 0000000000..53a951aa8c --- /dev/null +++ b/neural_modelling/src/neuron/current_sources/current_source_none.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +//! \dir +//! \brief None current source functions +//! \file +//! \brief Functions called for no current source +#ifndef _CURRENT_SOURCE_NONE_H_ +#define _CURRENT_SOURCE_NONE_H_ + +#endif // _CURRENT_SOURCE_NONE_H_ diff --git a/neural_modelling/src/neuron/current_sources/current_source_none_impl.h b/neural_modelling/src/neuron/current_sources/current_source_none_impl.h new file mode 100644 index 0000000000..bdea7a9b92 --- /dev/null +++ b/neural_modelling/src/neuron/current_sources/current_source_none_impl.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2017-2019 The University of Manchester + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see . + */ + +//! \dir +//! \brief Current source implementations +//! \file +//! 
\brief General API of a current source implementation +#ifndef _CURRENT_SOURCE_NONE_IMPL_H_ +#define _CURRENT_SOURCE_NONE_IMPL_H_ + +#endif // _CURRENT_SOURCE_NONE_IMPL_H_ From b11c3162adbcd824d43ca192fe9cc92c54e38c89 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 31 Aug 2022 15:01:28 +0100 Subject: [PATCH 147/198] Delay slots constant has been moved to the splitter --- .../splitter_components/abstract_spynnaker_splitter_delay.py | 2 +- spynnaker/pyNN/utilities/constants.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py index 07b5aff2e2..2dbd2fc872 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py @@ -28,7 +28,7 @@ class AbstractSpynnakerSplitterDelay(object, metaclass=AbstractBase): __slots__ = [] # max delays supported by a slice split machine vertex - MAX_SUPPORTED_DELAY_TICS = 64 # should this be 64? + MAX_SUPPORTED_DELAY_TICS = 16 # 64 # should this be 64? 
def max_support_delay(self): """ diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index b92ae89dc1..a9c09d4a9c 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -57,8 +57,8 @@ SCALE = WEIGHT_FLOAT_TO_FIXED_SCALE * NA_TO_PA_SCALE #: natively supported delays for all abstract_models -MAX_SUPPORTED_DELAY_TICS = 64 -MAX_DELAY_BLOCKS = 64 +MAX_SUPPORTED_DELAY_TICS = 16 # 64 +MAX_DELAY_BLOCKS = 16 # 64 DELAY_MASK = (1 << int(math.log2(MAX_SUPPORTED_DELAY_TICS))) - 1 MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 From 4fc7cf832a0daeab43d7392a93d73a3132c29378 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 9 Sep 2022 14:25:38 +0100 Subject: [PATCH 148/198] flake8 --- .../neuron/abstract_population_vertex.py | 35 +++++++++++-------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index dd19a42b3b..c3fb2810e5 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -17,7 +17,7 @@ import sys import math import numpy -from scipy import special # @UnresolvedImport +# from scipy import special # @UnresolvedImport import operator from functools import reduce from collections import defaultdict @@ -56,18 +56,19 @@ NeuronRecorder) from spynnaker.pyNN.models.abstract_models import ( AbstractPopulationInitializable, AbstractAcceptsIncomingSynapses, - AbstractPopulationSettable, AbstractContainsUnits, AbstractMaxSpikes, + AbstractPopulationSettable, AbstractContainsUnits, # AbstractMaxSpikes, HasSynapses, SupportsStructure) -from spynnaker.pyNN.exceptions import InvalidParameterType, SpynnakerException +from spynnaker.pyNN.exceptions import ( + InvalidParameterType, SpynnakerException, SynapticConfigurationException) from spynnaker.pyNN.utilities.ranged import ( 
SpynnakerRangeDictionary) from spynnaker.pyNN.utilities.utility_calls import float_gcd -from spynnaker.pyNN.utilities.constants import ( - POSSION_SIGMA_SUMMATION_LIMIT) -from spynnaker.pyNN.utilities.running_stats import RunningStats +# from spynnaker.pyNN.utilities.constants import ( +# POSSION_SIGMA_SUMMATION_LIMIT) +# from spynnaker.pyNN.utilities.running_stats import RunningStats from spynnaker.pyNN.models.neuron.synapse_dynamics import ( - AbstractSDRAMSynapseDynamics, AbstractSynapseDynamicsStructural, - AbstractSupportsSignedWeights) + AbstractSDRAMSynapseDynamics, AbstractSynapseDynamicsStructural) +# AbstractSupportsSignedWeights) from spynnaker.pyNN.models.neuron.local_only import AbstractLocalOnly from spynnaker.pyNN.models.neuron.synapse_dynamics import SynapseDynamicsStatic from .synapse_io import get_max_row_info @@ -1492,21 +1493,24 @@ def get_local_provenance_data(self): # s_dynamics = s_info.synapse_dynamics # # n_conns = connector.get_n_connections_to_post_vertex_maximum(s_info) -# d_var = s_dynamics.get_delay_variance(connector, s_info.delays, s_info) +# d_var = s_dynamics.get_delay_variance( +# connector, s_info.delays, s_info) # # s_type_pos = s_dynamics.get_positive_synapse_index(proj) # w_mean_pos = s_dynamics.get_mean_positive_weight(proj) # w_var_pos = s_dynamics.get_variance_positive_weight(proj) # w_max_pos = s_dynamics.get_maximum_positive_weight(proj) # self.__add_details( -# proj, s_type_pos, n_conns, w_mean_pos, w_var_pos, w_max_pos, d_var) +# proj, s_type_pos, n_conns, w_mean_pos, w_var_pos, w_max_pos, +# d_var) # # s_type_neg = s_dynamics.get_negative_synapse_index(proj) # w_mean_neg = -s_dynamics.get_mean_negative_weight(proj) # w_var_neg = -s_dynamics.get_variance_negative_weight(proj) # w_max_neg = -s_dynamics.get_minimum_negative_weight(proj) # self.__add_details( -# proj, s_type_neg, n_conns, w_mean_neg, w_var_neg, w_max_neg, d_var) +# proj, s_type_neg, n_conns, w_mean_neg, w_var_neg, w_max_neg, +# d_var) # # def 
__add_unsigned_projection(self, proj): # # pylint: disable=protected-access @@ -1520,8 +1524,10 @@ def get_local_provenance_data(self): # w_var = s_dynamics.get_weight_variance( # connector, s_info.weights, s_info) # w_max = s_dynamics.get_weight_maximum(connector, s_info) -# d_var = s_dynamics.get_delay_variance(connector, s_info.delays, s_info) -# self.__add_details(proj, s_type, n_conns, w_mean, w_var, w_max, d_var) +# d_var = s_dynamics.get_delay_variance( +# connector, s_info.delays, s_info) +# self.__add_details( +# proj, s_type, n_conns, w_mean, w_var, w_max, d_var) # # def __add_details( # self, proj, s_type, n_conns, w_mean, w_var, w_max, d_var): @@ -1550,7 +1556,8 @@ def get_local_provenance_data(self): # # def get_max_weight(self, s_type): # if self.delay_running_totals[s_type].variance == 0.0: -# return max(self.total_weights[s_type], self.biggest_weight[s_type]) +# return max( +# self.total_weights[s_type], self.biggest_weight[s_type]) # # stats = self.running_totals[s_type] # rates = self.rate_stats[s_type] From 3848c818a098289f61abb27cfc6c47d58b49ca0c Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 9 Sep 2022 14:45:45 +0100 Subject: [PATCH 149/198] more linting --- .../splitter_components/splitter_abstract_pop_vertex_fixed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py index 890a8056ea..49a4478d22 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py @@ -365,7 +365,7 @@ def __bitfield_size(self): @overrides(AbstractSplitterCommon.reset_called) def reset_called(self): super(SplitterAbstractPopulationVertexFixed, self).reset_called() - self.__ring_buffer_shifts = None + self.__min_weights = None 
self.__weight_scales = None self.__all_syn_block_sz = dict() self.__structural_sz = dict() From 1523ae84fd35acd35696350481810ba967bd5cb1 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 12 Sep 2022 13:47:53 +0100 Subject: [PATCH 150/198] Override get_weight_minimum for new kernel-based connectors --- .../connectors/convolution_connector.py | 10 ++++++++++ .../connectors/pool_dense_connector.py | 10 ++++++++++ 2 files changed, 20 insertions(+) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py index ddb02a64c8..d5dc744ea5 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/convolution_connector.py @@ -274,6 +274,16 @@ def get_n_connections_to_post_vertex_maximum(self, synapse_info): def get_weight_maximum(self, synapse_info): return numpy.amax(self.__kernel_weights) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, weights, weight_random_sigma, synapse_info): + # Use the kernel weights if user has supplied them + if self.__kernel_weights is not None: + return super(ConvolutionConnector, self).get_weight_minimum( + self.__kernel_weights, weight_random_sigma, synapse_info) + + return super(ConvolutionConnector, self).get_weight_minimum( + weights, weight_random_sigma, synapse_info) + @overrides(AbstractConnector.get_connected_vertices) def get_connected_vertices(self, s_info, source_vertex, target_vertex): pre_vertices = numpy.array( diff --git a/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py index d90a0fa14a..7105499386 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/pool_dense_connector.py @@ -238,6 +238,16 @@ def 
get_weight_maximum(self, synapse_info): return super(PoolDenseConnector, self)._get_weight_maximum( self.__weights, n_conns, synapse_info) + @overrides(AbstractConnector.get_weight_minimum) + def get_weight_minimum(self, weights, weight_random_sigma, synapse_info): + # Use the kernel weights if user has supplied them + if self.__weights is not None: + return super(PoolDenseConnector, self).get_weight_minimum( + self.__weights, weight_random_sigma, synapse_info) + + return super(PoolDenseConnector, self).get_weight_minimum( + weights, weight_random_sigma, synapse_info) + def __pre_as_post(self, pre_coords): """ Write pre coords as post coords. From 7cb989806149634816d01a35592d62c68a527038 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 12 Sep 2022 15:07:57 +0100 Subject: [PATCH 151/198] Update comment --- neural_modelling/src/neuron/neuron.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/neuron.c b/neural_modelling/src/neuron/neuron.c index a86f05f736..0846813cfb 100644 --- a/neural_modelling/src/neuron/neuron.c +++ b/neural_modelling/src/neuron/neuron.c @@ -117,7 +117,7 @@ bool neuron_initialise( n_neurons_peak = params->n_neurons_peak; n_synapse_types = params->n_synapse_types; - // Set up ring buffer left shifts + // Set up min weights uint32_t min_weights_bytes = n_synapse_types * sizeof(REAL); min_weights = spin1_malloc(min_weights_bytes); if (min_weights == NULL) { From d197122fbb876122e300fc9c1fc771b8c4a3efca Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 14 Sep 2022 09:03:34 +0100 Subject: [PATCH 152/198] Fix min_weight calculation for local-only case --- .../neuron/abstract_population_vertex.py | 43 +++++++++++++------ 1 file changed, 30 insertions(+), 13 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index c3fb2810e5..af08d4dba0 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py 
+++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -986,27 +986,44 @@ def __calculate_min_weights(self): # Skip if this is a synapse dynamics synapse type if synapse_info.synapse_type_from_dynamics: continue - synapse_type = synapse_info.synapse_type - connector = synapse_info.connector - synapse_dynamics = synapse_info.synapse_dynamics + synapse_dynamics = synapse_info.synapse_dynamics + connector = synapse_info.connector conn_weight_min = synapse_dynamics.get_weight_minimum( connector, self.__weight_random_sigma, synapse_info) - if conn_weight_min == 0: conn_weight_min = DataType.S1615.decode_from_int(1) conn_weight_min *= weight_scale - if not numpy.isnan(conn_weight_min): - if min_weights[synapse_type] != sys.maxsize: - conn_weight_min = float_gcd( + + # If local-only then deal with both positive and negative index + if isinstance(synapse_dynamics, AbstractLocalOnly): + s_type_pos = synapse_dynamics.get_positive_synapse_index(proj) + s_type_neg = synapse_dynamics.get_negative_synapse_index(proj) + if not numpy.isnan(conn_weight_min): + for s_type in [s_type_pos, s_type_neg]: + if min_weights[s_type] != sys.maxsize: + conn_weight_min = float_gcd( + min_weights[s_type], conn_weight_min) + min_weights[s_type] = min( + min_weights[s_type], conn_weight_min) + + # Do any remaining calculations in the synapse dynamics + min_weights = synapse_dynamics.calculate_min_weight( + min_weights, self.__max_stdp_spike_delta, + weight_scale, conn_weight_min, s_type) + else: + synapse_type = synapse_info.synapse_type + if not numpy.isnan(conn_weight_min): + if min_weights[synapse_type] != sys.maxsize: + conn_weight_min = float_gcd( + min_weights[synapse_type], conn_weight_min) + min_weights[synapse_type] = min( min_weights[synapse_type], conn_weight_min) - min_weights[synapse_type] = min( - min_weights[synapse_type], conn_weight_min) - # Do any remaining calculations inside the synapse dynamics - min_weights = synapse_dynamics.calculate_min_weight( - min_weights, 
self.__max_stdp_spike_delta, weight_scale, - conn_weight_min, synapse_type) + # Do any remaining calculations in the synapse dynamics + min_weights = synapse_dynamics.calculate_min_weight( + min_weights, self.__max_stdp_spike_delta, + weight_scale, conn_weight_min, synapse_type) # Convert values to their closest representable value to ensure # that division works for the minimum value From 125005e808ecac496950be71e71415b102c4c8fe Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 22 Sep 2022 15:01:14 +0100 Subject: [PATCH 153/198] Fit debug builds into ITCM --- .../implementations/neuron_impl_standard.h | 3 --- .../population_table_binary_search_impl.c | 24 +++++++++---------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h index 676b54b481..fa0548fed1 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h @@ -177,9 +177,6 @@ SOMETIMES_UNUSED // Marked unused as only used sometimes //! 
\param[in] n_neurons: number of neurons static void neuron_impl_load_neuron_parameters( address_t address, uint32_t next, uint32_t n_neurons) { - log_debug("reading parameters, next is %u, n_neurons is %u ", - next, n_neurons); - // Read the number of steps per timestep n_steps_per_timestep = address[next++]; if (n_steps_per_timestep > 1) { diff --git a/neural_modelling/src/neuron/population_table/population_table_binary_search_impl.c b/neural_modelling/src/neuron/population_table/population_table_binary_search_impl.c index e7415674a8..0f27318c17 100644 --- a/neural_modelling/src/neuron/population_table/population_table_binary_search_impl.c +++ b/neural_modelling/src/neuron/population_table/population_table_binary_search_impl.c @@ -320,15 +320,15 @@ bool population_table_load_bitfields(filter_region_t *filter_region) { log_debug("Master pop key: 0x%08x, mask: 0x%08x", master_population_table[mp_i].key, master_population_table[mp_i].mask); -#ifdef LOG_DEBUG - // Sanity checking code; not needed in normal operation, and costs ITCM - // With both things being in key order, this should never happen... - if (bf_i < n_filters && - filters[bf_i].key < master_population_table[mp_i].key) { - log_error("Skipping bitfield %d for key 0x%08x", bf_i, filters[bf_i].key); - rt_error(RTE_SWERR); - } -#endif +//#ifdef LOG_DEBUG +// // Sanity checking code; not needed in normal operation, and costs ITCM +// // With both things being in key order, this should never happen... 
+// if (bf_i < n_filters && +// filters[bf_i].key < master_population_table[mp_i].key) { +// log_error("Skipping bitfield %d for key 0x%08x", bf_i, filters[bf_i].key); +// rt_error(RTE_SWERR); +// } +//#endif // While there is a match, keep track of the start and end; note this // may recheck the first entry, but there might not be a first entry if @@ -336,7 +336,7 @@ bool population_table_load_bitfields(filter_region_t *filter_region) { uint32_t start = bf_i; uint32_t n_words_total = 0; uint32_t useful = 0; - log_debug("Starting with bit field %d with key 0x%08x", bf_i, filters[bf_i].key); +// log_debug("Starting with bit field %d with key 0x%08x", bf_i, filters[bf_i].key); while (bf_i < n_filters && matches(mp_i, filters[bf_i].key)) { log_debug("Using bit field %d with key 0x%08x, merged %d, redundant %d", bf_i, filters[bf_i].key, filters[bf_i].merged, filters[bf_i].all_ones); @@ -513,12 +513,12 @@ bool population_table_get_first_address( // neuron here. If not return false and avoid the DMA check. if (!bit_field_test( connectivity_bit_field[position], last_neuron_id)) { - log_debug("Tested and was not set"); +// log_debug("Tested and was not set"); bit_field_filtered_packets += 1; items_to_go = 0; return false; } - log_debug("Was set, carrying on"); +// log_debug("Was set, carrying on"); } else { log_debug("Bit field was not set up. 
" "either its due to a lack of DTCM, or because the " From 08f377e9c7653cfc561f9b0b76a0926437caadc4 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 22 Sep 2022 16:48:22 +0100 Subject: [PATCH 154/198] Delete test no longer in master, go back to spinncer delay tics values --- .../splitter_components/abstract_spynnaker_splitter_delay.py | 2 +- spynnaker/pyNN/utilities/constants.py | 4 ++-- unittests/model_tests/neuron/test_synaptic_manager.py | 2 -- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py index 2dbd2fc872..2a78e4b82b 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py @@ -28,7 +28,7 @@ class AbstractSpynnakerSplitterDelay(object, metaclass=AbstractBase): __slots__ = [] # max delays supported by a slice split machine vertex - MAX_SUPPORTED_DELAY_TICS = 16 # 64 # should this be 64? + MAX_SUPPORTED_DELAY_TICS = 64 # can this be 16? 
def max_support_delay(self): """ diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index a9c09d4a9c..b92ae89dc1 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -57,8 +57,8 @@ SCALE = WEIGHT_FLOAT_TO_FIXED_SCALE * NA_TO_PA_SCALE #: natively supported delays for all abstract_models -MAX_SUPPORTED_DELAY_TICS = 16 # 64 -MAX_DELAY_BLOCKS = 16 # 64 +MAX_SUPPORTED_DELAY_TICS = 64 +MAX_DELAY_BLOCKS = 64 DELAY_MASK = (1 << int(math.log2(MAX_SUPPORTED_DELAY_TICS))) - 1 MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index 75c6c9ec53..554bd24ed7 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -411,8 +411,6 @@ def test_set_synapse_dynamics(): # Both delayed and undelayed, some delayed edges don't exist # (app keys work because all undelayed exist) (range(10), [4, 5, 6, 7], 1000, 100, True, 100), - # Should work but number of neurons don't work out - (range(5), [], 10000, 2048, False, None), # Should work but number of cores doesn't work out (range(2000), [], 10000, 5, False, None), # Should work but number of neurons with delays don't work out From 0418d8ae16f2b1f3f99652b277b2dc9e325111ac Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 3 Oct 2022 12:48:29 +0100 Subject: [PATCH 155/198] Attempt to catch possible issues with ring buffer DTCM for longer delays --- .../splitter_abstract_pop_vertex_fixed.py | 24 +++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py index 00f23aa98c..d20f23a7fe 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py +++ 
b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py @@ -20,6 +20,7 @@ AbstractSplitterCommon) from pacman.utilities.algorithm_utilities\ .partition_algorithm_utilities import get_multidimensional_slices +from pacman.utilities.utility_calls import get_n_bits_for_fields from spynnaker.pyNN.models.neuron import ( AbstractPopulationVertex, PopulationMachineVertex, PopulationMachineLocalOnlyCombinedVertex, LocalOnlyProvenance) @@ -38,6 +39,12 @@ from spynnaker.pyNN.models.neuron.local_only import AbstractLocalOnly from collections import defaultdict from spynnaker.pyNN.models.utility_models.delays import DelayExtensionVertex +from spynnaker.pyNN.utilities.utility_calls import get_n_bits +from spynnaker.pyNN.exceptions import SynapticConfigurationException + +# The maximum number of bits for the ring buffer index that are likely to +# fit in DTCM (14-bits = 16,384 16-bit ring buffer entries = 32Kb DTCM +MAX_RING_BUFFER_BITS = 14 class SplitterAbstractPopulationVertexFixed( @@ -97,6 +104,23 @@ def set_governed_app_vertex(self, app_vertex): @overrides(AbstractSplitterCommon.create_machine_vertices) def create_machine_vertices(self, chip_counter): app_vertex = self._governed_app_vertex + + # Do some checks to make sure everything is likely to fit + field_sizes = [ + min(max_atoms, n) for max_atoms, n in zip( + app_vertex.get_max_atoms_per_dimension_per_core(), + app_vertex.atoms_shape)] + n_atom_bits = get_n_bits_for_fields(field_sizes) + n_synapse_types = app_vertex.neuron_impl.get_n_synapse_types() + if (n_atom_bits + get_n_bits(n_synapse_types) + + get_n_bits(self.max_support_delay())) > MAX_RING_BUFFER_BITS: + raise SynapticConfigurationException( + "The combination of the number of neurons per core ({}), " + "the number of synapse types ({}), and the maximum delay per " + "core ({}) will require too much DTCM. 
Please reduce one or " + "more of these values.".format( + field_sizes, n_synapse_types, self.max_support_delay())) + app_vertex.synapse_recorder.add_region_offset( len(app_vertex.neuron_recorder.get_recordable_variables())) From 10d430c40fa6c451623fdad77ce9db70bbeb3763 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 4 Oct 2022 16:40:19 +0100 Subject: [PATCH 156/198] Adjust DELAY_MASK calculations --- .../models/neuron/synapse_dynamics/synapse_dynamics_static.py | 4 ++-- spynnaker/pyNN/utilities/constants.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py index 85666a05ca..7dc657f9d8 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py @@ -170,8 +170,8 @@ def read_static_synaptic_data( connections["target"] = ( (data & neuron_id_mask) + post_vertex_slice.lo_atom) connections["weight"] = (data >> 16) & 0xFFFF - connections["delay"] = (data >> (n_neuron_id_bits + - n_synapse_type_bits)) & DELAY_MASK + connections["delay"] = ((data & OxFFF) >> ( + n_neuron_id_bits + n_synapse_type_bits)) & DELAY_MASK connections["delay"][connections["delay"] == 0] = 16 return connections diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index b92ae89dc1..43cf81753e 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -58,9 +58,9 @@ #: natively supported delays for all abstract_models MAX_SUPPORTED_DELAY_TICS = 64 -MAX_DELAY_BLOCKS = 64 +# MAX_DELAY_BLOCKS = 64 DELAY_MASK = (1 << int(math.log2(MAX_SUPPORTED_DELAY_TICS))) - 1 -MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 +# MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 #: the minimum supported delay slot between two neurons MIN_SUPPORTED_DELAY = 1 From 
91ea67d0a18f46e1d75c9b5b4f00098775320a01 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 4 Oct 2022 16:57:24 +0100 Subject: [PATCH 157/198] Revert "Adjust DELAY_MASK calculations" This reverts commit 10d430c40fa6c451623fdad77ce9db70bbeb3763. --- .../models/neuron/synapse_dynamics/synapse_dynamics_static.py | 4 ++-- spynnaker/pyNN/utilities/constants.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py index 7dc657f9d8..85666a05ca 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_static.py @@ -170,8 +170,8 @@ def read_static_synaptic_data( connections["target"] = ( (data & neuron_id_mask) + post_vertex_slice.lo_atom) connections["weight"] = (data >> 16) & 0xFFFF - connections["delay"] = ((data & OxFFF) >> ( - n_neuron_id_bits + n_synapse_type_bits)) & DELAY_MASK + connections["delay"] = (data >> (n_neuron_id_bits + + n_synapse_type_bits)) & DELAY_MASK connections["delay"][connections["delay"] == 0] = 16 return connections diff --git a/spynnaker/pyNN/utilities/constants.py b/spynnaker/pyNN/utilities/constants.py index 43cf81753e..b92ae89dc1 100644 --- a/spynnaker/pyNN/utilities/constants.py +++ b/spynnaker/pyNN/utilities/constants.py @@ -58,9 +58,9 @@ #: natively supported delays for all abstract_models MAX_SUPPORTED_DELAY_TICS = 64 -# MAX_DELAY_BLOCKS = 64 +MAX_DELAY_BLOCKS = 64 DELAY_MASK = (1 << int(math.log2(MAX_SUPPORTED_DELAY_TICS))) - 1 -# MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 +MAX_TIMER_TICS_SUPPORTED_PER_BLOCK = 16 #: the minimum supported delay slot between two neurons MIN_SUPPORTED_DELAY = 1 From 6b23dbe2630580e5a44565dc49ef237bb65f319e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 18 Nov 2022 17:20:22 +0000 Subject: [PATCH 158/198] Comment out unnecessary printing --- 
neural_modelling/src/neuron/synapses.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index b6fbcd794b..267d505360 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -296,10 +296,10 @@ bool synapses_initialise( // read in min_weights spin1_memcpy(min_weights, params->min_weights, min_weights_bytes); *min_weights_out = min_weights; - for (uint32_t s = 0; s < n_synapse_types; s++) { - log_info("synapse initialise, min_weights_out[%u] = %k", - s, min_weights_out[s]); - } +// for (uint32_t s = 0; s < n_synapse_types; s++) { +// log_info("synapse initialise, min_weights_out[%u] = %k", +// s, min_weights_out[s]); +// } synapse_type_index_bits = log_n_neurons + log_n_synapse_types; synapse_type_index_mask = (1 << synapse_type_index_bits) - 1; From 6261369300c7aac614fe3929588dc304e9647c37 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 18 Nov 2022 17:24:51 +0000 Subject: [PATCH 159/198] flake8 sort out imports --- .../pyNN/models/neuron/abstract_population_vertex.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 04fa881bcb..60a05f022d 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -50,15 +50,11 @@ from spynnaker.pyNN.data import SpynnakerDataView from spynnaker.pyNN.models.common import NeuronRecorder from spynnaker.pyNN.models.abstract_models import ( - AbstractAcceptsIncomingSynapses, AbstractMaxSpikes, HasSynapses, - SupportsStructure) + AbstractAcceptsIncomingSynapses, HasSynapses, SupportsStructure) +from spynnaker.pyNN.exceptions import SynapticConfigurationException from spynnaker.pyNN.utilities.utility_calls import float_gcd -# from 
spynnaker.pyNN.utilities.constants import ( -# POSSION_SIGMA_SUMMATION_LIMIT) -# from spynnaker.pyNN.utilities.running_stats import RunningStats from spynnaker.pyNN.models.neuron.synapse_dynamics import ( AbstractSDRAMSynapseDynamics, AbstractSynapseDynamicsStructural) -# AbstractSupportsSignedWeights) from spynnaker.pyNN.models.neuron.local_only import AbstractLocalOnly from spynnaker.pyNN.models.neuron.synapse_dynamics import SynapseDynamicsStatic from spynnaker.pyNN.utilities.utility_calls import create_mars_kiss_seeds From b205190ea7f0a0a96b9740823da9c6e800f0618e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 18 Nov 2022 17:48:58 +0000 Subject: [PATCH 160/198] Reduce ITCM further... --- neural_modelling/src/neuron/synapses.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 267d505360..358911513e 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -329,7 +329,7 @@ bool synapses_initialise( } *ring_buffers_out = ring_buffers; - log_info("Ready to process synapses for %u neurons with %u synapse types", + log_debug("Ready to process synapses for %u neurons with %u synapse types", n_neurons, n_synapse_types); return true; From 7ecb575f7f1137e8affa01bc5722e8d99038a34c Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 18 Nov 2022 18:12:13 +0000 Subject: [PATCH 161/198] See if this helps with ITCM --- neural_modelling/src/neuron/current_sources/current_source.h | 2 +- neural_modelling/src/neuron/neuron.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/neural_modelling/src/neuron/current_sources/current_source.h b/neural_modelling/src/neuron/current_sources/current_source.h index 748df60fd1..c0f327784c 100644 --- a/neural_modelling/src/neuron/current_sources/current_source.h +++ b/neural_modelling/src/neuron/current_sources/current_source.h @@ -151,7 +151,7 @@ static bool 
current_source_load_parameters(address_t cs_address) { // Avoid the loops if no current sources #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) - io_printf(IO_BUF, "no current sources defined \n"); +// io_printf(IO_BUF, "no current sources defined \n"); return true; #else diff --git a/neural_modelling/src/neuron/neuron.c b/neural_modelling/src/neuron/neuron.c index 33b83f6fa0..58fe6523f5 100644 --- a/neural_modelling/src/neuron/neuron.c +++ b/neural_modelling/src/neuron/neuron.c @@ -148,7 +148,7 @@ bool neuron_initialise( current_source_address = current_sources_address; saved_initial_values_address = initial_values_address; - log_info("\t n_neurons = %u, peak %u, n_synapse_types %u", + log_debug("\t n_neurons = %u, peak %u, n_synapse_types %u", n_neurons, n_neurons_peak, n_synapse_types); // Call the neuron implementation initialise function to setup DTCM etc. From 2022748f17c1588a2b1d0989067ba6a6ba3ce2c8 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 22 Nov 2022 10:36:18 +0000 Subject: [PATCH 162/198] Reduce ITCM by using min_weight reciprocal --- .../src/neuron/c_main_synapse_common.h | 5 ++-- .../stdp/synapse_dynamics_stdp_common.h | 4 ++-- ...dynamics_stdp_izhikevich_neuromodulation.c | 4 ++-- .../stdp/synapse_dynamics_stdp_mad_impl.c | 4 ++-- .../stdp/weight_dependence/weight.h | 2 +- .../weight_additive_one_term_impl.c | 13 +++++++--- .../weight_additive_one_term_impl.h | 5 +++- .../weight_additive_two_term_impl.c | 9 ++++++- .../weight_additive_two_term_impl.h | 5 +++- .../weight_multiplicative_impl.c | 9 ++++++- .../weight_multiplicative_impl.h | 6 +++-- .../src/neuron/plasticity/synapse_dynamics.h | 2 +- .../plasticity/synapse_dynamics_static_impl.c | 3 ++- neural_modelling/src/neuron/synapses.c | 24 +++++++++++++------ neural_modelling/src/neuron/synapses.h | 2 +- .../neuron/abstract_population_vertex.py | 3 ++- 
.../neuron/population_machine_synapses.py | 4 +++- 17 files changed, 74 insertions(+), 30 deletions(-) diff --git a/neural_modelling/src/neuron/c_main_synapse_common.h b/neural_modelling/src/neuron/c_main_synapse_common.h index 6fe224086e..560558588b 100644 --- a/neural_modelling/src/neuron/c_main_synapse_common.h +++ b/neural_modelling/src/neuron/c_main_synapse_common.h @@ -107,11 +107,12 @@ static inline bool initialise_synapse_regions( uint32_t *n_recording_regions_used) { // Set up the synapses REAL *min_weights; + REAL *min_weights_recip; uint32_t n_neurons; uint32_t n_synapse_types; if (!synapses_initialise( data_specification_get_region(regions.synapse_params, ds_regions), - &n_neurons, &n_synapse_types, ring_buffers, &min_weights, + &n_neurons, &n_synapse_types, ring_buffers, &min_weights, &min_weights_recip, clear_input_buffer_of_late_packets, incoming_spike_buffer_size)) { return false; @@ -127,7 +128,7 @@ static inline bool initialise_synapse_regions( // Set up the synapse dynamics if (!synapse_dynamics_initialise( data_specification_get_region(regions.synapse_dynamics, ds_regions), - n_neurons, n_synapse_types, min_weights)) { + n_neurons, n_synapse_types, min_weights, min_weights_recip)) { return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h index 88aefe2b7d..adba95d8d3 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h @@ -113,7 +113,7 @@ static uint32_t plastic_saturation_count = 0; static inline bool synapse_dynamics_stdp_init( address_t *address, stdp_params *params, uint32_t n_synapse_types, - REAL *min_weights) { + REAL *min_weights, REAL *min_weights_recip) { // Load parameters stdp_params *sdram_params = (stdp_params *) *address; @@ -128,7 +128,7 @@ static inline bool synapse_dynamics_stdp_init( // Load 
weight dependence data address_t weight_result = weight_initialise( - weight_region_address, n_synapse_types, min_weights); + weight_region_address, n_synapse_types, min_weights, min_weights_recip); if (weight_result == NULL) { return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c index 1a64672246..cfe694753c 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c @@ -279,10 +279,10 @@ static inline nm_final_state_t izhikevich_neuromodulation_plasticity_update_syna bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - REAL *min_weights) { + REAL *min_weights, REAL *min_weights_recip) { if (!synapse_dynamics_stdp_init( - &address, ¶ms, n_synapse_types, min_weights)) { + &address, ¶ms, n_synapse_types, min_weights, min_weights_recip)) { return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c index 4a5b2ba4b5..245452ac49 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c @@ -103,10 +103,10 @@ static inline final_state_t plasticity_update_synapse( bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - REAL *min_weights) { + REAL *min_weights, REAL *min_weights_recip) { if (!synapse_dynamics_stdp_init( - &address, ¶ms, n_synapse_types, min_weights)) { + &address, ¶ms, n_synapse_types, min_weights, min_weights_recip)) { return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h 
b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h index 19020ec760..4928c4883e 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h @@ -46,7 +46,7 @@ * \return the end of the weight region as an absolute SDRAM memory address. */ address_t weight_initialise( - address_t address, uint32_t n_synapse_types, REAL *min_weights); + address_t address, uint32_t n_synapse_types, REAL *min_weights, REAL *min_weights_recip); /*! * \brief Gets the initial weight state. diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c index c0da700c30..32216b226e 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c @@ -27,6 +27,7 @@ plasticity_weight_region_data_t *plasticity_weight_region_data; //! Plasticity min_weight array, in DTCM REAL *min_weight; +REAL *min_weight_recip; //! \brief How the configuration data for additive_one_term is laid out in //! SDRAM. The layout is an array of these. 
@@ -41,7 +42,7 @@ typedef struct { // Functions //--------------------------------------- address_t weight_initialise( - address_t address, uint32_t n_synapse_types, REAL *min_weights) { + address_t address, uint32_t n_synapse_types, REAL *min_weights, REAL *min_weights_recip) { log_debug("weight_initialise: starting"); log_debug("\tSTDP additive one-term weight dependence"); // Copy plasticity region data from address @@ -60,6 +61,11 @@ address_t weight_initialise( log_error("Could not initialise weight region data"); return NULL; } + min_weight_recip = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight_recip == NULL) { + log_error("Could not initialise weight region data"); + return NULL; + } for (uint32_t s = 0; s < n_synapse_types; s++, config++) { dtcm_copy[s].min_weight = config->min_weight; @@ -68,10 +74,11 @@ address_t weight_initialise( dtcm_copy[s].a2_minus = config->a2_minus; min_weight[s] = min_weights[s]; + min_weight_recip[s] = min_weights_recip[s]; - log_debug("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k min_weight %k", + log_info("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k min_weight %k recip %k", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, - dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, min_weight[s]); + dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, min_weight[s], min_weight_recip[s]); } // Return end address of region diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h index 541a85155b..ad35f34c43 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.h @@ -44,6 +44,7 @@ typedef struct { accum weight; //!< The starting weight REAL min_weight; //!< Min weight + REAL min_weight_recip; //!< Min 
weight //! Reference to the configuration data const plasticity_weight_region_data_t *weight_region; @@ -63,6 +64,7 @@ typedef struct { static inline weight_state_t weight_get_initial(weight_t weight, index_t synapse_type) { extern plasticity_weight_region_data_t *plasticity_weight_region_data; extern REAL *min_weight; + extern REAL *min_weight_recip; uint64_t mw = (uint64_t) bitsk(min_weight[synapse_type]); uint64_t w = (uint64_t) (weight); @@ -72,6 +74,7 @@ static inline weight_state_t weight_get_initial(weight_t weight, index_t synapse return (weight_state_t) { .weight = s1615_weight, .min_weight = min_weight[synapse_type], + .min_weight_recip = min_weight_recip[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -107,7 +110,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( * \return The new weight. */ static inline weight_t weight_get_final(weight_state_t state) { - return (weight_t) (bitsk(state.weight) / bitsk(state.min_weight)); + return (weight_t) (state.weight * state.min_weight_recip); } static inline void weight_decay(weight_state_t *state, int32_t decay) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c index 359f05d4e7..7097439017 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c @@ -38,12 +38,13 @@ typedef struct { //! 
Plasticity min_weight array, in DTCM REAL *min_weight; +REAL *min_weight_recip; //--------------------------------------- // Functions //--------------------------------------- address_t weight_initialise( - address_t address, uint32_t n_synapse_types, REAL *min_weights) { + address_t address, uint32_t n_synapse_types, REAL *min_weights, REAL *min_weights_recip) { log_debug("weight_initialise: starting"); log_debug("\tSTDP additive two-term weight dependance"); // Copy plasticity region data from address @@ -64,6 +65,11 @@ address_t weight_initialise( log_error("Could not initialise weight region data"); return NULL; } + min_weight_recip = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight_recip == NULL) { + log_error("Could not initialise weight region data"); + return NULL; + } for (uint32_t s = 0; s < n_synapse_types; s++, config++) { dtcm_copy[s].min_weight = config->min_weight; @@ -74,6 +80,7 @@ address_t weight_initialise( dtcm_copy[s].a3_minus = config->a3_minus; min_weight[s] = min_weights[s]; + min_weight_recip[s] = min_weights_recip[s]; log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d, min_weight %k" " A3+:%d, A3-:%d", diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h index ffcf82c9f0..d54d6d4d3c 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.h @@ -46,6 +46,7 @@ typedef struct weight_state_t { accum weight; //!< The weight REAL min_weight; //!< The min_weight + REAL min_weight_recip; //!< The min_weight //! 
Reference to the configuration data const plasticity_weight_region_data_t *weight_region; @@ -65,6 +66,7 @@ typedef struct weight_state_t { static inline weight_state_t weight_get_initial(weight_t weight, index_t synapse_type) { extern plasticity_weight_region_data_t *plasticity_weight_region_data; extern REAL *min_weight; + extern REAL *min_weight_recip; uint64_t mw = (uint64_t) bitsk(min_weight[synapse_type]); uint64_t w = (uint64_t) (weight); @@ -74,6 +76,7 @@ static inline weight_state_t weight_get_initial(weight_t weight, index_t synapse return (weight_state_t) { .weight = s1615_weight, .min_weight = min_weight[synapse_type], + .min_weight_recip = min_weight_recip[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -113,7 +116,7 @@ static inline weight_state_t weight_two_term_apply_potentiation( * \return The new weight. */ static inline weight_t weight_get_final(weight_state_t state) { - return (weight_t) (bitsk(state.weight) / bitsk(state.min_weight)); + return (weight_t) (state.weight * state.min_weight_recip); } static inline void weight_decay(weight_state_t *state, int32_t decay) { diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c index 4e76aca278..8cecf3be10 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c @@ -27,6 +27,7 @@ plasticity_weight_region_data_t *plasticity_weight_region_data; //! Plasticity min_weight array, in DTCM REAL *min_weight; +REAL *min_weight_recip; //! \brief How the configuration data for multiplicative is laid out in SDRAM. //! The layout is an array of these. 
@@ -42,7 +43,7 @@ typedef struct { //--------------------------------------- address_t weight_initialise( - address_t address, uint32_t n_synapse_types, REAL *min_weights) { + address_t address, uint32_t n_synapse_types, REAL *min_weights, REAL *min_weights_recip) { log_debug("weight_initialise: starting"); log_debug("\tSTDP multiplicative weight dependence"); // Copy plasticity region data from address @@ -59,6 +60,11 @@ address_t weight_initialise( log_error("Could not initialise weight region data"); return NULL; } + min_weight_recip = spin1_malloc(sizeof(REAL) * n_synapse_types); + if (min_weight_recip == NULL) { + log_error("Could not initialise weight region data"); + return NULL; + } multiplicative_config_t *config = (multiplicative_config_t *) address; for (uint32_t s = 0; s < n_synapse_types; s++, config++) { @@ -69,6 +75,7 @@ address_t weight_initialise( dtcm_copy[s].a2_minus = config->a2_minus; min_weight[s] = min_weights[s]; + min_weight_recip[s] = min_weights_recip[s]; log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d, min_weight %k", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h index 66859fe428..427629b4a8 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.h @@ -44,6 +44,7 @@ typedef struct { accum weight; //!< The current weight REAL min_weight; //!< The min weight + REAL min_weight_recip; //!< The min weight //! 
Reference to the configuration data const plasticity_weight_region_data_t *weight_region; @@ -70,6 +71,7 @@ static inline weight_state_t weight_get_initial( weight_t weight, index_t synapse_type) { extern plasticity_weight_region_data_t *plasticity_weight_region_data; extern REAL *min_weight; + extern REAL *min_weight_recip; uint64_t mw = (uint64_t) bitsk(min_weight[synapse_type]); uint64_t w = (uint64_t) (weight); @@ -79,6 +81,7 @@ static inline weight_state_t weight_get_initial( return (weight_state_t) { .weight = s1615_weight, .min_weight = min_weight[synapse_type], + .min_weight_recip = min_weight_recip[synapse_type], .weight_region = &plasticity_weight_region_data[synapse_type] }; } @@ -124,8 +127,7 @@ static inline weight_state_t weight_one_term_apply_potentiation( * \return The new weight. */ static inline weight_t weight_get_final(weight_state_t state) { -// log_info("\tnew_weight:%d\n", state.weight); - return (weight_t) (bitsk(state.weight) / bitsk(state.min_weight)); + return (weight_t) (state.weight * state.min_weight_recip); } static inline void weight_decay(weight_state_t *state, int32_t decay) { diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h index 627a5f1748..13c68bbbe6 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h @@ -34,7 +34,7 @@ //! \return Whether the initialisation succeeded. bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - REAL *min_weights); + REAL *min_weights, REAL *min_weights_recip); //! \brief Process the dynamics of the synapses //! 
\param[in,out] plastic_region_data: Where the plastic data is diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c index c8a1d388b0..9b38767a63 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c @@ -29,7 +29,8 @@ bool synapse_dynamics_initialise( UNUSED address_t address, UNUSED uint32_t n_neurons, - UNUSED uint32_t n_synapse_types, UNUSED REAL *min_weights) { + UNUSED uint32_t n_synapse_types, UNUSED REAL *min_weights, + UNUSED REAL *min_weights_recip) { return true; } diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 358911513e..9532e6c1ff 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -49,8 +49,10 @@ static uint32_t ring_buffer_size; //! Ring buffer mask static uint32_t ring_buffer_mask; -// The weight value represented by the LSB of a weight +// The minimum of the weight value represented by the LSB of a weight, +// and the reciprocal of these minimum values static REAL *min_weights; +static REAL *min_weights_recip; //! \brief Number of bits needed for the synapse type and index //!
\details @@ -263,14 +265,14 @@ struct synapse_params { uint32_t log_max_delay; uint32_t drop_late_packets; uint32_t incoming_spike_buffer_size; - REAL min_weights[]; + REAL min_weights_recip[]; // this is min_weight followed by the reciprocals }; /* INTERFACE FUNCTIONS */ bool synapses_initialise( address_t synapse_params_address, uint32_t *n_neurons_out, uint32_t *n_synapse_types_out, - weight_t **ring_buffers_out, REAL **min_weights_out, + weight_t **ring_buffers_out, REAL **min_weights_out, REAL **min_weights_recip_out, bool* clear_input_buffers_of_late_packets_init, uint32_t *incoming_spike_buffer_size) { struct synapse_params *params = (struct synapse_params *) synapse_params_address; @@ -292,13 +294,21 @@ bool synapses_initialise( log_error("Not enough memory to allocate min weights"); return false; } + min_weights_recip = spin1_malloc(min_weights_bytes); + if (min_weights_recip == NULL) { + log_error("Not enough memory to allocate min weights"); + return false; + } // read in min_weights - spin1_memcpy(min_weights, params->min_weights, min_weights_bytes); - *min_weights_out = min_weights; + spin1_memcpy(min_weights, params->min_weights_recip, min_weights_bytes); + spin1_memcpy(min_weights_recip, &params->min_weights_recip[n_synapse_types], min_weights_bytes); + *min_weights_out = min_weights; + *min_weights_recip_out = min_weights_recip; // for (uint32_t s = 0; s < n_synapse_types; s++) { -// log_info("synapse initialise, min_weights_out[%u] = %k", -// s, min_weights_out[s]); +// log_info("synapse initialise, min_weights_out[%u] = %k, min_weights_recip_out[%u] = %k %k %k", +// s, min_weights_out[s], s, min_weights_recip_out[s], +// min_weights[s], min_weights_recip[s]); // } synapse_type_index_bits = log_n_neurons + log_n_synapse_types; diff --git a/neural_modelling/src/neuron/synapses.h b/neural_modelling/src/neuron/synapses.h index dc867fc7a5..60317d700a 100644 --- a/neural_modelling/src/neuron/synapses.h +++ b/neural_modelling/src/neuron/synapses.h @@ -91,7
+91,7 @@ static inline void synapses_print_weight( bool synapses_initialise( address_t synapse_params_address, uint32_t *n_neurons_out, uint32_t *n_synapse_types_out, - weight_t **ring_buffers_out, REAL **min_weights_out, + weight_t **ring_buffers_out, REAL **min_weights_out, REAL **min_weights_recip_out, bool* clear_input_buffers_of_late_packets_init, uint32_t *incoming_spike_buffer_size); diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 60a05f022d..eb1a4bbfee 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -1166,7 +1166,8 @@ def get_synapse_params_size(self): # This will only hold ring buffer scaling for the neuron synapse # types return (_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES + - (BYTES_PER_WORD * self.__neuron_impl.get_n_synapse_types())) + (BYTES_PER_WORD * 2 * self.__neuron_impl.get_n_synapse_types( + ))) def get_synapse_dynamics_size(self, n_atoms): """ Get the size of the synapse dynamics region diff --git a/spynnaker/pyNN/models/neuron/population_machine_synapses.py b/spynnaker/pyNN/models/neuron/population_machine_synapses.py index ebb243a26d..1aa31df9bf 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_synapses.py +++ b/spynnaker/pyNN/models/neuron/population_machine_synapses.py @@ -200,9 +200,11 @@ def _write_synapse_parameters(self, spec, min_weights): spec.write_value(get_n_bits(max_delay)) spec.write_value(int(self._app_vertex.drop_late_spikes)) spec.write_value(self._app_vertex.incoming_spike_buffer_size) - # Write the minimum weights + # Write the minimum weights and the reciprocals (no machine division) for min_w in min_weights: spec.write_value(min_w, data_type=DataType.S1615) + for min_w in min_weights: + spec.write_value(1 / min_w, data_type=DataType.S1615) @overrides(AbstractSynapseExpandable.gen_on_machine) def gen_on_machine(self): From 
2c4bfcd4525c4ac8e6d9c8cbdf22cf65332a460e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 22 Nov 2022 10:42:45 +0000 Subject: [PATCH 163/198] vera line length, and change info to debug --- .../stdp/weight_dependence/weight_additive_one_term_impl.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c index 32216b226e..52b82f72db 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c @@ -76,9 +76,9 @@ address_t weight_initialise( min_weight[s] = min_weights[s]; min_weight_recip[s] = min_weights_recip[s]; - log_info("\tSynapse type %u: Min weight:%k, Max weight:%k, A2+:%k, A2-:%k min_weight %k recip %k", - s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, - dtcm_copy[s].a2_plus, dtcm_copy[s].a2_minus, min_weight[s], min_weight_recip[s]); + log_debug("\tSynapse type %u: Min w:%k, Max w:%k, A2+:%k, A2-:%k min_weight %k recip %k", + s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, dtcm_copy[s].a2_plus, + dtcm_copy[s].a2_minus, min_weight[s], min_weight_recip[s]); } // Return end address of region From 16ccb4320702555922e588312f9666d99d53e95b Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 22 Nov 2022 10:47:00 +0000 Subject: [PATCH 164/198] Vera line length and tidy up --- neural_modelling/src/neuron/synapses.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 9532e6c1ff..9ed344220d 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -300,15 +300,15 @@ bool synapses_initialise( return false; } - // read in min_weights + // read in min_weights 
and reciprocals spin1_memcpy(min_weights, params->min_weights_recip, min_weights_bytes); - spin1_memcpy(min_weights_recip, &params->min_weights_recip[n_synapse_types], min_weights_bytes); + spin1_memcpy( + min_weights_recip, &params->min_weights_recip[n_synapse_types], min_weights_bytes); *min_weights_out = min_weights; *min_weights_recip_out = min_weights_recip; // for (uint32_t s = 0; s < n_synapse_types; s++) { -// log_info("synapse initialise, min_weights_out[%u] = %k, min_weights_recip_out[%u] = %k %k %k", -// s, min_weights_out[s], s, min_weights_recip_out[s], -// min_weights[s], min_weights_recip[s]); +// log_info("synapse initialise, min_weights_out[%u] = %k, min_weights_recip_out[%u] = %k", +// s, min_weights_out[s], s, min_weights_recip_out[s]); // } synapse_type_index_bits = log_n_neurons + log_n_synapse_types; From a624e57a10b0e18d95a677e92db1fcaf9cb2dc9c Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 22 Nov 2022 15:03:01 +0000 Subject: [PATCH 165/198] More tidying up --- .../src/neuron/models/neuron_model_lif_impl.h | 16 ++++++++-------- .../synapse_types/synapse_types_alpha_impl.h | 4 ++-- .../synapse_types/synapse_types_semd_impl.h | 12 ++++++------ 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index 8f1eacc043..15ae448ff9 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -190,20 +190,20 @@ static inline state_t neuron_model_get_membrane_voltage(const neuron_t *neuron) } static inline void neuron_model_print_state_variables(const neuron_t *neuron) { - log_info("V membrane = %11.4k mv", neuron->V_membrane); - log_info("Refract timer = %u timesteps", neuron->refract_timer); + log_debug("V membrane = %11.4k mv", neuron->V_membrane); + log_debug("Refract timer = %u timesteps", neuron->refract_timer); } static inline void
neuron_model_print_parameters(const neuron_t *neuron) { - log_info("V reset = %11.4k mv", neuron->V_reset); - log_info("V rest = %11.4k mv", neuron->V_rest); + log_debug("V reset = %11.4k mv", neuron->V_reset); + log_debug("V rest = %11.4k mv", neuron->V_rest); - log_info("I offset = %11.4k nA", neuron->I_offset); - log_info("R membrane = %11.4k Mohm", neuron->R_membrane); + log_debug("I offset = %11.4k nA", neuron->I_offset); + log_debug("R membrane = %11.4k Mohm", neuron->R_membrane); - log_info("exp(-ms/(RC)) = %11.4k [.]", neuron->exp_TC); + log_debug("exp(-ms/(RC)) = %11.4k [.]", neuron->exp_TC); - log_info("T refract = %u timesteps", neuron->T_refract); + log_debug("T refract = %u timesteps", neuron->T_refract); } diff --git a/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h b/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h index 5c4310e886..1ac3b160cc 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h @@ -38,9 +38,9 @@ //! Number of inhibitory receptors #define NUM_INHIBITORY_RECEPTORS 1 -#include #include #include "synapse_types.h" +#include //--------------------------------------- // Synapse parameters @@ -222,7 +222,7 @@ static inline const char *synapse_types_get_type_char( //! 
\param[in] parameters: the pointer to the parameters to print static inline void synapse_types_print_input( synapse_types_t *parameters) { - io_printf(IO_BUF, "%12.6k - %12.6k", + log_debug("%12.6k - %12.6k", parameters->exc.lin_buff * parameters->exc.exp_buff, parameters->inh.lin_buff * parameters->inh.exp_buff); } diff --git a/neural_modelling/src/neuron/synapse_types/synapse_types_semd_impl.h b/neural_modelling/src/neuron/synapse_types/synapse_types_semd_impl.h index b2e9efe584..c4b7c0cc32 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_types_semd_impl.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_types_semd_impl.h @@ -147,17 +147,17 @@ static inline void synapse_types_add_neuron_input( //! \return the excitatory input buffers for a given neuron ID. static inline input_t *synapse_types_get_excitatory_input( input_t *excitatory_response, synapse_types_t *parameters) { - if (parameters->exc2.synaptic_input_value >= 0.001 - && parameters->multiplicator == 0 - && parameters->exc2_old == 0) { + if (parameters->exc2.synaptic_input_value >= 0.001k + && parameters->multiplicator == 0.0k + && parameters->exc2_old == 0.0k) { parameters->multiplicator = parameters->exc.synaptic_input_value; - } else if (parameters->exc2.synaptic_input_value < 0.001) { - parameters->multiplicator = 0; + } else if (parameters->exc2.synaptic_input_value < 0.001k) { + parameters->multiplicator = 0.0k; } parameters->exc2_old = parameters->exc2.synaptic_input_value; - excitatory_response[0] = 0; + excitatory_response[0] = 0.0k; excitatory_response[1] = parameters->exc2.synaptic_input_value * parameters->multiplicator * parameters->scaling_factor; From f6754fe56a6bcb2d1cf28473af10b0a2fdada66f Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 22 Nov 2022 15:14:36 +0000 Subject: [PATCH 166/198] Remove unneeded check in alpha synapse --- .../synapse_types/synapse_types_alpha_impl.h | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git 
a/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h b/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h index 1ac3b160cc..152f3d6a47 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h @@ -161,16 +161,14 @@ static inline void add_input_alpha(alpha_state_t *a_params, input_t input) { static inline void synapse_types_add_neuron_input( index_t synapse_type_index, synapse_types_t *parameters, input_t input) { - if (input > ZERO) { - switch (synapse_type_index) { - case EXCITATORY: - add_input_alpha(&parameters->exc, input); - break; - case INHIBITORY: - add_input_alpha(&parameters->inh, input); - break; - } - } + switch (synapse_type_index) { + case EXCITATORY: + add_input_alpha(&parameters->exc, input); + break; + case INHIBITORY: + add_input_alpha(&parameters->inh, input); + break; + } } //! \brief extracts the excitatory input buffers from the buffers available From a8d864b9d9c46f924d833da7748cf0fea04e59e1 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 22 Nov 2022 17:11:39 +0000 Subject: [PATCH 167/198] Trying to save more ITCM --- .../timing_pfister_triplet_impl.h | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h index 39c56fc059..8ce8e3ee0f 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h @@ -77,8 +77,8 @@ static inline post_trace_t timing_decay_post( uint32_t delta_time = time - last_time; // Decay previous o1 trace - int32_t decay_minus =
maths_lut_exponential_decay(delta_time, tau_minus_lookup); + int32_t decayed_o1 = STDP_FIXED_MUL_16X16(last_trace.o1, decay_minus); // If we have already added on the last spike effect, just decay // (as it's sampled BEFORE the spike), @@ -128,17 +128,17 @@ static inline pre_trace_t timing_add_pre_spike( uint32_t delta_time = time - last_time; // Decay previous r1 trace and add energy caused by new spike - int32_t decayed_r1 = STDP_FIXED_MUL_16X16(last_trace.r1, - maths_lut_exponential_decay(delta_time, tau_plus_lookup)); + int32_t decay_tau = maths_lut_exponential_decay(delta_time, tau_plus_lookup); + int32_t decayed_r1 = STDP_FIXED_MUL_16X16(last_trace.r1, decay_tau); int32_t new_r1 = decayed_r1 + STDP_FIXED_POINT_ONE; // If this is the 1st pre-synaptic event, r2 trace is zero // (as it's sampled BEFORE the spike), // otherwise, add on energy caused by last spike and decay that + int32_t decay_x = maths_lut_exponential_decay(delta_time, tau_x_lookup); int32_t new_r2 = (last_time == 0) ? 
0 : STDP_FIXED_MUL_16X16( - last_trace.r2 + STDP_FIXED_POINT_ONE, - maths_lut_exponential_decay(delta_time, tau_x_lookup)); + last_trace.r2 + STDP_FIXED_POINT_ONE, decay_x); log_debug("\tdelta_time=%u, r1=%d, r2=%d\n", delta_time, new_r1, new_r2); @@ -163,8 +163,8 @@ static inline update_state_t timing_apply_pre_spike( post_trace_t last_post_trace, update_state_t previous_state) { // Get time of event relative to last post-synaptic event uint32_t time_since_last_post = time - last_post_time; - int32_t decayed_o1 = STDP_FIXED_MUL_16X16(last_post_trace.o1, - maths_lut_exponential_decay(time_since_last_post, tau_minus_lookup)); + int32_t decay_minus = maths_lut_exponential_decay(time_since_last_post, tau_minus_lookup); + int32_t decayed_o1 = STDP_FIXED_MUL_16X16(last_post_trace.o1, decay_minus); // Calculate triplet term int32_t decayed_o1_r2 = STDP_FIXED_MUL_16X16(decayed_o1, trace.r2); @@ -195,8 +195,8 @@ static inline update_state_t timing_apply_post_spike( // Get time of event relative to last pre-synaptic event uint32_t time_since_last_pre = time - last_pre_time; if (time_since_last_pre > 0) { - int32_t decayed_r1 = STDP_FIXED_MUL_16X16(last_pre_trace.r1, - maths_lut_exponential_decay(time_since_last_pre, tau_plus_lookup)); + int32_t decay_plus = maths_lut_exponential_decay(time_since_last_pre, tau_plus_lookup); + int32_t decayed_r1 = STDP_FIXED_MUL_16X16(last_pre_trace.r1, decay_plus); // Calculate triplet term int32_t decayed_r1_o2 = STDP_FIXED_MUL_16X16(decayed_r1, trace.o2); From d3d745d72fda6611fa9e0feba80e76f29a6e7f8d Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 22 Nov 2022 17:36:43 +0000 Subject: [PATCH 168/198] Yet more ITCM savings... 
--- .../src/neuron/current_sources/current_source_ac.h | 2 +- .../neuron/current_sources/current_source_noisy.h | 2 +- .../src/neuron/current_sources/current_source_step.h | 12 +++--------- .../timing_dependence/timing_pfister_triplet_impl.h | 2 +- 4 files changed, 6 insertions(+), 12 deletions(-) diff --git a/neural_modelling/src/neuron/current_sources/current_source_ac.h b/neural_modelling/src/neuron/current_sources/current_source_ac.h index c80bab515f..483744d7d7 100644 --- a/neural_modelling/src/neuron/current_sources/current_source_ac.h +++ b/neural_modelling/src/neuron/current_sources/current_source_ac.h @@ -41,7 +41,7 @@ static bool current_source_ac_init(uint32_t n_ac_sources, uint32_t *next) { for (uint32_t n_ac=0; n_ac < n_ac_sources; n_ac++) { ac_source[n_ac] = spin1_malloc(sizeof(ac_source_t)); if (ac_source[n_ac] == NULL) { - log_error("Unable to allocate DC source parameters - out of DTCM"); + log_error("Unable to allocate AC source parameters - out of DTCM"); return false; } *next += sizeof(ac_source_t) / 4; diff --git a/neural_modelling/src/neuron/current_sources/current_source_noisy.h b/neural_modelling/src/neuron/current_sources/current_source_noisy.h index 3445c13377..19386fb912 100644 --- a/neural_modelling/src/neuron/current_sources/current_source_noisy.h +++ b/neural_modelling/src/neuron/current_sources/current_source_noisy.h @@ -42,7 +42,7 @@ static bool current_source_noisy_init(uint32_t n_noisy_sources, uint32_t *next) for (uint32_t n_noisy=0; n_noisy < n_noisy_sources; n_noisy++) { noisy_source[n_noisy] = spin1_malloc(sizeof(noisy_current_source_t)); if (noisy_source[n_noisy] == NULL) { - log_error("Unable to allocate DC source parameters - out of DTCM"); + log_error("Unable to allocate noisy source parameters - out of DTCM"); return false; } *next += sizeof(noisy_current_source_t) / 4; diff --git a/neural_modelling/src/neuron/current_sources/current_source_step.h b/neural_modelling/src/neuron/current_sources/current_source_step.h index 
bb8b06c9aa..1cd488e8f8 100644 --- a/neural_modelling/src/neuron/current_sources/current_source_step.h +++ b/neural_modelling/src/neuron/current_sources/current_source_step.h @@ -49,12 +49,8 @@ static bool current_source_step_init( step_cs_amps = spin1_malloc(n_step_current_sources * sizeof(uint32_t*)); step_cs_amp_last = spin1_malloc(n_step_current_sources * sizeof(REAL)); step_cs_index = spin1_malloc(n_step_current_sources * sizeof(uint32_t)); - if (step_cs_amp_last == NULL) { - log_error("Unable to allocate step current source amp last - out of DTCM"); - return false; - } if (step_cs_index == NULL) { - log_error("Unable to allocate step current source index - out of DTCM"); + log_error("Unable to allocate step current source arrays - out of DTCM"); return false; } } @@ -63,15 +59,13 @@ static bool current_source_step_init( uint32_t struct_size = (arr_len + 1) * sizeof(uint32_t); step_cs_times[n_step] = spin1_malloc(struct_size); if (step_cs_times[n_step] == NULL) { - log_error("Unable to allocate step current source times - out of DTCM", - "struct_size is %u next %u n_step %u)", struct_size, *next, n_step); + log_error("Unable to allocate step current source times - out of DTCM"); return false; } step_cs_amps[n_step] = spin1_malloc(struct_size); if (step_cs_amps[n_step] == NULL) { - log_error("Unable to allocate step current source amplitudes - out of DTCM", - "(struct_size is %u next %u n_step %u)", struct_size, *next, n_step); + log_error("Unable to allocate step current source amplitudes - out of DTCM"); return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h index 8ce8e3ee0f..3c3febc8f6 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h +++ b/neural_modelling/src/neuron/plasticity/stdp/timing_dependence/timing_pfister_triplet_impl.h @@ -84,7 +84,7 @@ 
static inline post_trace_t timing_decay_post( // (as it's sampled BEFORE the spike), // otherwise, add on energy caused by last spike and decay that int32_t new_o2 = 0; - int32_t next_spike_time = last_trace.last_spike_time; + uint32_t next_spike_time = last_trace.last_spike_time; if (last_trace.last_spike_time == 0) { int32_t decay = maths_lut_exponential_decay(delta_time, tau_y_lookup); new_o2 = STDP_FIXED_MUL_16X16(last_trace.o2, decay); From 514c8c8dd31c7cf9cb237633d15bff3614bedf36 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 22 Nov 2022 17:44:45 +0000 Subject: [PATCH 169/198] Saving more ITCM... --- .../src/neuron/current_sources/current_source_step.h | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/neural_modelling/src/neuron/current_sources/current_source_step.h b/neural_modelling/src/neuron/current_sources/current_source_step.h index 1cd488e8f8..2d8a390a49 100644 --- a/neural_modelling/src/neuron/current_sources/current_source_step.h +++ b/neural_modelling/src/neuron/current_sources/current_source_step.h @@ -49,11 +49,8 @@ static bool current_source_step_init( step_cs_amps = spin1_malloc(n_step_current_sources * sizeof(uint32_t*)); step_cs_amp_last = spin1_malloc(n_step_current_sources * sizeof(REAL)); step_cs_index = spin1_malloc(n_step_current_sources * sizeof(uint32_t)); - if (step_cs_index == NULL) { - log_error("Unable to allocate step current source arrays - out of DTCM"); - return false; - } } + for (uint32_t n_step=0; n_step < n_step_current_sources; n_step++) { uint32_t arr_len = (uint32_t) cs_address[*next]; uint32_t struct_size = (arr_len + 1) * sizeof(uint32_t); From aab7916df49379baa4d8f2a939d469e6a65b65e5 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 23 Nov 2022 15:04:08 +0000 Subject: [PATCH 170/198] Simplify code on machine to only split array when needed --- .../src/neuron/c_main_synapse_common.h | 7 ++++--- .../neuron/current_sources/current_source.h | 2 +- neural_modelling/src/neuron/neuron.c | 2 +- 
.../stdp/synapse_dynamics_stdp_common.h | 4 ++-- ...dynamics_stdp_izhikevich_neuromodulation.c | 4 ++-- .../stdp/synapse_dynamics_stdp_mad_impl.c | 4 ++-- .../stdp/weight_dependence/weight.h | 2 +- .../weight_additive_one_term_impl.c | 4 ++-- .../weight_additive_two_term_impl.c | 4 ++-- .../weight_multiplicative_impl.c | 4 ++-- .../src/neuron/plasticity/synapse_dynamics.h | 2 +- .../plasticity/synapse_dynamics_static_impl.c | 3 +-- neural_modelling/src/neuron/synapses.c | 20 +++---------------- neural_modelling/src/neuron/synapses.h | 4 ++-- .../neuron/abstract_population_vertex.py | 7 +++---- ...tion_machine_local_only_combined_vertex.py | 12 +++++------ .../neuron/population_machine_neurons.py | 2 -- .../neuron/population_machine_vertex.py | 4 ++-- .../synapse_dynamics/synapse_dynamics_stdp.py | 2 ++ 19 files changed, 39 insertions(+), 54 deletions(-) diff --git a/neural_modelling/src/neuron/c_main_synapse_common.h b/neural_modelling/src/neuron/c_main_synapse_common.h index 560558588b..8072dbe473 100644 --- a/neural_modelling/src/neuron/c_main_synapse_common.h +++ b/neural_modelling/src/neuron/c_main_synapse_common.h @@ -107,12 +107,11 @@ static inline bool initialise_synapse_regions( uint32_t *n_recording_regions_used) { // Set up the synapses REAL *min_weights; - REAL *min_weights_recip; uint32_t n_neurons; uint32_t n_synapse_types; if (!synapses_initialise( data_specification_get_region(regions.synapse_params, ds_regions), - &n_neurons, &n_synapse_types, ring_buffers, &min_weights, &min_weights_recip, + &n_neurons, &n_synapse_types, ring_buffers, &min_weights, clear_input_buffer_of_late_packets, incoming_spike_buffer_size)) { return false; @@ -125,10 +124,11 @@ static inline bool initialise_synapse_regions( row_max_n_words)) { return false; } + // Set up the synapse dynamics if (!synapse_dynamics_initialise( data_specification_get_region(regions.synapse_dynamics, ds_regions), - n_neurons, n_synapse_types, min_weights, min_weights_recip)) { + n_neurons, 
n_synapse_types, min_weights)) { return false; } @@ -138,5 +138,6 @@ static inline bool initialise_synapse_regions( return false; } + return true; } diff --git a/neural_modelling/src/neuron/current_sources/current_source.h b/neural_modelling/src/neuron/current_sources/current_source.h index c0f327784c..748df60fd1 100644 --- a/neural_modelling/src/neuron/current_sources/current_source.h +++ b/neural_modelling/src/neuron/current_sources/current_source.h @@ -151,7 +151,7 @@ static bool current_source_load_parameters(address_t cs_address) { // Avoid the loops if no current sources #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) -// io_printf(IO_BUF, "no current sources defined \n"); + io_printf(IO_BUF, "no current sources defined \n"); return true; #else diff --git a/neural_modelling/src/neuron/neuron.c b/neural_modelling/src/neuron/neuron.c index 58fe6523f5..33b83f6fa0 100644 --- a/neural_modelling/src/neuron/neuron.c +++ b/neural_modelling/src/neuron/neuron.c @@ -148,7 +148,7 @@ bool neuron_initialise( current_source_address = current_sources_address; saved_initial_values_address = initial_values_address; - log_debug("\t n_neurons = %u, peak %u, n_synapse_types %u", + log_info("\t n_neurons = %u, peak %u, n_synapse_types %u", n_neurons, n_neurons_peak, n_synapse_types); // Call the neuron implementation initialise function to setup DTCM etc. 
diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h index adba95d8d3..88aefe2b7d 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_common.h @@ -113,7 +113,7 @@ static uint32_t plastic_saturation_count = 0; static inline bool synapse_dynamics_stdp_init( address_t *address, stdp_params *params, uint32_t n_synapse_types, - REAL *min_weights, REAL *min_weights_recip) { + REAL *min_weights) { // Load parameters stdp_params *sdram_params = (stdp_params *) *address; @@ -128,7 +128,7 @@ static inline bool synapse_dynamics_stdp_init( // Load weight dependence data address_t weight_result = weight_initialise( - weight_region_address, n_synapse_types, min_weights, min_weights_recip); + weight_region_address, n_synapse_types, min_weights); if (weight_result == NULL) { return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c index cfe694753c..1a64672246 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_izhikevich_neuromodulation.c @@ -279,10 +279,10 @@ static inline nm_final_state_t izhikevich_neuromodulation_plasticity_update_syna bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - REAL *min_weights, REAL *min_weights_recip) { + REAL *min_weights) { if (!synapse_dynamics_stdp_init( - &address, &params, n_synapse_types, min_weights, min_weights_recip)) { + &address, &params, n_synapse_types, min_weights)) { return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c
b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c index 245452ac49..4a5b2ba4b5 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/synapse_dynamics_stdp_mad_impl.c @@ -103,10 +103,10 @@ static inline final_state_t plasticity_update_synapse( bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - REAL *min_weights, REAL *min_weights_recip) { + REAL *min_weights) { if (!synapse_dynamics_stdp_init( - &address, ¶ms, n_synapse_types, min_weights, min_weights_recip)) { + &address, ¶ms, n_synapse_types, min_weights)) { return false; } diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h index 4928c4883e..19020ec760 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight.h @@ -46,7 +46,7 @@ * \return the end of the weight region as an absolute SDRAM memory address. */ address_t weight_initialise( - address_t address, uint32_t n_synapse_types, REAL *min_weights, REAL *min_weights_recip); + address_t address, uint32_t n_synapse_types, REAL *min_weights); /*! * \brief Gets the initial weight state. 
diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c index 52b82f72db..d951ca0abd 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_one_term_impl.c @@ -42,7 +42,7 @@ typedef struct { // Functions //--------------------------------------- address_t weight_initialise( - address_t address, uint32_t n_synapse_types, REAL *min_weights, REAL *min_weights_recip) { + address_t address, uint32_t n_synapse_types, REAL *min_weights) { log_debug("weight_initialise: starting"); log_debug("\tSTDP additive one-term weight dependence"); // Copy plasticity region data from address @@ -74,7 +74,7 @@ address_t weight_initialise( dtcm_copy[s].a2_minus = config->a2_minus; min_weight[s] = min_weights[s]; - min_weight_recip[s] = min_weights_recip[s]; + min_weight_recip[s] = min_weights[s+n_synapse_types]; log_debug("\tSynapse type %u: Min w:%k, Max w:%k, A2+:%k, A2-:%k min_weight %k recip %k", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, dtcm_copy[s].a2_plus, diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c index 7097439017..57e8c75c99 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_additive_two_term_impl.c @@ -44,7 +44,7 @@ REAL *min_weight_recip; // Functions //--------------------------------------- address_t weight_initialise( - address_t address, uint32_t n_synapse_types, REAL *min_weights, REAL *min_weights_recip) { + address_t address, uint32_t n_synapse_types, REAL *min_weights) { 
log_debug("weight_initialise: starting"); log_debug("\tSTDP additive two-term weight dependance"); // Copy plasticity region data from address @@ -80,7 +80,7 @@ address_t weight_initialise( dtcm_copy[s].a3_minus = config->a3_minus; min_weight[s] = min_weights[s]; - min_weight_recip[s] = min_weights_recip[s]; + min_weight_recip[s] = min_weights[s+n_synapse_types]; log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d, min_weight %k" " A3+:%d, A3-:%d", diff --git a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c index 8cecf3be10..0eba66bfb9 100644 --- a/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c +++ b/neural_modelling/src/neuron/plasticity/stdp/weight_dependence/weight_multiplicative_impl.c @@ -43,7 +43,7 @@ typedef struct { //--------------------------------------- address_t weight_initialise( - address_t address, uint32_t n_synapse_types, REAL *min_weights, REAL *min_weights_recip) { + address_t address, uint32_t n_synapse_types, REAL *min_weights) { log_debug("weight_initialise: starting"); log_debug("\tSTDP multiplicative weight dependence"); // Copy plasticity region data from address @@ -75,7 +75,7 @@ address_t weight_initialise( dtcm_copy[s].a2_minus = config->a2_minus; min_weight[s] = min_weights[s]; - min_weight_recip[s] = min_weights_recip[s]; + min_weight_recip[s] = min_weights[s+n_synapse_types]; log_debug("\tSynapse type %u: Min weight:%d, Max weight:%d, A2+:%d, A2-:%d, min_weight %k", s, dtcm_copy[s].min_weight, dtcm_copy[s].max_weight, diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h index 13c68bbbe6..627a5f1748 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics.h +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics.h @@ -34,7 +34,7 @@ //! 
\return Whether the initialisation succeeded. bool synapse_dynamics_initialise( address_t address, uint32_t n_neurons, uint32_t n_synapse_types, - REAL *min_weights, REAL *min_weights_recip); + REAL *min_weights); //! \brief Process the dynamics of the synapses //! \param[in,out] plastic_region_data: Where the plastic data is diff --git a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c index 9b38767a63..c8a1d388b0 100644 --- a/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c +++ b/neural_modelling/src/neuron/plasticity/synapse_dynamics_static_impl.c @@ -29,8 +29,7 @@ bool synapse_dynamics_initialise( UNUSED address_t address, UNUSED uint32_t n_neurons, - UNUSED uint32_t n_synapse_types, UNUSED REAL *min_weights, - UNUSED REAL *min_weights_recip) { + UNUSED uint32_t n_synapse_types, UNUSED REAL *min_weights) { return true; } diff --git a/neural_modelling/src/neuron/synapses.c b/neural_modelling/src/neuron/synapses.c index 9ed344220d..65e30bf8e3 100644 --- a/neural_modelling/src/neuron/synapses.c +++ b/neural_modelling/src/neuron/synapses.c @@ -49,10 +49,8 @@ static uint32_t ring_buffer_size; //! Ring buffer mask static uint32_t ring_buffer_mask; -// The minimum of the weight value represented by the LSB of a weight, -// and the recioprocal of these minimum values +// The minimum weight value, and the reciprocal of these minimum values static REAL *min_weights; -static REAL *min_weights_recip; //! \brief Number of bits needed for the synapse type and index //! 
\details @@ -272,7 +270,7 @@ struct synapse_params { bool synapses_initialise( address_t synapse_params_address, uint32_t *n_neurons_out, uint32_t *n_synapse_types_out, - weight_t **ring_buffers_out, REAL **min_weights_out, REAL **min_weights_recip_out, + weight_t **ring_buffers_out, REAL **min_weights_out, bool* clear_input_buffers_of_late_packets_init, uint32_t *incoming_spike_buffer_size) { struct synapse_params *params = (struct synapse_params *) synapse_params_address; @@ -288,28 +286,16 @@ bool synapses_initialise( uint32_t log_max_delay = params->log_max_delay; // Set up min_weights - uint32_t min_weights_bytes = n_synapse_types * sizeof(REAL); + uint32_t min_weights_bytes = 2 * n_synapse_types * sizeof(REAL); min_weights = spin1_malloc(min_weights_bytes); if (min_weights == NULL) { log_error("Not enough memory to allocate min weights"); return false; } - min_weights_recip = spin1_malloc(min_weights_bytes); - if (min_weights_recip == NULL) { - log_error("Not enough memory to allocate min weights"); - return false; - } // read in min_weights and reciprocals spin1_memcpy(min_weights, params->min_weights_recip, min_weights_bytes); - spin1_memcpy( - min_weights_recip, ¶ms->min_weights_recip[n_synapse_types], min_weights_bytes); *min_weights_out = min_weights; - *min_weights_recip_out = min_weights_recip; -// for (uint32_t s = 0; s < n_synapse_types; s++) { -// log_info("synapse initialise, min_weights_out[%u] = %k, min_weights_recip_out[%u] = %k", -// s, min_weights_out[s], s, min_weights_recip_out[s]); -// } synapse_type_index_bits = log_n_neurons + log_n_synapse_types; synapse_type_index_mask = (1 << synapse_type_index_bits) - 1; diff --git a/neural_modelling/src/neuron/synapses.h b/neural_modelling/src/neuron/synapses.h index 60317d700a..61f82a3bcb 100644 --- a/neural_modelling/src/neuron/synapses.h +++ b/neural_modelling/src/neuron/synapses.h @@ -90,8 +90,8 @@ static inline void synapses_print_weight( //! \return True if successfully initialised. 
False otherwise. bool synapses_initialise( address_t synapse_params_address, - uint32_t *n_neurons_out, uint32_t *n_synapse_types_out, - weight_t **ring_buffers_out, REAL **min_weights_out, REAL **min_weights_recip_out, + uint32_t *n_neurons, uint32_t *n_synapse_types, + weight_t **ring_buffers, REAL **min_weights, bool* clear_input_buffers_of_late_packets_init, uint32_t *incoming_spike_buffer_size); diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index eb1a4bbfee..bb3e7b932b 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -168,7 +168,7 @@ class AbstractPopulationVertex( _SYNAPSE_BASE_N_CPU_CYCLES = 10 # Elements before the start of global parameters - # 1. has key, 2. key, 3. n atoms, 4. n_atoms_peak 5. n_colour_bits + # 1 has key 2 n atoms 3 n_atoms_peak 4 n_colour_bits 5 n_synapse_types CORE_PARAMS_BASE_SIZE = 5 * BYTES_PER_WORD def __init__( @@ -1165,9 +1165,8 @@ def get_synapse_params_size(self): """ # This will only hold ring buffer scaling for the neuron synapse # types - return (_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES + - (BYTES_PER_WORD * 2 * self.__neuron_impl.get_n_synapse_types( - ))) + return (_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES + ( + BYTES_PER_WORD * 2 * self.__neuron_impl.get_n_synapse_types())) def get_synapse_dynamics_size(self, n_atoms): """ Get the size of the synapse dynamics region diff --git a/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py index d4f9cbe0ff..d4a793e0c7 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_local_only_combined_vertex.py @@ -66,7 +66,7 @@ class PopulationMachineLocalOnlyCombinedVertex( __slots__ = [ "__key", - "__ring_buffer_shifts", + 
"__min_weights", "__weight_scales", "__slice_index", "__neuron_data", @@ -123,7 +123,7 @@ class REGIONS(Enum): def __init__( self, sdram, label, app_vertex, vertex_slice, slice_index, - ring_buffer_shifts, weight_scales, neuron_data, + min_weights, weight_scales, neuron_data, max_atoms_per_core): """ :param ~pacman.model.resources.AbstractSDRAM sdram: @@ -135,8 +135,8 @@ def __init__( The slice of the population that this implements :param int slice_index: The index of the slice in the ordered list of slices - :param list(int) ring_buffer_shifts: - The shifts to apply to convert ring buffer values to S1615 values + :param list(int) min_weights: + The min_weights used in the calculations :param list(int) weight_scales: The scaling to apply to weights to store them in the synapses :param int all_syn_block_sz: The maximum size of the synapses in bytes @@ -154,7 +154,7 @@ def __init__( self._PROFILE_TAG_LABELS, self.__get_binary_file_name(app_vertex)) self.__key = None self.__slice_index = slice_index - self.__ring_buffer_shifts = ring_buffer_shifts + self.__min_weights = min_weights self.__weight_scales = weight_scales self.__neuron_data = neuron_data self.__max_atoms_per_core = max_atoms_per_core @@ -252,7 +252,7 @@ def generate_data_specification(self, spec, placement): self.vertex_slice)) self._write_common_data_spec(spec, rec_regions) - self._write_neuron_data_spec(spec, self.__ring_buffer_shifts) + self._write_neuron_data_spec(spec, self.__min_weights) self.__write_local_only_data(spec) diff --git a/spynnaker/pyNN/models/neuron/population_machine_neurons.py b/spynnaker/pyNN/models/neuron/population_machine_neurons.py index be39e8791f..ae10ef26cb 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_neurons.py +++ b/spynnaker/pyNN/models/neuron/population_machine_neurons.py @@ -180,8 +180,6 @@ def _rewrite_neuron_data_spec(self, spec): :param ~data_specification.DataSpecificationGenerator spec: The data specification to write to - :param list(int) 
ring_buffer_shifts: - The shifts to apply to convert ring buffer values to S1615 values """ # Write the current source parameters diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index ddc735f626..46fe391db9 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -312,8 +312,8 @@ def regenerate_data_specification(self, spec, placement): if self.__regenerate_synapse_data: self._write_synapse_data_spec( - spec, self.__ring_buffer_shifts, - self.__min_weights, self.__structural_sz) + spec, self.__min_weights, + self.__weight_scales, self.__structural_sz) self.__regenerate_synapse_data = False # close spec diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 6dc9d40e7c..8b4d5a086c 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -516,7 +516,9 @@ def __get_weight_min_delta(self, max_stdp_spike_delta): def calculate_min_weight(self, min_weights, max_stdp_spike_delta, weight_scale, conn_weight_min, synapse_type): min_delta = self.__get_weight_min_delta(max_stdp_spike_delta) + print("min_delta ", min_delta) min_delta *= weight_scale + print("scaled ", min_delta, synapse_type, conn_weight_min, min_weights) if min_delta is not None and min_delta != 0: # This also depends on the earlier calculated minimum min_delta = float_gcd(min_delta, conn_weight_min) From de09312725295cd46253399af21d71ddf976ba5c Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 24 Nov 2022 08:56:54 +0000 Subject: [PATCH 171/198] Add missing check to _get_weight_maximum --- .../neural_projections/connectors/abstract_connector.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git 
a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 45a78d518f..9a9c5b0c34 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -343,6 +343,14 @@ def _get_weight_maximum(self, weights, n_connections, synapse_info): return numpy.max(_expr_context.eval(weights, d=d)) elif numpy.isscalar(weights): return abs(weights) + elif hasattr(weights, "__getitem__"): + # Have to assume here that the list of weights that has been + # provided has different (non-zero) values in it. + non_zero_weights = numpy.abs(weights)[ + numpy.nonzero(numpy.abs(weights))] + if len(non_zero_weights) == 0: + return 0.0 + return numpy.max(non_zero_weights) raise SpynnakerException("Unrecognised weight format") @abstractmethod From f7fc478eadb08f0c5fa564dc973e5654557cf5af Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 30 Nov 2022 11:55:38 +0000 Subject: [PATCH 172/198] Update to stop compiler warnings --- neural_modelling/src/neuron/current_sources/current_source.h | 3 +-- neural_modelling/src/neuron/models/neuron_model_lif_impl.h | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/neural_modelling/src/neuron/current_sources/current_source.h b/neural_modelling/src/neuron/current_sources/current_source.h index a18c35fa83..aba55bd5ef 100644 --- a/neural_modelling/src/neuron/current_sources/current_source.h +++ b/neural_modelling/src/neuron/current_sources/current_source.h @@ -207,8 +207,7 @@ SOMETIMES_UNUSED // Marked unused as only used sometimes //! 
\return True if successful static inline REAL current_source_get_offset(uint32_t time, uint32_t neuron_index) { // Avoid the loops if no current sources defined - #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ - !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) + #if defined(_CURRENT_SOURCE_NONE_H_) return ZERO; #else diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index 2ba573249b..d0073af2a8 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -65,7 +65,7 @@ static inline void lif_neuron_closed_form( neuron_t *neuron, REAL V_prev, input_t input_this_timestep) { // accum = accum * accum + accum // REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; - REAL alpha = MULT_ROUND_NEAREST_ACCUM ( + REAL alpha = MULT_ROUND_NEAREST_ACCUM( input_this_timestep, neuron->R_membrane) + neuron->V_rest; // update membrane voltage From d7bf5dedb32b3c2d87468edf9e0c09a178e9091a Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 30 Nov 2022 16:43:42 +0000 Subject: [PATCH 173/198] missed in merge --- spynnaker/pyNN/models/populations/population.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/spynnaker/pyNN/models/populations/population.py b/spynnaker/pyNN/models/populations/population.py index eb3591552b..78de5b158e 100644 --- a/spynnaker/pyNN/models/populations/population.py +++ b/spynnaker/pyNN/models/populations/population.py @@ -828,14 +828,6 @@ def __create_vertex( if rb_left_shifts is not None: self.__vertex.set_rb_left_shifts(rb_left_shifts) - # Introspect properties of the vertex - self.__vertex_population_settable = \ - isinstance(self.__vertex, AbstractPopulationSettable) - self.__vertex_population_initializable = \ - isinstance(self.__vertex, AbstractPopulationInitializable) - self.__vertex_contains_units = \ - 
isinstance(self.__vertex, AbstractContainsUnits) - @staticmethod def create(cellclass, cellparams=None, n=1): """ Pass through method to the constructor defined by PyNN.\ From 5905d56f5a9695ef8508fbafe14751ac6692917f Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 1 Dec 2022 17:26:32 +0000 Subject: [PATCH 174/198] Still trying to work out ITCM problems on this branch --- neural_modelling/src/neuron/c_main.c | 30 ++-- neural_modelling/src/neuron/c_main_synapses.c | 22 +++ .../neuron/current_sources/current_source.h | 9 +- .../current_sources/current_source_none.h | 25 ---- .../current_source_none_impl.h | 25 ---- .../input_types/input_type_conductance.h | 18 +-- .../src/neuron/models/neuron_model_lif_impl.h | 18 +-- .../src/neuron/spike_processing.c | 104 +++++++------- .../src/neuron/spike_processing.h | 58 ++++---- .../src/neuron/spike_processing_fast.c | 56 ++++++++ .../src/neuron/spike_processing_fast.h | 43 ++++++ .../neuron/synapse_types/exp_synapse_utils.h | 19 +-- .../splitter_abstract_pop_vertex_fixed.py | 31 ++-- .../neuron/population_machine_vertex.py | 134 +++++++++--------- ...pulation_synapses_machine_vertex_common.py | 70 ++++++++- 15 files changed, 402 insertions(+), 260 deletions(-) delete mode 100644 neural_modelling/src/neuron/current_sources/current_source_none.h delete mode 100644 neural_modelling/src/neuron/current_sources/current_source_none_impl.h diff --git a/neural_modelling/src/neuron/c_main.c b/neural_modelling/src/neuron/c_main.c index 6ac13699bd..b11909a47b 100644 --- a/neural_modelling/src/neuron/c_main.c +++ b/neural_modelling/src/neuron/c_main.c @@ -38,13 +38,13 @@ #include "c_main_common.h" #include "regions.h" #include "profile_tags.h" -#include "spike_profiling.h" +//#include "spike_profiling.h" #include "spike_processing.h" -struct spike_holder_t spike_counter; -struct spike_holder_t spike_cache; -struct spike_holder_t spike_counter_inh; -struct spike_holder_t spike_cache_inh; +//struct spike_holder_t spike_counter; 
+//struct spike_holder_t spike_cache; +//struct spike_holder_t spike_counter_inh; +//struct spike_holder_t spike_cache_inh; //! The combined provenance from synapses and neurons struct combined_provenance { @@ -188,16 +188,16 @@ void background_callback(uint timer_count, uint local_time) { //! \param[in] unused: unused parameter kept for API consistency void timer_callback(uint timer_count, UNUSED uint unused) { - // Get number of spikes in last tick, and reset spike counter - spike_processing_get_and_reset_spikes_this_tick(); - spike_processing_get_and_reset_dmas_this_tick(); - spike_processing_get_and_reset_pipeline_restarts_this_tick(); - - // cache and flush spike counters - spike_profiling_cache_and_flush_spike_holder(&spike_counter, - &spike_cache); - spike_profiling_cache_and_flush_spike_holder(&spike_counter_inh, - &spike_cache_inh); +// // Get number of spikes in last tick, and reset spike counter +// spike_processing_get_and_reset_spikes_this_tick(); +// spike_processing_get_and_reset_dmas_this_tick(); +// spike_processing_get_and_reset_pipeline_restarts_this_tick(); +// +// // cache and flush spike counters +// spike_profiling_cache_and_flush_spike_holder(&spike_counter, +// &spike_cache); +// spike_profiling_cache_and_flush_spike_holder(&spike_counter_inh, +// &spike_cache_inh); // Disable interrupts to stop DMAs and MC getting in the way of this bit uint32_t state = spin1_int_disable(); diff --git a/neural_modelling/src/neuron/c_main_synapses.c b/neural_modelling/src/neuron/c_main_synapses.c index 811e2976f4..e99b5903c7 100644 --- a/neural_modelling/src/neuron/c_main_synapses.c +++ b/neural_modelling/src/neuron/c_main_synapses.c @@ -36,9 +36,20 @@ #include "c_main_synapse_common.h" #include "c_main_common.h" #include "spike_processing_fast.h" +#include "spike_profiling.h" #include "structural_plasticity/synaptogenesis_dynamics.h" #include +//! 
spike profiling +struct spike_holder_t spike_counter; +struct spike_holder_t spike_cache; +struct spike_holder_t spike_counter_inh; +struct spike_holder_t spike_cache_inh; + +//// FLUSH SPIKES ?? +//bool timer_callback_active = false; +//extern volatile bool dma_busy; + //! values for the priority for each callback typedef enum callback_priorities { MC = -1, DMA = -2, TIMER = 0, SDP = 0 @@ -130,6 +141,17 @@ void resume_callback(void) { //! \param[in] unused0: unused //! \param[in] unused1: unused void timer_callback(UNUSED uint unused0, UNUSED uint unused1) { + // Get number of spikes in last tick, and reset spike counter + spike_processing_get_and_reset_spikes_this_tick(); + spike_processing_get_and_reset_dmas_this_tick(); + spike_processing_get_and_reset_pipeline_restarts_this_tick(); + + // cache and flush spike counters + spike_profiling_cache_and_flush_spike_holder(&spike_counter, + &spike_cache); + spike_profiling_cache_and_flush_spike_holder(&spike_counter_inh, + &spike_cache_inh); + time++; if (simulation_is_finished()) { // Enter pause and resume state to avoid another tick diff --git a/neural_modelling/src/neuron/current_sources/current_source.h b/neural_modelling/src/neuron/current_sources/current_source.h index bf021c0066..c4a2ef8e30 100644 --- a/neural_modelling/src/neuron/current_sources/current_source.h +++ b/neural_modelling/src/neuron/current_sources/current_source.h @@ -58,7 +58,8 @@ SOMETIMES_UNUSED // Marked unused as only used sometimes //! \return True if successful static bool current_source_initialise(address_t cs_address, uint32_t n_neurons) { // Avoid the loops if no current sources - #if defined(_CURRENT_SOURCE_NONE_H_) + #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ + !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) return true; #else @@ -148,7 +149,8 @@ SOMETIMES_UNUSED // Marked unused as only used sometimes //! 
\return True if successful static bool current_source_load_parameters(address_t cs_address) { // Avoid the loops if no current sources - #if defined(_CURRENT_SOURCE_NONE_H_) + #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ + !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) return true; #else @@ -204,7 +206,8 @@ SOMETIMES_UNUSED // Marked unused as only used sometimes //! \return True if successful static inline REAL current_source_get_offset(uint32_t time, uint32_t neuron_index) { // Avoid the loops if no current sources defined - #if defined(_CURRENT_SOURCE_NONE_H_) + #if !defined(_CURRENT_SOURCE_DC_H_) && !defined(_CURRENT_SOURCE_AC_H) && \ + !defined(_CURRENT_SOURCE_STEP_H_) && !defined(_CURRENT_SOURCE_NOISY_H_) return ZERO; #else diff --git a/neural_modelling/src/neuron/current_sources/current_source_none.h b/neural_modelling/src/neuron/current_sources/current_source_none.h deleted file mode 100644 index 53a951aa8c..0000000000 --- a/neural_modelling/src/neuron/current_sources/current_source_none.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2017-2019 The University of Manchester - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -//! \dir -//! \brief None current source functions -//! \file -//! 
\brief Functions called for no current source -#ifndef _CURRENT_SOURCE_NONE_H_ -#define _CURRENT_SOURCE_NONE_H_ - -#endif // _CURRENT_SOURCE_NONE_H_ diff --git a/neural_modelling/src/neuron/current_sources/current_source_none_impl.h b/neural_modelling/src/neuron/current_sources/current_source_none_impl.h deleted file mode 100644 index bdea7a9b92..0000000000 --- a/neural_modelling/src/neuron/current_sources/current_source_none_impl.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright (c) 2017-2019 The University of Manchester - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -//! \dir -//! \brief Current source implementations -//! \file -//! \brief General API of a current source implementation -#ifndef _CURRENT_SOURCE_NONE_IMPL_H_ -#define _CURRENT_SOURCE_NONE_IMPL_H_ - -#endif // _CURRENT_SOURCE_NONE_IMPL_H_ diff --git a/neural_modelling/src/neuron/input_types/input_type_conductance.h b/neural_modelling/src/neuron/input_types/input_type_conductance.h index c3fad7e385..5c78eb2b3f 100644 --- a/neural_modelling/src/neuron/input_types/input_type_conductance.h +++ b/neural_modelling/src/neuron/input_types/input_type_conductance.h @@ -21,7 +21,7 @@ #define _INPUT_TYPE_CONDUCTANCE_H_ #include "input_type.h" -#include "round.h" +//#include "round.h" //! 
Conductance input parameters struct input_type_params_t { @@ -77,11 +77,11 @@ static inline void input_type_convert_excitatory_input_to_current( state_t membrane_voltage) { for (int i=0; i < NUM_EXCITATORY_RECEPTORS; i++) { // accum = accum * (accum - accum) -// exc_input[i] = exc_input[i] * -// (input_type->V_rev_E - membrane_voltage); + exc_input[i] = exc_input[i] * + (input_type->V_rev_E - membrane_voltage); // RTN accum - exc_input[i] = MULT_ROUND_NEAREST_ACCUM(exc_input[i], - (input_type->V_rev_E - membrane_voltage)); +// exc_input[i] = MULT_ROUND_NEAREST_ACCUM(exc_input[i], +// (input_type->V_rev_E - membrane_voltage)); } } @@ -96,11 +96,11 @@ static inline void input_type_convert_inhibitory_input_to_current( state_t membrane_voltage) { for (int i=0; i < NUM_INHIBITORY_RECEPTORS; i++) { // accum = accum * (accum - accum) -// inh_input[i] = -inh_input[i] * -// (input_type->V_rev_I - membrane_voltage); + inh_input[i] = -inh_input[i] * + (input_type->V_rev_I - membrane_voltage); // RTN accum - inh_input[i] = MULT_ROUND_NEAREST_ACCUM(-inh_input[i], - (input_type->V_rev_I - membrane_voltage)); +// inh_input[i] = MULT_ROUND_NEAREST_ACCUM(-inh_input[i], +// (input_type->V_rev_I - membrane_voltage)); } } diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index b8fd0bc0f4..a024f9838a 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -21,7 +21,7 @@ #define _NEURON_MODEL_LIF_CURR_IMPL_H_ #include "neuron_model.h" -#include "round.h" +//#include "round.h" //! 
definition for LIF neuron parameters struct neuron_params_t { @@ -121,15 +121,15 @@ static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *par static inline void lif_neuron_closed_form( neuron_t *neuron, REAL V_prev, input_t input_this_timestep) { // accum = accum * accum + accum -// REAL alpha = input_this_timestep * neuron->R_membrane + neuron->V_rest; - REAL alpha = MULT_ROUND_NEAREST_ACCUM( - input_this_timestep, neuron->R_membrane) + neuron->V_rest; + REAL alpha = (input_this_timestep * neuron->R_membrane) + neuron->V_rest; +// REAL alpha = MULT_ROUND_NEAREST_ACCUM( +// input_this_timestep, neuron->R_membrane) + neuron->V_rest; // update membrane voltage // accum - (ufract * (accum - accum)) -// neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); - neuron->V_membrane = alpha - MULT_ROUND_NEAREST_ACCUM( - neuron->exp_TC, (alpha - V_prev)); + neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); +// neuron->V_membrane = alpha - MULT_ROUND_NEAREST_ACCUM( +// neuron->exp_TC, (alpha - V_prev)); } //! \brief primary function called in timer loop after synaptic updates @@ -154,8 +154,8 @@ static inline state_t neuron_model_state_update( // If outside of the refractory period if (neuron->refract_timer <= 0) { - REAL total_exc = 0; - REAL total_inh = 0; + REAL total_exc = ZERO; + REAL total_inh = ZERO; for (int i=0; i < num_excitatory_inputs; i++) { total_exc += exc_input[i]; diff --git a/neural_modelling/src/neuron/spike_processing.c b/neural_modelling/src/neuron/spike_processing.c index 5179c608d8..18ce336e4c 100644 --- a/neural_modelling/src/neuron/spike_processing.c +++ b/neural_modelling/src/neuron/spike_processing.c @@ -109,18 +109,18 @@ static uint32_t biggest_fill_size_of_input_buffer; //! end of a timer tick. 
static bool clear_input_buffers_of_late_packets; -static uint32_t max_spikes_in_a_tick; -static uint32_t max_dmas_in_a_tick; -static uint32_t dma_complete_count; -static uint32_t max_pipeline_restarts; -static uint32_t spike_pipeline_deactivation_time = 0; -static uint32_t timer_callback_completed = 0; -static uint32_t spikes_this_time_step = 0; // needed because packets gets reset? -static uint32_t dmas_this_time_step = 0; -static uint32_t pipeline_restarts = 0; - -static uint32_t max_flushed_spikes = 0; -static uint32_t total_flushed_spikes = 0; +//static uint32_t max_spikes_in_a_tick; +//static uint32_t max_dmas_in_a_tick; +//static uint32_t dma_complete_count; +//static uint32_t max_pipeline_restarts; +//static uint32_t spike_pipeline_deactivation_time = 0; +//static uint32_t timer_callback_completed = 0; +//static uint32_t spikes_this_time_step = 0; // needed because packets gets reset? +//static uint32_t dmas_this_time_step = 0; +//static uint32_t pipeline_restarts = 0; +// +//static uint32_t max_flushed_spikes = 0; +//static uint32_t total_flushed_spikes = 0; //! 
the number of packets received this time step static struct { @@ -315,8 +315,8 @@ static inline void start_dma_loop(void) { static void multicast_packet_received_callback(uint key, UNUSED uint unused) { p_per_ts_struct.packets_this_time_step += 1; // Increment the count of number of spikes received this tick by this core - spikes_this_time_step++; - log_debug("Received spike %x at %d, DMA Busy = %d", key, time, dma_busy); +// spikes_this_time_step++; +// log_debug("Received spike %x at %d, DMA Busy = %d", key, time, dma_busy); if (in_spikes_add_spike(key)) { start_dma_loop(); } @@ -328,9 +328,9 @@ static void multicast_packet_received_callback(uint key, UNUSED uint unused) { static void multicast_packet_pl_received_callback(uint key, uint payload) { p_per_ts_struct.packets_this_time_step += 1; // Increment the count of number of spikes received this tick by this core - spikes_this_time_step++; - log_debug("Received spike %x with payload %d at %d, DMA Busy = %d", - key, payload, time, dma_busy); +// spikes_this_time_step++; +// log_debug("Received spike %x with payload %d at %d, DMA Busy = %d", +// key, payload, time, dma_busy); // cycle through the packet insertion bool added = false; @@ -350,11 +350,11 @@ static void dma_complete_callback(UNUSED uint unused, UNUSED uint tag) { // increment the dma complete count for provenance generation dma_complete_count++; - log_debug("DMA transfer complete at time %u with tag %u", time, tag); - - // Increment the counter tracking the number of DMAs completed this - // timestep on a particular core - dmas_this_time_step++; +// log_debug("DMA transfer complete at time %u with tag %u", time, tag); +// +// // Increment the counter tracking the number of DMAs completed this +// // timestep on a particular core +// dmas_this_time_step++; // Get pointer to current buffer uint32_t current_buffer_index = buffer_being_read; @@ -426,7 +426,7 @@ void user_event_callback(UNUSED uint unused0, UNUSED uint unused1) { dma_n_spikes = 0; // 
Increment counter for spike processing pipeline restarts - pipeline_restarts++; +// pipeline_restarts++; if (buffer_being_read < N_DMA_BUFFERS) { // If the DMA buffer is full of valid data, attempt to reuse it on the @@ -500,13 +500,13 @@ void spike_processing_store_provenance(struct spike_processing_provenance *prov) prov->n_rewires = n_successful_rewires; prov->n_packets_dropped_from_lateness = count_input_buffer_packets_late; prov->max_filled_input_buffer_size = biggest_fill_size_of_input_buffer; - prov->max_spikes_in_a_tick = max_spikes_in_a_tick; - prov->max_dmas_in_a_tick = max_dmas_in_a_tick; - prov->max_pipeline_restarts = max_pipeline_restarts; - prov->timer_callback_completed = timer_callback_completed; - prov->spike_pipeline_deactivated = spike_pipeline_deactivation_time; - prov->max_flushed_spikes = max_flushed_spikes; - prov->total_flushed_spikes = total_flushed_spikes; +// prov->max_spikes_in_a_tick = max_spikes_in_a_tick; +// prov->max_dmas_in_a_tick = max_dmas_in_a_tick; +// prov->max_pipeline_restarts = max_pipeline_restarts; +// prov->timer_callback_completed = timer_callback_completed; +// prov->spike_pipeline_deactivated = spike_pipeline_deactivation_time; +// prov->max_flushed_spikes = max_flushed_spikes; +// prov->total_flushed_spikes = total_flushed_spikes; } bool spike_processing_do_rewiring(int number_of_rewires) { @@ -519,28 +519,28 @@ bool spike_processing_do_rewiring(int number_of_rewires) { return true; } -// Custom provenance from SpiNNCer -void spike_processing_get_and_reset_spikes_this_tick(void ) { - if (spikes_this_time_step > max_spikes_in_a_tick) { - max_spikes_in_a_tick = spikes_this_time_step; - } - spikes_this_time_step = 0; -} - -void spike_processing_get_and_reset_dmas_this_tick(void) { - if (dmas_this_time_step > max_dmas_in_a_tick){ - max_dmas_in_a_tick = dmas_this_time_step; - } - dmas_this_time_step = 0; -} - -void spike_processing_get_and_reset_pipeline_restarts_this_tick(void) { - if (pipeline_restarts > 
max_pipeline_restarts) { - max_pipeline_restarts = pipeline_restarts; - } - pipeline_restarts = 0; -} - +//// Custom provenance from SpiNNCer +//void spike_processing_get_and_reset_spikes_this_tick(void ) { +// if (spikes_this_time_step > max_spikes_in_a_tick) { +// max_spikes_in_a_tick = spikes_this_time_step; +// } +// spikes_this_time_step = 0; +//} +// +//void spike_processing_get_and_reset_dmas_this_tick(void) { +// if (dmas_this_time_step > max_dmas_in_a_tick){ +// max_dmas_in_a_tick = dmas_this_time_step; +// } +// dmas_this_time_step = 0; +//} +// +//void spike_processing_get_and_reset_pipeline_restarts_this_tick(void) { +// if (pipeline_restarts > max_pipeline_restarts) { +// max_pipeline_restarts = pipeline_restarts; +// } +// pipeline_restarts = 0; +//} +// //uint32_t spike_processing_get_pipeline_deactivation_time(){ // return spike_pipeline_deactivation_time; //} diff --git a/neural_modelling/src/neuron/spike_processing.h b/neural_modelling/src/neuron/spike_processing.h index 38d9f3cc22..3a48a5bcb9 100644 --- a/neural_modelling/src/neuron/spike_processing.h +++ b/neural_modelling/src/neuron/spike_processing.h @@ -38,21 +38,21 @@ struct spike_processing_provenance { uint32_t n_packets_dropped_from_lateness; //! The maximum size of the input buffer uint32_t max_filled_input_buffer_size; - //! SpiNNCer-related provenance - //! The maximum number of spikes in a tick - uint32_t max_spikes_in_a_tick; - //! The maximum number of DMAs in a tick - uint32_t max_dmas_in_a_tick; - //! The maximum number of pipeline restarts - uint32_t max_pipeline_restarts; - //! Was the timer callback completed? - uint32_t timer_callback_completed; - //! Was the spike pipeline deactivated? - uint32_t spike_pipeline_deactivated; - //! The maximum number of flushed spikes in one step - uint32_t max_flushed_spikes; - //! Thet total number of flushed spikes - uint32_t total_flushed_spikes; +// //! SpiNNCer-related provenance +// //! 
The maximum number of spikes in a tick +// uint32_t max_spikes_in_a_tick; +// //! The maximum number of DMAs in a tick +// uint32_t max_dmas_in_a_tick; +// //! The maximum number of pipeline restarts +// uint32_t max_pipeline_restarts; +// //! Was the timer callback completed? +// uint32_t timer_callback_completed; +// //! Was the spike pipeline deactivated? +// uint32_t spike_pipeline_deactivated; +// //! The maximum number of flushed spikes in one step +// uint32_t max_flushed_spikes; +// //! Thet total number of flushed spikes +// uint32_t total_flushed_spikes; }; //! \brief Initialise the spike processing system @@ -86,20 +86,20 @@ bool spike_processing_do_rewiring(int number_of_rewires); void spike_processing_clear_input_buffer(timer_t time); -// Custom provenance from SpiNNCer - -//! \brief get number of spikes received since last timer event -//! \return uint32_t number of spikes -void spike_processing_get_and_reset_spikes_this_tick(void); - -//! \brief get number of dmas completed since last timer event -//! \return uint32_t number of DMAs -void spike_processing_get_and_reset_dmas_this_tick(void); - -//! \brief get number of time pipeline was restarted since last timer event -//! \return uint32_t number of pipeline restarts -void spike_processing_get_and_reset_pipeline_restarts_this_tick(void); - +//// Custom provenance from SpiNNCer +// +////! \brief get number of spikes received since last timer event +////! \return uint32_t number of spikes +//void spike_processing_get_and_reset_spikes_this_tick(void); +// +////! \brief get number of dmas completed since last timer event +////! \return uint32_t number of DMAs +//void spike_processing_get_and_reset_dmas_this_tick(void); +// +////! \brief get number of time pipeline was restarted since last timer event +////! \return uint32_t number of pipeline restarts +//void spike_processing_get_and_reset_pipeline_restarts_this_tick(void); +// ////! \brief get time from T1 clock at which spike pipeline completed ////! 
\return uint32_t pipeline deactivation time //uint32_t spike_processing_get_pipeline_deactivation_time(); diff --git a/neural_modelling/src/neuron/spike_processing_fast.c b/neural_modelling/src/neuron/spike_processing_fast.c index a668ae23bc..2c698e609c 100644 --- a/neural_modelling/src/neuron/spike_processing_fast.c +++ b/neural_modelling/src/neuron/spike_processing_fast.c @@ -115,6 +115,19 @@ static uint32_t earliest_spike_received_time = 0; //! The maximum number of spikes left at the end of a time step static uint32_t max_spikes_overflow = 0; +static uint32_t max_spikes_in_a_tick; +static uint32_t max_dmas_in_a_tick; +static uint32_t dma_complete_count; +static uint32_t max_pipeline_restarts; +static uint32_t spike_pipeline_deactivation_time = 0; +static uint32_t timer_callback_completed = 0; +static uint32_t spikes_this_time_step = 0; // needed because packets gets reset? +static uint32_t dmas_this_time_step = 0; +static uint32_t pipeline_restarts = 0; + +static uint32_t max_flushed_spikes = 0; +static uint32_t total_flushed_spikes = 0; + //! 
The number of packets received this time step for recording static struct { uint32_t time; @@ -657,4 +670,47 @@ void spike_processing_fast_store_provenance( prov->earliest_receive = earliest_spike_received_time; prov->latest_receive = latest_spike_received_time; prov->max_spikes_overflow = max_spikes_overflow; + prov->max_spikes_in_a_tick = max_spikes_in_a_tick; + prov->max_dmas_in_a_tick = max_dmas_in_a_tick; + prov->max_pipeline_restarts = max_pipeline_restarts; + prov->timer_callback_completed = timer_callback_completed; + prov->spike_pipeline_deactivated = spike_pipeline_deactivation_time; + prov->max_flushed_spikes = max_flushed_spikes; + prov->total_flushed_spikes = total_flushed_spikes; } + +// Custom provenance from SpiNNCer +void spike_processing_get_and_reset_spikes_this_tick(void ) { + if (spikes_this_time_step > max_spikes_in_a_tick) { + max_spikes_in_a_tick = spikes_this_time_step; + } + spikes_this_time_step = 0; +} + +void spike_processing_get_and_reset_dmas_this_tick(void) { + if (dmas_this_time_step > max_dmas_in_a_tick){ + max_dmas_in_a_tick = dmas_this_time_step; + } + dmas_this_time_step = 0; +} + +void spike_processing_get_and_reset_pipeline_restarts_this_tick(void) { + if (pipeline_restarts > max_pipeline_restarts) { + max_pipeline_restarts = pipeline_restarts; + } + pipeline_restarts = 0; +} + +uint32_t spike_processing_get_pipeline_deactivation_time(){ + return spike_pipeline_deactivation_time; +} + +// FLUSH SPIKES +uint32_t spike_processing_get_total_flushed_spikes(){ + return total_flushed_spikes; +} + +uint32_t spike_processing_get_max_flushed_spikes(){ + return max_flushed_spikes; +} + diff --git a/neural_modelling/src/neuron/spike_processing_fast.h b/neural_modelling/src/neuron/spike_processing_fast.h index 3ee096cd9a..20c79ff9c5 100644 --- a/neural_modelling/src/neuron/spike_processing_fast.h +++ b/neural_modelling/src/neuron/spike_processing_fast.h @@ -80,6 +80,21 @@ struct spike_processing_fast_provenance { uint32_t latest_receive; 
//! The most spikes left at the end of any time step uint32_t max_spikes_overflow; + //! SpiNNCer-related provenance + //! The maximum number of spikes in a tick + uint32_t max_spikes_in_a_tick; + //! The maximum number of DMAs in a tick + uint32_t max_dmas_in_a_tick; + //! The maximum number of pipeline restarts + uint32_t max_pipeline_restarts; + //! Was the timer callback completed? + uint32_t timer_callback_completed; + //! Was the spike pipeline deactivated? + uint32_t spike_pipeline_deactivated; + //! The maximum number of flushed spikes in one step + uint32_t max_flushed_spikes; + //! Thet total number of flushed spikes + uint32_t total_flushed_spikes; }; //! \brief Set up spike processing @@ -113,4 +128,32 @@ void spike_processing_fast_time_step_loop(uint32_t time, uint32_t n_rewires); void spike_processing_fast_store_provenance( struct spike_processing_fast_provenance *prov); +// Custom provenance from SpiNNCer + +//! \brief get number of spikes received since last timer event +//! \return uint32_t number of spikes +void spike_processing_get_and_reset_spikes_this_tick(void); + +//! \brief get number of dmas completed since last timer event +//! \return uint32_t number of DMAs +void spike_processing_get_and_reset_dmas_this_tick(void); + +//! \brief get number of time pipeline was restarted since last timer event +//! \return uint32_t number of pipeline restarts +void spike_processing_get_and_reset_pipeline_restarts_this_tick(void); + +//! \brief get time from T1 clock at which spike pipeline completed +//! \return uint32_t pipeline deactivation time +uint32_t spike_processing_get_pipeline_deactivation_time(); + +// FLUSH SPIKES +//! \brief returns the total unprocessed spikes from a simulation +//! \return total unprocessed spikes +uint32_t spike_processing_get_total_flushed_spikes(); + +//! \brief returns the maximum unprocessed spikes from a single +//! simulation timestep. +//! \return maximum unprocessed spikes from a single timestep. 
+uint32_t spike_processing_get_max_flushed_spikes(); + #endif // _SPIKE_PROCESSING_FAST_H_ diff --git a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h index 12eab582bb..731d9c4c10 100644 --- a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h +++ b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h @@ -22,7 +22,7 @@ #include #include -#include "round.h" +//#include "round.h" //! The type of exponential decay parameters typedef struct exp_params_t { @@ -61,20 +61,21 @@ static inline void decay_and_init(exp_state_t *state, exp_params_t *params, //! \param[in,out] exp_param: The parameter to shape static inline void exp_shaping(exp_state_t *exp_param) { // decay value according to decay constant -// exp_param->synaptic_input_value = -// decay_s1615(exp_param->synaptic_input_value, exp_param->decay); - exp_param->synaptic_input_value = - MULT_ROUND_NEAREST_ACCUM(exp_param->synaptic_input_value, - exp_param->decay);} + exp_param->synaptic_input_value = + decay_s1615(exp_param->synaptic_input_value, exp_param->decay); +// exp_param->synaptic_input_value = +// MULT_ROUND_NEAREST_ACCUM(exp_param->synaptic_input_value, +// exp_param->decay); +} //! \brief helper function to add input for a given timer period to a given //! neuron //! \param[in,out] parameter: the parameter to update //! \param[in] input: the input to add. 
static inline void add_input_exp(exp_state_t *parameter, input_t input) { -// parameter->synaptic_input_value = parameter->synaptic_input_value + -// decay_s1615(input, parameter->init); parameter->synaptic_input_value = parameter->synaptic_input_value + - MULT_ROUND_NEAREST_ACCUM(input, parameter->init); + decay_s1615(input, parameter->init); +// parameter->synaptic_input_value = parameter->synaptic_input_value + +// MULT_ROUND_NEAREST_ACCUM(input, parameter->init); } diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py index 666fd5384f..6043747a56 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/splitter_abstract_pop_vertex_fixed.py @@ -104,22 +104,21 @@ def create_machine_vertices(self, chip_counter): app_vertex.get_max_atoms_per_core(), app_vertex.n_atoms) ring_buffer_shifts = None - if self.__ring_buffer_shifts is None: - app_vertex = self._governed_app_vertex - if (hasattr(app_vertex, "rb_left_shifts") and - app_vertex.rb_left_shifts is not None): - print("=" * 80) - print("Using given values for RB left shifts.") - ring_buffer_shifts = app_vertex.rb_left_shifts - print("RB left shifts for {:20}".format(app_vertex.label), - "=", ring_buffer_shifts) - print("-" * 80) - else: - print("=" * 80) - print("Computing RB left shifts for", app_vertex.label) - ring_buffer_shifts = app_vertex.get_ring_buffer_shifts() - print("RB left shifts for {:20}".format(app_vertex.label), - "=", ring_buffer_shifts) + app_vertex = self._governed_app_vertex + if (hasattr(app_vertex, "rb_left_shifts") and + app_vertex.rb_left_shifts is not None): + print("=" * 80) + print("Using given values for RB left shifts.") + ring_buffer_shifts = app_vertex.rb_left_shifts + print("RB left shifts for {:20}".format(app_vertex.label), + "=", 
ring_buffer_shifts) + print("-" * 80) + else: + print("=" * 80) + print("Computing RB left shifts for", app_vertex.label) + ring_buffer_shifts = app_vertex.get_ring_buffer_shifts() + print("RB left shifts for {:20}".format(app_vertex.label), + "=", ring_buffer_shifts) weight_scales = app_vertex.get_weight_scales(ring_buffer_shifts) all_syn_block_sz = app_vertex.get_synapses_size( diff --git a/spynnaker/pyNN/models/neuron/population_machine_vertex.py b/spynnaker/pyNN/models/neuron/population_machine_vertex.py index b25a641400..b8cff87107 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_vertex.py +++ b/spynnaker/pyNN/models/neuron/population_machine_vertex.py @@ -42,21 +42,21 @@ class SpikeProcessingProvenance(ctypes.LittleEndianStructure): # The number of packets that were dropped due to being late ("n_late_packets", ctypes.c_uint32), # The maximum size of the spike input buffer during simulation - ("max_size_input_buffer", ctypes.c_uint32), - # Custom provenance from SpiNNCer - max spikes in a tick - ("max_spikes_in_a_tick", ctypes.c_uint32), - # max dmas in a tick - ("max_dmas_in_a_tick", ctypes.c_uint32), - # max pipeline restarts - ("max_pipeline_restarts", ctypes.c_uint32), - # timer callback completed? - ("timer_callback_completed", ctypes.c_uint32), - # spikes pipeline activated? - ("spikes_pipeline_activated", ctypes.c_uint32), - # Max flushed spikes in a timestep - ("max_flushed_spikes", ctypes.c_uint32), - # Total flushed spikes - ("total_flushed_spikes", ctypes.c_uint32) + ("max_size_input_buffer", ctypes.c_uint32) + # # Custom provenance from SpiNNCer - max spikes in a tick + # ("max_spikes_in_a_tick", ctypes.c_uint32), + # # max dmas in a tick + # ("max_dmas_in_a_tick", ctypes.c_uint32), + # # max pipeline restarts + # ("max_pipeline_restarts", ctypes.c_uint32), + # # timer callback completed? + # ("timer_callback_completed", ctypes.c_uint32), + # # spikes pipeline activated? 
+ # ("spikes_pipeline_activated", ctypes.c_uint32), + # # Max flushed spikes in a timestep + # ("max_flushed_spikes", ctypes.c_uint32), + # # Total flushed spikes + # ("total_flushed_spikes", ctypes.c_uint32) ] N_ITEMS = len(_fields_) @@ -104,15 +104,15 @@ class PopulationMachineVertex( MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME = "Max_filled_size_input_buffer" BACKGROUND_OVERLOADS_NAME = "Times_the_background_queue_overloaded" BACKGROUND_MAX_QUEUED_NAME = "Max_backgrounds_queued" - # Custom provenance from SpiNNCer - MAX_SPIKES_IN_A_TICK = "Maximum number of spikes in a timer tick" - MAX_DMAS_IN_A_TICK = "Maximum number of DMAs in a timer tick" - MAX_PIPELINE_RESTARTS = "Maximum pipeline restarts" - TIMER_CALLBACK_COMPLETED = "Was the timer callback completed?" - SPIKES_PIPELINE_ACTIVATED = "Was the spikes pipeline activated?" - # Flushed spikes - MAX_FLUSHED_SPIKES = "Maximum number of spikes flushed in a timer tick" - TOTAL_FLUSHED_SPIKES = "Total number of spikes flushed" + # # Custom provenance from SpiNNCer + # MAX_SPIKES_IN_A_TICK = "Maximum number of spikes in a timer tick" + # MAX_DMAS_IN_A_TICK = "Maximum number of DMAs in a timer tick" + # MAX_PIPELINE_RESTARTS = "Maximum pipeline restarts" + # TIMER_CALLBACK_COMPLETED = "Was the timer callback completed?" + # SPIKES_PIPELINE_ACTIVATED = "Was the spikes pipeline activated?" + # # Flushed spikes + # MAX_FLUSHED_SPIKES = "Maximum number of spikes flushed in a timer tick" + # TOTAL_FLUSHED_SPIKES = "Total number of spikes flushed" class REGIONS(Enum): """Regions for populations.""" @@ -412,49 +412,49 @@ def _parse_spike_processing_provenance( x, y, p, self.MAX_FILLED_SIZE_OF_INPUT_BUFFER_NAME, prov.max_size_input_buffer) - # SpiNNCer - db.insert_core( - x, y, p, self.MAX_SPIKES_IN_A_TICK, - prov.max_spikes_in_a_tick) - if prov.max_spikes_in_a_tick > 200: - db.insert_report( - f"Max number of spikes for {label} was " - f"{prov.max_spikes_in_a_tick}. 
Empirically, we " - f"can deal with ~200 for real time performance using a " - f"1.0 ms timestep.") - - db.insert_core( - x, y, p, self.MAX_DMAS_IN_A_TICK, - prov.max_dmas_in_a_tick) - - db.insert_core( - x, y, p, self.MAX_PIPELINE_RESTARTS, - prov.max_pipeline_restarts) - - db.insert_core( - x, y, p, self.TIMER_CALLBACK_COMPLETED, - prov.timer_callback_completed) - - db.insert_core( - x, y, p, self.SPIKES_PIPELINE_ACTIVATED, - prov.spikes_pipeline_activated) - - # FLUSHED SPIKES - db.insert_core( - x, y, p, self.MAX_FLUSHED_SPIKES, - prov.max_flushed_spikes) - if prov.max_flushed_spikes > 0: - db.insert_report( - f"Max number of flushed spikes for {label} was " - f"was {prov.max_flushed_spikes}.") - - db.insert_core( - x, y, p, self.TOTAL_FLUSHED_SPIKES, - prov.total_flushed_spikes) - if prov.total_flushed_spikes > 0: - db.insert_report( - f"Total number of flushed spikes for {label} was " - f"{prov.total_flushed_spikes}.") + # # SpiNNCer + # db.insert_core( + # x, y, p, self.MAX_SPIKES_IN_A_TICK, + # prov.max_spikes_in_a_tick) + # if prov.max_spikes_in_a_tick > 200: + # db.insert_report( + # f"Max number of spikes for {label} was " + # f"{prov.max_spikes_in_a_tick}. 
Empirically, we " + # f"can deal with ~200 for real time performance using a " + # f"1.0 ms timestep.") + # + # db.insert_core( + # x, y, p, self.MAX_DMAS_IN_A_TICK, + # prov.max_dmas_in_a_tick) + # + # db.insert_core( + # x, y, p, self.MAX_PIPELINE_RESTARTS, + # prov.max_pipeline_restarts) + # + # db.insert_core( + # x, y, p, self.TIMER_CALLBACK_COMPLETED, + # prov.timer_callback_completed) + # + # db.insert_core( + # x, y, p, self.SPIKES_PIPELINE_ACTIVATED, + # prov.spikes_pipeline_activated) + # + # # FLUSHED SPIKES + # db.insert_core( + # x, y, p, self.MAX_FLUSHED_SPIKES, + # prov.max_flushed_spikes) + # if prov.max_flushed_spikes > 0: + # db.insert_report( + # f"Max number of flushed spikes for {label} was " + # f"was {prov.max_flushed_spikes}.") + # + # db.insert_core( + # x, y, p, self.TOTAL_FLUSHED_SPIKES, + # prov.total_flushed_spikes) + # if prov.total_flushed_spikes > 0: + # db.insert_report( + # f"Total number of flushed spikes for {label} was " + # f"{prov.total_flushed_spikes}.") @overrides(PopulationMachineNeurons.set_do_neuron_regeneration) def set_do_neuron_regeneration(self): diff --git a/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_common.py b/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_common.py index 6ffef53a08..3bea68e030 100644 --- a/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_common.py +++ b/spynnaker/pyNN/models/neuron/population_synapses_machine_vertex_common.py @@ -68,7 +68,21 @@ class SpikeProcessingFastProvenance(ctypes.LittleEndianStructure): # The latest time a spike was received ("latest_receive", ctypes.c_uint32), # The maximum overflow of spikes in a time step - ("max_spikes_overflow", ctypes.c_uint32) + ("max_spikes_overflow", ctypes.c_uint32), + # Custom provenance from SpiNNCer - max spikes in a tick + ("max_spikes_in_a_tick", ctypes.c_uint32), + # max dmas in a tick + ("max_dmas_in_a_tick", ctypes.c_uint32), + # max pipeline restarts + ("max_pipeline_restarts", 
ctypes.c_uint32), + # timer callback completed? + ("timer_callback_completed", ctypes.c_uint32), + # spikes pipeline activated? + ("spikes_pipeline_activated", ctypes.c_uint32), + # Max flushed spikes in a timestep + ("max_flushed_spikes", ctypes.c_uint32), + # Total flushed spikes + ("total_flushed_spikes", ctypes.c_uint32) ] N_ITEMS = len(_fields_) @@ -95,6 +109,16 @@ class PopulationSynapsesMachineVertexCommon( LATEST_RECEIVE = "Latest_receive_time" MAX_SPIKE_OVERFLOW = "Max_spike_overflow_in_time_step" + # Custom provenance from SpiNNCer + MAX_SPIKES_IN_A_TICK = "Maximum number of spikes in a timer tick" + MAX_DMAS_IN_A_TICK = "Maximum number of DMAs in a timer tick" + MAX_PIPELINE_RESTARTS = "Maximum pipeline restarts" + TIMER_CALLBACK_COMPLETED = "Was the timer callback completed?" + SPIKES_PIPELINE_ACTIVATED = "Was the spikes pipeline activated?" + # Flushed spikes + MAX_FLUSHED_SPIKES = "Maximum number of spikes flushed in a timer tick" + TOTAL_FLUSHED_SPIKES = "Total number of spikes flushed" + __slots__ = [ "__sdram_partition", "__neuron_vertex", @@ -369,3 +393,47 @@ def _parse_spike_processing_fast_provenance( x, y, p, self.LATEST_RECEIVE, prov.latest_receive) db.insert_core( x, y, p, self.MAX_SPIKE_OVERFLOW, prov.max_spikes_overflow) + + # SpiNNCer + db.insert_core( + x, y, p, self.MAX_SPIKES_IN_A_TICK, + prov.max_spikes_in_a_tick) + if prov.max_spikes_in_a_tick > 200: + db.insert_report( + f"Max number of spikes for {label} was " + f"{prov.max_spikes_in_a_tick}. 
Empirically, we " + f"can deal with ~200 for real time performance using a " + f"1.0 ms timestep.") + + db.insert_core( + x, y, p, self.MAX_DMAS_IN_A_TICK, + prov.max_dmas_in_a_tick) + + db.insert_core( + x, y, p, self.MAX_PIPELINE_RESTARTS, + prov.max_pipeline_restarts) + + db.insert_core( + x, y, p, self.TIMER_CALLBACK_COMPLETED, + prov.timer_callback_completed) + + db.insert_core( + x, y, p, self.SPIKES_PIPELINE_ACTIVATED, + prov.spikes_pipeline_activated) + + # FLUSHED SPIKES + db.insert_core( + x, y, p, self.MAX_FLUSHED_SPIKES, + prov.max_flushed_spikes) + if prov.max_flushed_spikes > 0: + db.insert_report( + f"Max number of flushed spikes for {label} was " + f"was {prov.max_flushed_spikes}.") + + db.insert_core( + x, y, p, self.TOTAL_FLUSHED_SPIKES, + prov.total_flushed_spikes) + if prov.total_flushed_spikes > 0: + db.insert_report( + f"Total number of flushed spikes for {label} was " + f"{prov.total_flushed_spikes}.") From 1204c5576c9f569fcae5e0cfe9787870ce84caa8 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 1 Dec 2022 17:47:38 +0000 Subject: [PATCH 175/198] Seems like this UFRACT is the culprit --- neural_modelling/src/neuron/models/neuron_model_lif_impl.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index a024f9838a..d880d88a46 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -68,7 +68,7 @@ struct neuron_t { //! 'fixed' computation parameter - time constant multiplier for //! closed-form solution //! exp(-(machine time step in ms)/(R * C)) [.] - UFRACT exp_TC; + REAL exp_TC; //! 
offset current [nA] REAL I_offset; From 2feb571e26f77258265eba7458851a21781c8472 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 2 Dec 2022 13:06:21 +0000 Subject: [PATCH 176/198] Only need to sum g_syn on the first step --- .../implementations/neuron_impl_standard.h | 22 +++++++++---------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h index ed11fc37f6..e801423fd9 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h @@ -273,19 +273,19 @@ static void neuron_impl_do_timestep_update( input_t *inh_input_values = input_type_get_input_value( inh_syn_values, input_types, NUM_INHIBITORY_RECEPTORS); - // Sum g_syn contributions from all receptors for recording - REAL total_exc = ZERO; - REAL total_inh = ZERO; - - for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) { - total_exc += exc_input_values[i]; - } - for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) { - total_inh += inh_input_values[i]; - } - // Do recording if on the first step if (i_step == n_steps_per_timestep) { + // Sum g_syn contributions from all receptors for recording + REAL total_exc = ZERO; + REAL total_inh = ZERO; + + for (int i = 0; i < NUM_EXCITATORY_RECEPTORS; i++) { + total_exc += exc_input_values[i]; + } + for (int i = 0; i < NUM_INHIBITORY_RECEPTORS; i++) { + total_inh += inh_input_values[i]; + } + neuron_recording_record_accum( V_RECORDING_INDEX, neuron_index, soma_voltage); neuron_recording_record_accum( From 543e8f7755a25da0ae1e77aa29e842ba7ae95ffc Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 2 Dec 2022 13:09:47 +0000 Subject: [PATCH 177/198] Put the rounding multiplications back in... 
--- .../src/neuron/models/neuron_model_lif_impl.h | 14 +++++++------- .../neuron/synapse_types/exp_synapse_utils.h | 19 +++++++++---------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index d880d88a46..2555a88d8b 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -21,7 +21,7 @@ #define _NEURON_MODEL_LIF_CURR_IMPL_H_ #include "neuron_model.h" -//#include "round.h" +#include "round.h" //! definition for LIF neuron parameters struct neuron_params_t { @@ -121,15 +121,15 @@ static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *par static inline void lif_neuron_closed_form( neuron_t *neuron, REAL V_prev, input_t input_this_timestep) { // accum = accum * accum + accum - REAL alpha = (input_this_timestep * neuron->R_membrane) + neuron->V_rest; -// REAL alpha = MULT_ROUND_NEAREST_ACCUM( -// input_this_timestep, neuron->R_membrane) + neuron->V_rest; +// REAL alpha = (input_this_timestep * neuron->R_membrane) + neuron->V_rest; + REAL alpha = MULT_ROUND_NEAREST_ACCUM( + input_this_timestep, neuron->R_membrane) + neuron->V_rest; // update membrane voltage // accum - (ufract * (accum - accum)) - neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); -// neuron->V_membrane = alpha - MULT_ROUND_NEAREST_ACCUM( -// neuron->exp_TC, (alpha - V_prev)); +// neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); + neuron->V_membrane = alpha - MULT_ROUND_NEAREST_ACCUM( + neuron->exp_TC, (alpha - V_prev)); } //! 
\brief primary function called in timer loop after synaptic updates diff --git a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h index 731d9c4c10..f047004e65 100644 --- a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h +++ b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h @@ -22,7 +22,7 @@ #include #include -//#include "round.h" +#include "round.h" //! The type of exponential decay parameters typedef struct exp_params_t { @@ -61,11 +61,11 @@ static inline void decay_and_init(exp_state_t *state, exp_params_t *params, //! \param[in,out] exp_param: The parameter to shape static inline void exp_shaping(exp_state_t *exp_param) { // decay value according to decay constant - exp_param->synaptic_input_value = - decay_s1615(exp_param->synaptic_input_value, exp_param->decay); -// exp_param->synaptic_input_value = -// MULT_ROUND_NEAREST_ACCUM(exp_param->synaptic_input_value, -// exp_param->decay); +// exp_param->synaptic_input_value = +// decay_s1615(exp_param->synaptic_input_value, exp_param->decay); + exp_param->synaptic_input_value = + MULT_ROUND_NEAREST_ACCUM(exp_param->synaptic_input_value, + exp_param->decay); } //! \brief helper function to add input for a given timer period to a given @@ -73,9 +73,8 @@ static inline void exp_shaping(exp_state_t *exp_param) { //! \param[in,out] parameter: the parameter to update //! \param[in] input: the input to add. 
static inline void add_input_exp(exp_state_t *parameter, input_t input) { - parameter->synaptic_input_value = parameter->synaptic_input_value + - decay_s1615(input, parameter->init); // parameter->synaptic_input_value = parameter->synaptic_input_value + -// MULT_ROUND_NEAREST_ACCUM(input, parameter->init); - +// decay_s1615(input, parameter->init); + parameter->synaptic_input_value = parameter->synaptic_input_value + + MULT_ROUND_NEAREST_ACCUM(input, parameter->init); } From 001e4d93145a86a0a09e8c31fedb7fade732c8d9 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 5 Dec 2022 13:42:19 +0000 Subject: [PATCH 178/198] Save ITCM --- .../additional_input_ca2_adaptive_impl.h | 2 +- neural_modelling/src/neuron/c_main.c | 6 ++-- .../implementations/neuron_impl_standard.h | 28 +++++++++---------- .../src/neuron/models/neuron_model_izh_impl.h | 2 +- .../src/neuron/models/neuron_model_lif_impl.h | 2 +- .../neuron/synapse_types/exp_synapse_utils.h | 18 ++++++------ .../synapse_types/synapse_types_alpha_impl.h | 8 +++--- 7 files changed, 33 insertions(+), 33 deletions(-) diff --git a/neural_modelling/src/neuron/additional_inputs/additional_input_ca2_adaptive_impl.h b/neural_modelling/src/neuron/additional_inputs/additional_input_ca2_adaptive_impl.h index 24d404a3d7..d622dcf186 100644 --- a/neural_modelling/src/neuron/additional_inputs/additional_input_ca2_adaptive_impl.h +++ b/neural_modelling/src/neuron/additional_inputs/additional_input_ca2_adaptive_impl.h @@ -77,7 +77,7 @@ static inline input_t additional_input_get_input_value_as_current( additional_input->i_ca2 *= additional_input->exp_tau_ca2; // Return the Ca2 - return -additional_input->i_ca2; + return additional_input->i_ca2; } //! 
\brief Notifies the additional input type that the neuron has spiked diff --git a/neural_modelling/src/neuron/c_main.c b/neural_modelling/src/neuron/c_main.c index b11909a47b..4bd4c6462d 100644 --- a/neural_modelling/src/neuron/c_main.c +++ b/neural_modelling/src/neuron/c_main.c @@ -161,9 +161,9 @@ static inline void process_ring_buffers(void) { neuron_transfer(&ring_buffers[first_index]); // Print the neuron inputs. - #if LOG_LEVEL >= LOG_DEBUG - neuron_print_inputs(); - #endif // LOG_LEVEL >= LOG_DEBUG +#if LOG_LEVEL >= LOG_DEBUG + neuron_print_inputs(); +#endif // LOG_LEVEL >= LOG_DEBUG } //! \brief Background activities called from timer diff --git a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h index e801423fd9..95960bb940 100644 --- a/neural_modelling/src/neuron/implementations/neuron_impl_standard.h +++ b/neural_modelling/src/neuron/implementations/neuron_impl_standard.h @@ -252,7 +252,7 @@ static void neuron_impl_do_timestep_update( additional_input_t *additional_inputs = &additional_input_array[neuron_index]; synapse_types_t *the_synapse_type = &synapse_types_array[neuron_index]; - bool spike = false; +// bool spike = false; // Loop however many times requested; do this in reverse for efficiency, // and because the index doesn't actually matter @@ -320,7 +320,7 @@ static void neuron_impl_do_timestep_update( // If spike occurs, communicate to relevant parts of model if (spike_now) { - spike = true; +// spike = true; // Call relevant model-based functions // Tell the neuron model @@ -329,11 +329,11 @@ static void neuron_impl_do_timestep_update( // Tell the additional input additional_input_has_spiked(additional_inputs); -// // Record the spike -// neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, neuron_index); -// -// // Send the spike -// send_spike(timer_count, time, neuron_index); + // Record the spike + neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, 
neuron_index); + + // Send the spike + send_spike(timer_count, time, neuron_index); } // Shape the existing input according to the included rule @@ -344,13 +344,13 @@ static void neuron_impl_do_timestep_update( neuron_model_print_state_variables(this_neuron); #endif // LOG_LEVEL >= LOG_DEBUG - if (spike) { - // Record the spike - neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, neuron_index); - - // Send the spike - send_spike(timer_count, time, neuron_index); - } +// if (spike) { +// // Record the spike +// neuron_recording_record_bit(SPIKE_RECORDING_BITFIELD, neuron_index); +// +// // Send the spike +// send_spike(timer_count, time, neuron_index); +// } } } diff --git a/neural_modelling/src/neuron/models/neuron_model_izh_impl.h b/neural_modelling/src/neuron/models/neuron_model_izh_impl.h index a9f6c5d2e0..ac5912d269 100644 --- a/neural_modelling/src/neuron/models/neuron_model_izh_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_izh_impl.h @@ -179,7 +179,7 @@ static inline state_t neuron_model_state_update( } input_t input_this_timestep = total_exc - total_inh - + external_bias + neuron->I_offset + current_offset; + - external_bias + neuron->I_offset + current_offset; // the best AR update so far rk2_kernel_midpoint(neuron->this_h, neuron, input_this_timestep); diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index 2555a88d8b..35001e4b0c 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -165,7 +165,7 @@ static inline state_t neuron_model_state_update( } // Get the input in nA input_t input_this_timestep = - total_exc - total_inh + external_bias + neuron->I_offset + current_offset; + total_exc - total_inh - external_bias + neuron->I_offset + current_offset; lif_neuron_closed_form( neuron, neuron->V_membrane, input_this_timestep); diff --git 
a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h index f047004e65..cd59c0692e 100644 --- a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h +++ b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h @@ -22,7 +22,7 @@ #include #include -#include "round.h" +//#include "round.h" //! The type of exponential decay parameters typedef struct exp_params_t { @@ -61,11 +61,11 @@ static inline void decay_and_init(exp_state_t *state, exp_params_t *params, //! \param[in,out] exp_param: The parameter to shape static inline void exp_shaping(exp_state_t *exp_param) { // decay value according to decay constant -// exp_param->synaptic_input_value = -// decay_s1615(exp_param->synaptic_input_value, exp_param->decay); - exp_param->synaptic_input_value = - MULT_ROUND_NEAREST_ACCUM(exp_param->synaptic_input_value, - exp_param->decay); + exp_param->synaptic_input_value = + decay_s1615(exp_param->synaptic_input_value, exp_param->decay); +// exp_param->synaptic_input_value = +// MULT_ROUND_NEAREST_ACCUM(exp_param->synaptic_input_value, +// exp_param->decay); } //! \brief helper function to add input for a given timer period to a given @@ -73,8 +73,8 @@ static inline void exp_shaping(exp_state_t *exp_param) { //! \param[in,out] parameter: the parameter to update //! \param[in] input: the input to add. 
static inline void add_input_exp(exp_state_t *parameter, input_t input) { -// parameter->synaptic_input_value = parameter->synaptic_input_value + -// decay_s1615(input, parameter->init); parameter->synaptic_input_value = parameter->synaptic_input_value + - MULT_ROUND_NEAREST_ACCUM(input, parameter->init); + decay_s1615(input, parameter->init); +// parameter->synaptic_input_value = parameter->synaptic_input_value + +// MULT_ROUND_NEAREST_ACCUM(input, parameter->init); } diff --git a/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h b/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h index 5c4310e886..581490207a 100644 --- a/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h +++ b/neural_modelling/src/neuron/synapse_types/synapse_types_alpha_impl.h @@ -145,9 +145,9 @@ static inline void add_input_alpha(alpha_state_t *a_params, input_t input) { a_params->exp_buff = decay_s1615(a_params->exp_buff, a_params->decay) + ONE; - a_params->lin_buff = - (a_params->lin_buff + (input * a_params->dt_divided_by_tau_sqr)) - * (ONE - kdivk(ONE, a_params->exp_buff)); + REAL exp_temp = ONE - kdivk(ONE, a_params->exp_buff); + a_params->lin_buff = (a_params->lin_buff + ( + input * a_params->dt_divided_by_tau_sqr)) * exp_temp; } //! \brief adds the inputs for a give timer period to a given neuron that is @@ -222,7 +222,7 @@ static inline const char *synapse_types_get_type_char( //! 
\param[in] parameters: the pointer to the parameters to print static inline void synapse_types_print_input( synapse_types_t *parameters) { - io_printf(IO_BUF, "%12.6k - %12.6k", + log_debug("%12.6k - %12.6k", parameters->exc.lin_buff * parameters->exc.exp_buff, parameters->inh.lin_buff * parameters->inh.exp_buff); } From c2de26b13aed405792761f9d95288dbd760da580 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 5 Dec 2022 14:02:20 +0000 Subject: [PATCH 179/198] vera remove trailing line --- neural_modelling/src/neuron/spike_processing_fast.c | 1 - 1 file changed, 1 deletion(-) diff --git a/neural_modelling/src/neuron/spike_processing_fast.c b/neural_modelling/src/neuron/spike_processing_fast.c index 2c698e609c..72c1fb37fd 100644 --- a/neural_modelling/src/neuron/spike_processing_fast.c +++ b/neural_modelling/src/neuron/spike_processing_fast.c @@ -713,4 +713,3 @@ uint32_t spike_processing_get_total_flushed_spikes(){ uint32_t spike_processing_get_max_flushed_spikes(){ return max_flushed_spikes; } - From 94f97a98725cf828e7a2b8aa670b98d0a2552b6a Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 12 Jan 2023 09:15:50 +0000 Subject: [PATCH 180/198] pylint is now more strict --- .../models/neuron/abstract_population_vertex.py | 8 ++++---- .../synapse_dynamics/abstract_synapse_dynamics.py | 1 + spynnaker/pyNN/utilities/utility_calls.py | 13 +++++++------ 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index bb3e7b932b..35231d918e 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -1047,6 +1047,7 @@ def __calculate_min_weights(self): # Now check that the maximum weight isn't too big for proj in self.incoming_projections: + # pylint: disable-next=protected-access synapse_info = proj._synapse_information synapse_type = synapse_info.synapse_type 
connector = synapse_info.connector @@ -1074,6 +1075,7 @@ def __check_weights( :param float weight_scale: The weight_scale from the synapse input_type """ for proj in self.incoming_projections: + # pylint: disable-next=protected-access synapse_info = proj._synapse_information weights = synapse_info.weights synapse_type = synapse_info.synapse_type @@ -1520,13 +1522,11 @@ def get_local_provenance_data(self): with ProvenanceWriter() as db: for i, weight in enumerate(self.__min_weights): db.insert_app_vertex( - self.label, - synapse_names[i], "min_weight", - weight), + self.label, synapse_names[i], "min_weight", weight) for (weight, r_weight) in self.__weight_provenance: proj_info = self.__weight_provenance[weight, r_weight] - for i, (proj, s_info) in enumerate(proj_info): + for i, (_proj, s_info) in enumerate(proj_info): db.insert_connector( s_info.pre_population.label, s_info.post_population.label, diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py index 0e59f6578f..54bf6a90a4 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/abstract_synapse_dynamics.py @@ -217,6 +217,7 @@ def calculate_min_weight(self, min_weights, max_stdp_spike_delta, :param int synapse_type: the synapse ID for which to calculate the min :rtype: list """ + # pylint: disable=unused-argument # By default no further calculation is required return min_weights diff --git a/spynnaker/pyNN/utilities/utility_calls.py b/spynnaker/pyNN/utilities/utility_calls.py index b718003832..86817afa25 100644 --- a/spynnaker/pyNN/utilities/utility_calls.py +++ b/spynnaker/pyNN/utilities/utility_calls.py @@ -362,6 +362,7 @@ def float_gcd(a, b): :rtype: float """ if (a < b): + # pylint: disable-next=arguments-out-of-order return float_gcd(b, a) # base case @@ -371,7 +372,7 @@ def float_gcd(a, b): return (float_gcd(b, a - 
math.floor(a / b) * b)) -def float_gcd_of_array(input): +def float_gcd_of_array(input_array): """ Work out the floating point gcd of an array of numbers @@ -379,13 +380,13 @@ def float_gcd_of_array(input): :return: the floating point gcd of the array :rtype: float """ - if len(input) == 1: - return input[0] + if len(input_array) == 1: + return input_array[0] - gcd = float_gcd(input[0], input[1]) + gcd = float_gcd(input_array[0], input_array[1]) - for i in range(2, len(input)): - gcd = float_gcd(gcd, input[i]) + for i in range(2, len(input_array)): + gcd = float_gcd(gcd, input_array[i]) return gcd From 7ea6378b2984d492ee9ac8c08c0c18fd779c4bca Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 17 Jan 2023 12:14:40 +0000 Subject: [PATCH 181/198] Try 16 delay tics by default --- .../abstract_spynnaker_splitter_delay.py | 2 +- .../model_tests/neuron/test_synaptic_manager.py | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py index 2a78e4b82b..29f799e00a 100644 --- a/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py +++ b/spynnaker/pyNN/extra_algorithms/splitter_components/abstract_spynnaker_splitter_delay.py @@ -28,7 +28,7 @@ class AbstractSpynnakerSplitterDelay(object, metaclass=AbstractBase): __slots__ = [] # max delays supported by a slice split machine vertex - MAX_SUPPORTED_DELAY_TICS = 64 # can this be 16? 
+ MAX_SUPPORTED_DELAY_TICS = 16 def max_support_delay(self): """ diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index 0ed9b3e0f6..8d37b84d65 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -407,23 +407,23 @@ def test_set_synapse_dynamics(): # Only undelayed, all edges exist (range(10), [], 1000, 100, None), # Only delayed, all edges exist (note max_delay=20 on master) - ([], range(10), 1000, 100, 100), + ([], range(10), 1000, 100, 20), # All undelayed and delayed edges exist - (range(10), range(10), 1000, 100, 100), + (range(10), range(10), 1000, 100, 20), # Only undelayed, some connections missing but app keys can still work ([0, 1, 2, 3, 4], [], 1000, 100, None), # Only delayed, some connections missing but app keys can still work - ([], [5, 6, 7, 8, 9], 1000, 100, 100), + ([], [5, 6, 7, 8, 9], 1000, 100, 20), # Both delayed and undelayed, some undelayed edges don't exist # (app keys work because undelayed aren't filtered) - ([3, 4, 5, 6, 7], range(10), 1000, 100, 100), + ([3, 4, 5, 6, 7], range(10), 1000, 100, 20), # Both delayed and undelayed, some delayed edges don't exist # (app keys work because all undelayed exist) - (range(10), [4, 5, 6, 7], 1000, 100, 100), + (range(10), [4, 5, 6, 7], 1000, 100, 20), # Should work but number of cores doesn't work out (range(2000), [], 10000, 5, None), # Should work but number of neurons with delays don't work out - ([], range(4), 1024, 256, 576) # 144 on master + ([], range(4), 1024, 256, 144) ]) def test_pop_based_master_pop_table_standard( undelayed_indices_connected, delayed_indices_connected, From d3e4fe59ee7ec248ebb19d14fd296bc5739492a9 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 10 Feb 2023 10:02:35 +0000 Subject: [PATCH 182/198] pylint fix exception --- .../models/neural_projections/connectors/abstract_connector.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 1c25d6233d..9ff846b421 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -312,7 +312,7 @@ def get_weight_minimum(self, weights, weight_random_sigma, synapse_info): if len(non_zero_weights) == 0: return 0.0 return utility_calls.float_gcd_of_array(non_zero_weights) - raise Exception("Unrecognised weight format") + raise SpynnakerException("Unrecognised weight format") def _get_weight_maximum(self, weights, n_connections, synapse_info): """ Get the maximum of the weights. From ed6aad6dcc2f539c4a551c43b0ac472ed265605e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Mon, 6 Mar 2023 14:22:17 +0000 Subject: [PATCH 183/198] Turn this error off for now --- neural_modelling/src/common/send_mc.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/neural_modelling/src/common/send_mc.h b/neural_modelling/src/common/send_mc.h index 871676bce1..5c1f9848aa 100644 --- a/neural_modelling/src/common/send_mc.h +++ b/neural_modelling/src/common/send_mc.h @@ -32,10 +32,10 @@ static inline void wait_for_cc(void) { spin1_delay_us(1); n_loops++; } - if (!(cc[CC_TCR] & TX_NOT_FULL_MASK)) { - log_error("Couldn't send spike; TCR=0x%08x\n", cc[CC_TCR]); - rt_error(RTE_SWERR); - } +// if (!(cc[CC_TCR] & TX_NOT_FULL_MASK)) { +// log_error("Couldn't send spike; TCR=0x%08x\n", cc[CC_TCR]); +// rt_error(RTE_SWERR); +// } } //! 
\brief Perform direct spike sending with hardware for speed From f9720f6e666425785939084225080f0c070f2c26 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 11 Apr 2023 18:17:36 +0100 Subject: [PATCH 184/198] Fix doc error --- spynnaker/pyNN/models/neuron/abstract_population_vertex.py | 1 + 1 file changed, 1 insertion(+) diff --git a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py index 1b14616e22..29d402160e 100644 --- a/spynnaker/pyNN/models/neuron/abstract_population_vertex.py +++ b/spynnaker/pyNN/models/neuron/abstract_population_vertex.py @@ -1571,6 +1571,7 @@ def get_max_delay(self, max_ring_buffer_bits): """ Get the maximum delay and whether a delay extension is needed for a given maximum number of ring buffer bits. + :param int max_ring_buffer_bits: The maximum number of bits that can be used for the ring buffer identifier (i.e. delay, synapse type, neuron index) From 19df82da36a4c2431ed26761956ebcc28a27abd4 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 21 Apr 2023 13:41:43 +0100 Subject: [PATCH 185/198] Remove debug print statements --- .../models/neuron/synapse_dynamics/synapse_dynamics_stdp.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index 6d59873c43..fbd94ccdf8 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -517,9 +517,7 @@ def __get_weight_min_delta(self, max_stdp_spike_delta): def calculate_min_weight(self, min_weights, max_stdp_spike_delta, weight_scale, conn_weight_min, synapse_type): min_delta = self.__get_weight_min_delta(max_stdp_spike_delta) - print("min_delta ", min_delta) min_delta *= weight_scale - print("scaled ", min_delta, synapse_type, conn_weight_min, min_weights) if min_delta is not None 
and min_delta != 0: # This also depends on the earlier calculated minimum min_delta = float_gcd(min_delta, conn_weight_min) From 3a71b6e42acb13f58b3fcf62cb585322a4ffd2a6 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 23 Jun 2023 12:33:17 +0100 Subject: [PATCH 186/198] DataType is now in FrontEndCommon --- spynnaker/pyNN/models/neuron/population_machine_neurons.py | 4 ++-- spynnaker/pyNN/models/neuron/population_machine_synapses.py | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/population_machine_neurons.py b/spynnaker/pyNN/models/neuron/population_machine_neurons.py index 7323560b2f..28f6781e2f 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_neurons.py +++ b/spynnaker/pyNN/models/neuron/population_machine_neurons.py @@ -14,14 +14,14 @@ import ctypes from dataclasses import dataclass -from data_specification.enums import DataType - from spinn_utilities.abstract_base import abstractproperty, abstractmethod from spinn_utilities.overrides import overrides from pacman.utilities.utility_calls import get_field_based_keys +from spinn_front_end_common.interface.ds import DataType from spinn_front_end_common.interface.provenance import ProvenanceWriter + from spynnaker.pyNN.data import SpynnakerDataView from spynnaker.pyNN.utilities.constants import SPIKE_PARTITION_ID from spynnaker.pyNN.utilities.utility_calls import get_n_bits diff --git a/spynnaker/pyNN/models/neuron/population_machine_synapses.py b/spynnaker/pyNN/models/neuron/population_machine_synapses.py index e1fc486f49..d883b74482 100644 --- a/spynnaker/pyNN/models/neuron/population_machine_synapses.py +++ b/spynnaker/pyNN/models/neuron/population_machine_synapses.py @@ -14,8 +14,7 @@ from spinn_utilities.overrides import overrides from spinn_utilities.abstract_base import abstractproperty, abstractmethod -from data_specification.enums import DataType - +from spinn_front_end_common.interface.ds import DataType from 
spinn_front_end_common.utilities.helpful_functions import ( locate_memory_region_for_placement) from spinn_front_end_common.abstract_models import ( From 9ac6bb8c1314784b720c2376a442f1db322d2592 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 23 Jun 2023 12:36:13 +0100 Subject: [PATCH 187/198] Upgrade actions OS (to use a newer compiler) --- .github/workflows/c_actions.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/c_actions.yml b/.github/workflows/c_actions.yml index f6b8e70d29..dd105ac1e2 100644 --- a/.github/workflows/c_actions.yml +++ b/.github/workflows/c_actions.yml @@ -19,7 +19,7 @@ name: C Actions on: [push] jobs: build: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 timeout-minutes: 10 steps: - name: Checkout From c2cbfaa1280e205fe0cb3cf705d549fa83aef2dd Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 7 Jul 2023 13:58:14 +0100 Subject: [PATCH 188/198] Update license --- neural_modelling/src/neuron/spike_profiling.h | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/neural_modelling/src/neuron/spike_profiling.h b/neural_modelling/src/neuron/spike_profiling.h index 958dfd858b..225d479c7b 100644 --- a/neural_modelling/src/neuron/spike_profiling.h +++ b/neural_modelling/src/neuron/spike_profiling.h @@ -1,18 +1,17 @@ /* - * Copyright (c) 2017-2021 The University of Manchester + * Copyright (c) 2021 The University of Manchester * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. + * https://www.apache.org/licenses/LICENSE-2.0 * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ #include From 257ce82b9411e0d1d8e081598c56ef52beeffe4d Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 4 Aug 2023 11:07:14 +0100 Subject: [PATCH 189/198] Fix merge --- .../external_devices_models/external_device_lif_control.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py index 1bbca81703..047c5b2157 100644 --- a/spynnaker/pyNN/external_devices_models/external_device_lif_control.py +++ b/spynnaker/pyNN/external_devices_models/external_device_lif_control.py @@ -111,9 +111,5 @@ def create_vertex( self._devices, self._create_edges, max_atoms, self._model, self, self._translator, spikes_per_second, label, ring_buffer_sigma, incoming_spike_buffer_size, drop_late_spikes, splitter, seed, -<<<<<<< HEAD - n_colour_bits, rb_left_shifts) -======= n_colour_bits, min_weights, weight_random_sigma, - max_stdp_spike_delta) ->>>>>>> refs/heads/weight_scale + max_stdp_spike_delta, rb_left_shifts) From f381fb69761d604dc06fe50674109fc5662cb7b5 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 4 Aug 2023 12:02:47 +0100 Subject: [PATCH 
190/198] flake8 --- unittests/model_tests/neuron/test_synaptic_manager.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/unittests/model_tests/neuron/test_synaptic_manager.py b/unittests/model_tests/neuron/test_synaptic_manager.py index 35e4fa7e61..e56305d39a 100644 --- a/unittests/model_tests/neuron/test_synaptic_manager.py +++ b/unittests/model_tests/neuron/test_synaptic_manager.py @@ -32,8 +32,8 @@ from spinn_front_end_common.interface.interface_functions import ( load_application_data_specs) from spynnaker.pyNN.data.spynnaker_data_writer import SpynnakerDataWriter -from spynnaker.pyNN.models.neuron.synaptic_matrices import SynapticMatrices,\ - SynapseRegions +from spynnaker.pyNN.models.neuron.synaptic_matrices import ( + SynapticMatrices, SynapseRegions) from spynnaker.pyNN.models.neuron.synapse_dynamics import ( SynapseDynamicsStatic, SynapseDynamicsStructuralSTDP, SynapseDynamicsSTDP, SynapseDynamicsStructuralStatic, From 0161cb0061785c162dda194020301633c30d53b5 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 4 Aug 2023 12:15:42 +0100 Subject: [PATCH 191/198] Not sure how this license got missed in merge --- .../makefiles/neuron/IF_cond_alpha/Makefile | 21 +++++++++---------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/neural_modelling/makefiles/neuron/IF_cond_alpha/Makefile b/neural_modelling/makefiles/neuron/IF_cond_alpha/Makefile index 45546379d5..99f7033f24 100644 --- a/neural_modelling/makefiles/neuron/IF_cond_alpha/Makefile +++ b/neural_modelling/makefiles/neuron/IF_cond_alpha/Makefile @@ -1,17 +1,16 @@ -# Copyright (c) 2017-2019 The University of Manchester +# Copyright (c) 2017 The University of Manchester # -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
+# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at # -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. +# https://www.apache.org/licenses/LICENSE-2.0 # -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. APP = $(notdir $(CURDIR)) From 9d0022fb848afb6ee8d2f315b47cb715f28050cf Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 11 Aug 2023 11:59:06 +0100 Subject: [PATCH 192/198] Add round-to-nearest multiplies back in neuron components --- .../input_types/input_type_conductance.h | 10 +++++----- .../neuron/synapse_types/exp_synapse_utils.h | 20 ++++++++++--------- 2 files changed, 16 insertions(+), 14 deletions(-) diff --git a/neural_modelling/src/neuron/input_types/input_type_conductance.h b/neural_modelling/src/neuron/input_types/input_type_conductance.h index 53d7896bb9..fc1814d04c 100644 --- a/neural_modelling/src/neuron/input_types/input_type_conductance.h +++ b/neural_modelling/src/neuron/input_types/input_type_conductance.h @@ -20,7 +20,7 @@ #define _INPUT_TYPE_CONDUCTANCE_H_ #include "input_type.h" -//#include "round.h" +#include "round.h" //! 
Conductance input parameters struct input_type_params_t { @@ -76,11 +76,11 @@ static inline void input_type_convert_excitatory_input_to_current( state_t membrane_voltage) { for (int i=0; i < NUM_EXCITATORY_RECEPTORS; i++) { // accum = accum * (accum - accum) - exc_input[i] = exc_input[i] * - (input_type->V_rev_E - membrane_voltage); +// exc_input[i] = exc_input[i] * +// (input_type->V_rev_E - membrane_voltage); // RTN accum -// exc_input[i] = MULT_ROUND_NEAREST_ACCUM(exc_input[i], -// (input_type->V_rev_E - membrane_voltage)); + exc_input[i] = MULT_ROUND_NEAREST_ACCUM(exc_input[i], + (input_type->V_rev_E - membrane_voltage)); } } diff --git a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h index 4ff7cbb181..1cec3a17ef 100644 --- a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h +++ b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h @@ -21,7 +21,7 @@ #include #include -//#include "round.h" +#include "round.h" //! The type of exponential decay parameters typedef struct exp_params_t { @@ -60,11 +60,12 @@ static inline void decay_and_init(exp_state_t *state, exp_params_t *params, //! \param[in,out] exp_param: The parameter to shape static inline void exp_shaping(exp_state_t *exp_param) { // decay value according to decay constant - exp_param->synaptic_input_value = - decay_s1615(exp_param->synaptic_input_value, exp_param->decay); -// exp_param->synaptic_input_value = -// MULT_ROUND_NEAREST_ACCUM(exp_param->synaptic_input_value, -// exp_param->decay); +// exp_param->synaptic_input_value = +// decay_s1615(exp_param->synaptic_input_value, exp_param->decay); + // RTN testing + exp_param->synaptic_input_value = + MULT_ROUND_NEAREST_ACCUM(exp_param->synaptic_input_value, + exp_param->decay); } //! \brief helper function to add input for a given timer period to a given @@ -72,8 +73,9 @@ static inline void exp_shaping(exp_state_t *exp_param) { //! 
\param[in,out] parameter: the parameter to update //! \param[in] input: the input to add. static inline void add_input_exp(exp_state_t *parameter, input_t input) { - parameter->synaptic_input_value = parameter->synaptic_input_value + - decay_s1615(input, parameter->init); // parameter->synaptic_input_value = parameter->synaptic_input_value + -// MULT_ROUND_NEAREST_ACCUM(input, parameter->init); +// decay_s1615(input, parameter->init); + // RTN testing + parameter->synaptic_input_value = parameter->synaptic_input_value + + MULT_ROUND_NEAREST_ACCUM(input, parameter->init); } From 69a5ec4ba6b0b42762d863045e9a4c5e05cb84e2 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 11 Aug 2023 14:40:18 +0100 Subject: [PATCH 193/198] Adding rounding here should work but causes UNDEF errors? --- neural_modelling/src/neuron/synapse_row.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h index 6a20d4f282..12b7fb1384 100644 --- a/neural_modelling/src/neuron/synapse_row.h +++ b/neural_modelling/src/neuron/synapse_row.h @@ -89,6 +89,7 @@ #define _SYNAPSE_ROW_H_ #include +//#include //! how many bits the synapse weight will take #ifndef SYNAPSE_WEIGHT_BITS @@ -251,6 +252,7 @@ static inline input_t synapse_row_convert_weight_to_input( uint64_t w = (uint64_t) (weight); return kbits((int_k_t) (mw * w)); +// return MULT_ROUND_NEAREST_ACCUM(bitsk(weight), min_weight); } //! 
\brief Get the index of the ring buffer for a given timestep, synapse type From da5a0b3a4ac7e83a5ce7c6b9d525cf17c20897b8 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Fri, 11 Aug 2023 14:42:32 +0100 Subject: [PATCH 194/198] Add comment about UNDEF --- neural_modelling/src/neuron/synapse_row.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h index 12b7fb1384..a20f0e46f4 100644 --- a/neural_modelling/src/neuron/synapse_row.h +++ b/neural_modelling/src/neuron/synapse_row.h @@ -89,7 +89,7 @@ #define _SYNAPSE_ROW_H_ #include -//#include +#include //! how many bits the synapse weight will take #ifndef SYNAPSE_WEIGHT_BITS @@ -252,6 +252,7 @@ static inline input_t synapse_row_convert_weight_to_input( uint64_t w = (uint64_t) (weight); return kbits((int_k_t) (mw * w)); + // This should work but seems to lead to UNDEF errors? // return MULT_ROUND_NEAREST_ACCUM(bitsk(weight), min_weight); } From 0cb28ac5873740e5210ec5ea1b107eecf1830bf3 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 12 Sep 2023 10:14:35 +0100 Subject: [PATCH 195/198] Use stochastic rounding everywhere, particularly for synapse row --- .../src/neuron/input_types/input_type_conductance.h | 12 +++--------- .../src/neuron/models/neuron_model_lif_impl.h | 6 ++---- neural_modelling/src/neuron/synapse_row.h | 10 +++------- .../src/neuron/synapse_types/exp_synapse_utils.h | 10 ++-------- 4 files changed, 10 insertions(+), 28 deletions(-) diff --git a/neural_modelling/src/neuron/input_types/input_type_conductance.h b/neural_modelling/src/neuron/input_types/input_type_conductance.h index fc1814d04c..77c30e6a18 100644 --- a/neural_modelling/src/neuron/input_types/input_type_conductance.h +++ b/neural_modelling/src/neuron/input_types/input_type_conductance.h @@ -76,10 +76,7 @@ static inline void input_type_convert_excitatory_input_to_current( state_t membrane_voltage) { for (int i=0; i < 
NUM_EXCITATORY_RECEPTORS; i++) { // accum = accum * (accum - accum) -// exc_input[i] = exc_input[i] * -// (input_type->V_rev_E - membrane_voltage); - // RTN accum - exc_input[i] = MULT_ROUND_NEAREST_ACCUM(exc_input[i], + exc_input[i] = MULT_ROUND_STOCHASTIC_ACCUM(exc_input[i], (input_type->V_rev_E - membrane_voltage)); } } @@ -95,11 +92,8 @@ static inline void input_type_convert_inhibitory_input_to_current( state_t membrane_voltage) { for (int i=0; i < NUM_INHIBITORY_RECEPTORS; i++) { // accum = accum * (accum - accum) - inh_input[i] = -inh_input[i] * - (input_type->V_rev_I - membrane_voltage); - // RTN accum -// inh_input[i] = MULT_ROUND_NEAREST_ACCUM(-inh_input[i], -// (input_type->V_rev_I - membrane_voltage)); + inh_input[i] = MULT_ROUND_STOCHASTIC_ACCUM(-inh_input[i], + (input_type->V_rev_I - membrane_voltage)); } } diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index 191d51ad9d..5d997435c7 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -120,14 +120,12 @@ static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *par static inline void lif_neuron_closed_form( neuron_t *neuron, REAL V_prev, input_t input_this_timestep) { // accum = accum * accum + accum -// REAL alpha = (input_this_timestep * neuron->R_membrane) + neuron->V_rest; - REAL alpha = MULT_ROUND_NEAREST_ACCUM( + REAL alpha = MULT_ROUND_STOCHASTIC_ACCUM( input_this_timestep, neuron->R_membrane) + neuron->V_rest; // update membrane voltage // accum - (ufract * (accum - accum)) -// neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); - neuron->V_membrane = alpha - MULT_ROUND_NEAREST_ACCUM( + neuron->V_membrane = alpha - MULT_ROUND_STOCHASTIC_ACCUM( neuron->exp_TC, (alpha - V_prev)); } diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h index 
a20f0e46f4..e9bbdbaa9c 100644 --- a/neural_modelling/src/neuron/synapse_row.h +++ b/neural_modelling/src/neuron/synapse_row.h @@ -247,13 +247,9 @@ static inline weight_t synapse_row_sparse_weight(uint32_t x) { //! \return the actual input weight for the model static inline input_t synapse_row_convert_weight_to_input( weight_t weight, REAL min_weight) { - // Simply doing weight * min_weight adds unnecessary compiler instructions - uint64_t mw = (uint64_t) bitsk(min_weight); - uint64_t w = (uint64_t) (weight); - - return kbits((int_k_t) (mw * w)); - // This should work but seems to lead to UNDEF errors? -// return MULT_ROUND_NEAREST_ACCUM(bitsk(weight), min_weight); + // Stochastic rounding requires accums so convert weight to appropriate + // value before multiplying + return MULT_ROUND_STOCHASTIC_ACCUM(kbits(weight << 15), min_weight); } //! \brief Get the index of the ring buffer for a given timestep, synapse type diff --git a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h index 1cec3a17ef..e0fc36073d 100644 --- a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h +++ b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h @@ -60,11 +60,8 @@ static inline void decay_and_init(exp_state_t *state, exp_params_t *params, //! \param[in,out] exp_param: The parameter to shape static inline void exp_shaping(exp_state_t *exp_param) { // decay value according to decay constant -// exp_param->synaptic_input_value = -// decay_s1615(exp_param->synaptic_input_value, exp_param->decay); - // RTN testing exp_param->synaptic_input_value = - MULT_ROUND_NEAREST_ACCUM(exp_param->synaptic_input_value, + MULT_ROUND_STOCHASTIC_ACCUM(exp_param->synaptic_input_value, exp_param->decay); } @@ -73,9 +70,6 @@ static inline void exp_shaping(exp_state_t *exp_param) { //! \param[in,out] parameter: the parameter to update //! \param[in] input: the input to add. 
static inline void add_input_exp(exp_state_t *parameter, input_t input) { -// parameter->synaptic_input_value = parameter->synaptic_input_value + -// decay_s1615(input, parameter->init); - // RTN testing parameter->synaptic_input_value = parameter->synaptic_input_value + - MULT_ROUND_NEAREST_ACCUM(input, parameter->init); + MULT_ROUND_STOCHASTIC_ACCUM(input, parameter->init); } From 17339cb32a5ef8006b45dac08352d59ae862e46e Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Tue, 12 Sep 2023 14:10:21 +0100 Subject: [PATCH 196/198] Add previous method back in comment --- neural_modelling/src/neuron/synapse_row.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/neural_modelling/src/neuron/synapse_row.h b/neural_modelling/src/neuron/synapse_row.h index e9bbdbaa9c..0f32fa1dd8 100644 --- a/neural_modelling/src/neuron/synapse_row.h +++ b/neural_modelling/src/neuron/synapse_row.h @@ -247,6 +247,12 @@ static inline weight_t synapse_row_sparse_weight(uint32_t x) { //! \return the actual input weight for the model static inline input_t synapse_row_convert_weight_to_input( weight_t weight, REAL min_weight) { +// // Simply doing weight * min_weight adds unnecessary compiler instructions +// uint64_t mw = (uint64_t) bitsk(min_weight); +// uint64_t w = (uint64_t) (weight); +// +// return kbits((int_k_t) (mw * w)); + // Stochastic rounding requires accums so convert weight to appropriate // value before multiplying return MULT_ROUND_STOCHASTIC_ACCUM(kbits(weight << 15), min_weight); From e9a695660f131c960636756741849d685ce62585 Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Wed, 20 Sep 2023 14:07:12 +0100 Subject: [PATCH 197/198] Try this instead --- .../src/neuron/input_types/input_type_conductance.h | 12 ++++++++---- .../src/neuron/models/neuron_model_lif_impl.h | 10 ++++++---- .../src/neuron/synapse_types/exp_synapse_utils.h | 12 ++++++++---- 3 files changed, 22 insertions(+), 12 deletions(-) diff --git a/neural_modelling/src/neuron/input_types/input_type_conductance.h 
b/neural_modelling/src/neuron/input_types/input_type_conductance.h index 77c30e6a18..11fb591fef 100644 --- a/neural_modelling/src/neuron/input_types/input_type_conductance.h +++ b/neural_modelling/src/neuron/input_types/input_type_conductance.h @@ -76,8 +76,10 @@ static inline void input_type_convert_excitatory_input_to_current( state_t membrane_voltage) { for (int i=0; i < NUM_EXCITATORY_RECEPTORS; i++) { // accum = accum * (accum - accum) - exc_input[i] = MULT_ROUND_STOCHASTIC_ACCUM(exc_input[i], - (input_type->V_rev_E - membrane_voltage)); +// exc_input[i] = MULT_ROUND_STOCHASTIC_ACCUM(exc_input[i], +// (input_type->V_rev_E - membrane_voltage)); + exc_input[i] = exc_input[i] * + (input_type->V_rev_E - membrane_voltage); } } @@ -92,8 +94,10 @@ static inline void input_type_convert_inhibitory_input_to_current( state_t membrane_voltage) { for (int i=0; i < NUM_INHIBITORY_RECEPTORS; i++) { // accum = accum * (accum - accum) - inh_input[i] = MULT_ROUND_STOCHASTIC_ACCUM(-inh_input[i], - (input_type->V_rev_I - membrane_voltage)); +// inh_input[i] = MULT_ROUND_STOCHASTIC_ACCUM(-inh_input[i], +// (input_type->V_rev_I - membrane_voltage)); + inh_input[i] = -inh_input[i] * + (input_type->V_rev_I - membrane_voltage); } } diff --git a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h index 5d997435c7..3dbeba8b09 100644 --- a/neural_modelling/src/neuron/models/neuron_model_lif_impl.h +++ b/neural_modelling/src/neuron/models/neuron_model_lif_impl.h @@ -120,13 +120,15 @@ static inline void neuron_model_save_state(neuron_t *state, neuron_params_t *par static inline void lif_neuron_closed_form( neuron_t *neuron, REAL V_prev, input_t input_this_timestep) { // accum = accum * accum + accum - REAL alpha = MULT_ROUND_STOCHASTIC_ACCUM( - input_this_timestep, neuron->R_membrane) + neuron->V_rest; +// REAL alpha = MULT_ROUND_STOCHASTIC_ACCUM( +// input_this_timestep, neuron->R_membrane) + neuron->V_rest; + REAL alpha 
= input_this_timestep * neuron->R_membrane + neuron->V_rest; // update membrane voltage // accum - (ufract * (accum - accum)) - neuron->V_membrane = alpha - MULT_ROUND_STOCHASTIC_ACCUM( - neuron->exp_TC, (alpha - V_prev)); +// neuron->V_membrane = alpha - MULT_ROUND_STOCHASTIC_ACCUM( +// neuron->exp_TC, (alpha - V_prev)); + neuron->V_membrane = alpha - (neuron->exp_TC * (alpha - V_prev)); } //! \brief primary function called in timer loop after synaptic updates diff --git a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h index e0fc36073d..d377366e1d 100644 --- a/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h +++ b/neural_modelling/src/neuron/synapse_types/exp_synapse_utils.h @@ -60,9 +60,11 @@ static inline void decay_and_init(exp_state_t *state, exp_params_t *params, //! \param[in,out] exp_param: The parameter to shape static inline void exp_shaping(exp_state_t *exp_param) { // decay value according to decay constant - exp_param->synaptic_input_value = - MULT_ROUND_STOCHASTIC_ACCUM(exp_param->synaptic_input_value, - exp_param->decay); +// exp_param->synaptic_input_value = +// MULT_ROUND_STOCHASTIC_ACCUM(exp_param->synaptic_input_value, +// exp_param->decay); + exp_param->synaptic_input_value = + decay_s1615(exp_param->synaptic_input_value, exp_param->decay); } //! \brief helper function to add input for a given timer period to a given @@ -70,6 +72,8 @@ static inline void exp_shaping(exp_state_t *exp_param) { //! \param[in,out] parameter: the parameter to update //! \param[in] input: the input to add. 
static inline void add_input_exp(exp_state_t *parameter, input_t input) { +// parameter->synaptic_input_value = parameter->synaptic_input_value + +// MULT_ROUND_STOCHASTIC_ACCUM(input, parameter->init); parameter->synaptic_input_value = parameter->synaptic_input_value + - MULT_ROUND_STOCHASTIC_ACCUM(input, parameter->init); + decay_s1615(input, parameter->init); } From 6d8ec3a2da55afe6d970309a58a295164c86f07b Mon Sep 17 00:00:00 2001 From: Andrew Gait Date: Thu, 21 Sep 2023 10:55:59 +0100 Subject: [PATCH 198/198] Use absolute value for min weight change and fix test --- .../models/neuron/synapse_dynamics/synapse_dynamics_stdp.py | 4 ++-- .../test_stdp/test_IF_curr_delta_stdp.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py index fbd94ccdf8..79f08ddf02 100644 --- a/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py +++ b/spynnaker/pyNN/models/neuron/synapse_dynamics/synapse_dynamics_stdp.py @@ -510,8 +510,8 @@ def __get_weight_min_delta(self, max_stdp_spike_delta): :param float max_stdp_spike_delta: The maximum expected time between spikes in milliseconds """ - return self.__weight_dependence.weight_change_minimum( - self.__timing_dependence.minimum_delta(max_stdp_spike_delta)) + return abs(self.__weight_dependence.weight_change_minimum( + self.__timing_dependence.minimum_delta(max_stdp_spike_delta))) @overrides(AbstractPlasticSynapseDynamics.calculate_min_weight) def calculate_min_weight(self, min_weights, max_stdp_spike_delta, diff --git a/spynnaker_integration_tests/test_stdp/test_IF_curr_delta_stdp.py b/spynnaker_integration_tests/test_stdp/test_IF_curr_delta_stdp.py index 0ce6fb41c8..99b7248186 100644 --- a/spynnaker_integration_tests/test_stdp/test_IF_curr_delta_stdp.py +++ b/spynnaker_integration_tests/test_stdp/test_IF_curr_delta_stdp.py @@ -114,7 +114,7 @@ def 
mad_pair_additive_delta(self): sim.end() - self.assertTrue(numpy.allclose(weights_exc, weights_inh, rtol=0.001)) + self.assertTrue(numpy.allclose(weights_exc, weights_inh, rtol=0.01)) def nearest_pair_additive_delta(self): timestep = 1 @@ -210,7 +210,7 @@ def nearest_pair_additive_delta(self): sim.end() - self.assertTrue(numpy.allclose(weights_exc, weights_inh, rtol=0.001)) + self.assertTrue(numpy.allclose(weights_exc, weights_inh, rtol=0.01)) def test_mad_pair_additive_delta(self): self.runsafe(self.mad_pair_additive_delta)