#include "local_only_impl.h"
#include "local_only_2d_common.h"
#include "../population_table/population_table.h"
    //! The index of the synapse for positive weights
    uint16_t positive_synapse_type;

    //! The index of the synapse for negative weights
    uint16_t negative_synapse_type;

    //! The number of connectors
    uint32_t n_connectors;
static inline lc_weight_t *get_weights(connector *conn) {
    return (lc_weight_t *) &(conn->pool_stride_div[conn->n_dims]);
}
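/*
 * A self-contained sketch (hypothetical stand-in types, not the project's
 * connector or div_const definitions) of the layout get_weights() relies on:
 * each connector header ends with a flexible array of n_dims pool/stride
 * divisors, and the 16-bit weights start immediately after the last divisor.
 */
typedef struct {
    uint32_t m;                          // stand-in for the div_const fields
    uint32_t shift;
} div_const_sketch;

typedef struct {
    uint32_t n_dims;
    uint32_t n_weights;
    div_const_sketch pool_stride_div[];  // flexible array member, n_dims long
} connector_sketch;

static inline uint16_t *weights_of_sketch(connector_sketch *c) {
    // Same pointer arithmetic as get_weights(): step past the n_dims divisors
    return (uint16_t *) &c->pool_stride_div[c->n_dims];
}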
bool local_only_impl_initialise(void *address) {
    log_info("+++++++++++++++++ CONV init ++++++++++++++++++++");
    // ...

    // Copy the configuration into DTCM
    config = spin1_malloc(config_size);
    if (config == NULL) {
        log_error("Can't allocate %u bytes of memory for config with %u sources",
                /* ... */);
        return false;
    }
    // ...

    if (config->n_connectors == 0) {
        // ...
    }

    // Allocate space for the source information and the connectors
    source_infos = spin1_malloc(config->n_sources * sizeof(source_infos[0]));
    if (source_infos == NULL) {
        log_error("Can't allocate memory for source infos");
        return false;
    }
    connectors = spin1_malloc(config->n_connectors * sizeof(connectors[0]));
    if (connectors == NULL) {
        log_error("Can't allocate memory for connectors");
        return false;
    }

    // Copy each variable-length source record into its own allocation;
    // s_info walks the packed records in SDRAM
    for (uint32_t i = 0; i < config->n_sources; i++) {
        uint32_t n_bytes = sizeof(*s_info) + (s_info->n_dims * sizeof(source_dim));
        source_infos[i] = spin1_malloc(n_bytes);
        if (source_infos[i] == NULL) {
            log_error("Can't allocate %u bytes for source_infos[%u]", n_bytes, i);
            return false;
        }
        spin1_memcpy(source_infos[i], s_info, n_bytes);
        // Step to the next record, just past this one's dimension array
        s_info = (source_info *) &s_info->source_dim[source_infos[i]->n_dims];
    }
    // Copy each variable-length connector into its own allocation
    for (uint32_t i = 0; i < config->n_connectors; i++) {
        uint32_t n_bytes = sizeof(*conn) + (conn->n_weights * sizeof(lc_weight_t)) +
                // ...
        connectors[i] = spin1_malloc(n_bytes);
        if (connectors[i] == NULL) {
            log_error("Can't allocate %u bytes for connectors[%u]", n_bytes, i);
            return false;
        }
        // ...
        lc_weight_t *weights = get_weights(conn);
        uint32_t n_weights = connectors[i]->n_weights;
        // Weights are 16-bit; pad an odd count so the next connector stays
        // word-aligned
        if (n_weights & 0x1) {
            n_weights += 1;
        }
        conn = (connector *) &weights[n_weights];
    }
    // Print what has been loaded, for debugging
    for (uint s = 0; s < config->n_sources; s++) {
        log_info("Source %u: Key = 0x%08x, Mask = 0x%08x, %u Dimensions",
                s, source_infos[s]->key_info.key, source_infos[s]->key_info.mask,
                source_infos[s]->n_dims);
        for (uint32_t d = 0; d < source_infos[s]->n_dims; d++) {
            log_info(" Dim %u, core size=%u, cores per size=%u, last core=%u",
                    d, source_infos[s]->source_dim[d].size_per_core,
                    /* ... */
                    source_infos[s]->source_dim[d].size_last_core);
        }
        // start and end delimit the connectors belonging to this source
        for (uint32_t c = start; c < end; c++) {
            log_info(" Connector %u, %u dims, %u weights, +synapse %u, -synapse %u,"
                    " delay_stage %u, delay %u",
                    c, connectors[c]->n_dims, connectors[c]->n_weights,
                    connectors[c]->positive_synapse_type,
                    connectors[c]->negative_synapse_type,
                    connectors[c]->delay_stage, connectors[c]->delay);
        }
    }
    return true;
}
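/*
 * A sketch (hypothetical record type, not the project's source_info or
 * connector structs) of the pointer-stepping pattern used in the copy loops
 * above: because each record has a different length, the records are walked
 * rather than indexed, and &record->items[record->n_items] is the address of
 * the next record in the packed SDRAM block.
 */
typedef struct {
    uint32_t n_items;
    uint32_t items[];                    // flexible array member
} packed_record_sketch;

static inline const packed_record_sketch *next_record_sketch(
        const packed_record_sketch *r) {
    // Step past this record's variable-length tail to reach the next record
    return (const packed_record_sketch *) &r->items[r->n_items];
}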
static inline bool key_to_index_lookup(uint32_t spike, source_info **rs_info) {
    for (uint32_t i = 0; i < config->n_sources; i++) {
        // ...
    }
    // ...
}
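/*
 * The body of the lookup is elided above; this is a minimal sketch of the
 * test it is expected to perform, based on the documented meaning of
 * key_info.key and key_info.mask: a spike belongs to a source when its
 * routing key matches the source key under the source mask.
 */
static inline bool key_matches_sketch(uint32_t spike, uint32_t key,
        uint32_t mask) {
    return (spike & mask) == key;
}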
static inline bool get_conn_weights(connector *c, source_info *s_info,
        uint32_t local_id, uint32_t *sizes, uint32_t *core_coords,
        div_const *divs, uint32_t neurons_per_core, lc_weight_t **weights) {
    // Only handle the spike if the local id falls in this connector's
    // delay stage
    uint32_t first_neuron = c->delay_stage * neurons_per_core;
    uint32_t last_neuron = first_neuron + neurons_per_core;
    if (local_id < first_neuron || local_id >= last_neuron) {
        return false;
    }
    local_id -= first_neuron;

    // Decode the local id into per-dimension coordinates, apply the pooling
    // stride in each dimension, and fold the result into a weight-row index
    uint32_t index = 0;
    uint32_t last_extent = 1;
    uint32_t remainder = local_id;
    for (uint32_t j = 0; j < s_info->n_dims; j++) {
        div_const stride_div = c->pool_stride_div[j];
        uint32_t coord = div_by_const(remainder, divs[j]);
        remainder -= coord * sizes[j];
        coord = div_by_const(coord, stride_div);
        index += (coord * last_extent);
        // ...
        last_extent = div_by_const(last_extent, stride_div);
    }

    // The weights for this neuron are a whole row of n_post values
    lc_weight_t *all_weights = get_weights(c);
    *weights = &all_weights[index * config->n_post];
    return true;
}
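/*
 * A sketch of the coordinate decoding performed in the loop above, written
 * with plain division instead of the precomputed div_const reciprocals and
 * per-core sizes the real code uses: a flat id is split into one coordinate
 * per dimension in row-major order (first dimension slowest).
 * For dim_sizes = {3, 4} and flat_id = 7, coords_out becomes {1, 3}.
 */
static void decode_coords_sketch(uint32_t flat_id, const uint32_t *dim_sizes,
        uint32_t n_dims, uint32_t *coords_out) {
    for (uint32_t j = 0; j < n_dims; j++) {
        // Product of the sizes of all faster-varying dimensions
        uint32_t extent = 1;
        for (uint32_t k = j + 1; k < n_dims; k++) {
            extent *= dim_sizes[k];
        }
        coords_out[j] = flat_id / extent;
        flat_id -= coords_out[j] * extent;
    }
}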
void local_only_impl_process_spike(
        uint32_t time, uint32_t spike, uint16_t *ring_buffers) {
    // Find the source population from the spike's routing key
    source_info *s_info;
    if (!key_to_index_lookup(spike, &s_info)) {
        return;
    }

    // Split the key into the source core and the neuron id on that core
    uint32_t core_id = get_core_id(spike, s_info->key_info);
    uint32_t local_id = get_local_id(spike, s_info->key_info);
    uint32_t n_dims = s_info->n_dims;
    uint32_t sizes[n_dims];
    uint32_t core_coords[n_dims];
    // ...
    uint32_t neurons_per_core = 1;
    uint32_t core_remainder = core_id;
    for (uint32_t j = 0; j < n_dims; j++) {
        source_dim *s_dim = &s_info->source_dim[j];
        // Decode the core's coordinate in this dimension
        core_coords[j] = div_by_const(core_remainder, s_dim->cum_cores_div);
        bool is_last_core = core_coords[j] == (s_dim->cores - 1);
        core_remainder -= core_coords[j] * s_dim->cum_cores;
        // ... (sizes[j] and neurons_per_core are set here, using the
        //      last-core size on the last core)
    }
    // ...

    // connector here is the connector currently being processed for this
    // source
    lc_weight_t *weights;
    if (!get_conn_weights(connector, s_info, local_id, sizes, core_coords,
            divs, neurons_per_core, &weights)) {
        // ...
    }

    // Add the row of weights into the ring buffers for the post-neurons
    for (uint32_t post_index = 0; post_index < config->n_post; post_index++) {
        lc_weight_t weight = weights[post_index];
        // ...
        uint32_t rb_index = 0;
        // ... (rb_index is computed with synapse_row_get_ring_buffer_index,
        //      using the positive or negative synapse type depending on the
        //      sign of the weight)
        log_debug("Updating ring_buffers[%u] for post neuron %u with weight %u",
                rb_index, post_index, weight);
        // ...
        // Saturate the accumulation at 16 bits rather than letting it wrap
        uint32_t accumulation = ring_buffers[rb_index] + weight;
        uint32_t sat_test = accumulation & 0x10000;
        if (sat_test) {
            accumulation = sat_test - 1;
        }
        ring_buffers[rb_index] = accumulation;
    }
}
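/*
 * A self-contained sketch of the saturating accumulation above: the add is
 * done in 32 bits, and if the result overflows 16 bits (bit 16 becomes set)
 * it is clamped to 0xFFFF instead of wrapping.
 * For example, 0xFFF0 + 0x0020 yields 0xFFFF rather than 0x0010.
 */
static inline uint16_t saturating_add_u16_sketch(uint16_t rb_entry,
        uint16_t weight) {
    uint32_t accumulation = (uint32_t) rb_entry + weight;
    uint32_t sat_test = accumulation & 0x10000;
    if (sat_test) {
        accumulation = sat_test - 1;     // 0x10000 - 1 == 0xFFFF
    }
    return (uint16_t) accumulation;
}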