Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
31 commits
Select commit Hold shift + click to select a range
8ce3372
Added makefile for IF_curr_delta neurons to be used in convolutions
emijan-kth Jan 12, 2023
2e9c65f
Merge branch 'SpiNNakerManchester:master' into master
emijan-kth Jan 24, 2023
7accc3f
Added makefile for IF_curr_delta neurons to be used in convolutions
emijan-kth Jan 12, 2023
4324daf
Merge branch 'SpiNNakerManchester:master' into master
emijan-kth Jan 24, 2023
1d3e0fe
Merge branch 'SpiNNakerManchester:master' into master
emijan-kth Feb 2, 2023
6f70b30
Multiple fixes to convolutions. (#2)
emijan-kth Feb 2, 2023
047274b
Merge branch 'SpiNNakerManchester:master' into master
emijan-kth Feb 6, 2023
00388de
Merge branch 'local_only_delays'
emijan-kth Feb 10, 2023
bb90642
Fixed merge error: local_only_delays should have increased size of co…
emijan-kth Feb 10, 2023
91eeed6
Added option to ConvolutionConnector for delays varying horizontally …
emijan-kth Feb 10, 2023
4003c95
shapes and delays
emijan-kth Feb 15, 2023
584ed95
Set kernel_shape overrides shape of kernel_weights
emijan-kth Feb 16, 2023
13cf51e
Multisynaptic connections with varying delays.
emijan-kth Feb 16, 2023
27137e2
Merge pull request #1286 from emijan-kth/master
Christian-B Feb 20, 2023
5a7faa9
Merge branch 'master' into emijan-kth
Christian-B Feb 20, 2023
bdbade4
Merge branch 'master' into emijan-kth
Christian-B Feb 24, 2023
56d3974
Merge branch 'SpiNNakerManchester:local_only_delays' into local_only_…
emijan-kth Feb 26, 2023
eac4934
Merge branch 'local_only_delays'
emijan-kth Feb 26, 2023
986aa80
Revert "Added option to ConvolutionConnector for delays varying horiz…
emijan-kth Feb 27, 2023
cce37d4
Merge branch 'wip-strides-delays'
emijan-kth Feb 27, 2023
7d2a5bf
Fixed merge error: should have increased size of connector struct.
emijan-kth Feb 27, 2023
f882202
Work-in-progress: Merge remote-tracking branch 'upstream/master'
emijan-kth Mar 3, 2023
af90e25
Completed merge.
emijan-kth Mar 4, 2023
466aead
Merge branch 'master' into emijan-kth
rowleya Mar 14, 2023
b6a447e
Fix counting
rowleya Mar 14, 2023
1699340
Merge remote-tracking branch 'emijan-kth/master' into emijan-kth
rowleya Mar 14, 2023
8022750
Line too long
rowleya Mar 14, 2023
6ff18c8
Flake8
rowleya Mar 14, 2023
0b216a2
Rats
rowleya Mar 14, 2023
4e312c4
Merge remote-tracking branch 'upstream/master'
emijan-kth Mar 14, 2023
83f43f5
Merge remote-tracking branch 'emijan-kth/master' into emijan-kth
rowleya Mar 14, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
# Copyright (c) 2023 The University of Manchester
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Build description for the IF_curr_delta neuron binary used with the
# "local only" convolution connector implementation.

# The binary is named after the directory containing this makefile.
APP = $(notdir $(CURDIR))

# Neuron model sources: LIF model, delta input type, standard implementation,
# static threshold and delta synapse types (all relative to $(NEURON_DIR)).
NEURON_MODEL = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.c
NEURON_MODEL_H = $(NEURON_DIR)/neuron/models/neuron_model_lif_impl.h
INPUT_TYPE_H = $(NEURON_DIR)/neuron/input_types/input_type_delta.h
NEURON_IMPL_H = $(NEURON_DIR)/neuron/implementations/neuron_impl_standard.h
THRESHOLD_TYPE_H = $(NEURON_DIR)/neuron/threshold_types/threshold_type_static.h
SYNAPSE_TYPE_H = $(NEURON_DIR)/neuron/synapse_types/synapse_types_delta_impl.h
# Convolution implementation of the local-only connectivity.
LOCAL_ONLY_IMPL = $(NEURON_DIR)/neuron/local_only/local_only_conv_impl.c

# Common build rules shared by all local-only model makefiles.
include ../local_only.mk
3 changes: 2 additions & 1 deletion neural_modelling/makefiles/local_only_combined/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.

MODELS = IF_curr_exp_conv\
MODELS = IF_curr_delta_conv\
IF_curr_exp_conv\
IF_curr_exp_pool_dense

all:
Expand Down
62 changes: 42 additions & 20 deletions neural_modelling/src/neuron/local_only/local_only_conv_impl.c
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ typedef struct {
lc_shape_t kernel;
lc_shape_t padding;
lc_coord_t recip_strides;
lc_coord_t strides;
lc_coord_t recip_pool_strides;
uint16_t positive_synapse_type;
uint16_t negative_synapse_type;
Expand Down Expand Up @@ -138,27 +139,34 @@ bool local_only_impl_initialise(void *address){
return true;
}

//! \brief Multiply an integer by a 16-bit reciprocal and return the floored
//! \brief Calculate the remainder from a division
//! \param[in] dividend: the value that was divided
//! \param[in] divisor: the value it was divided by
//! \param[in] quotient: the (floored) result of that division
//! \return dividend - (quotient * divisor)
static inline int16_t calc_remainder(int16_t dividend, int16_t divisor, int16_t quotient) {
    // Recover the remainder from the already-computed quotient.
    int32_t product = (int32_t) quotient * (int32_t) divisor;
    int16_t rem = (int16_t) (dividend - product);
    log_debug("remainder: %d = %d * %d + %d",
            dividend, quotient, divisor, rem);
    return rem;
}

//! \brief Multiply an integer by a 16-bit reciprocal and return the floored
//! integer result
//! \param[in] integer: the value to be divided
//! \param[in] recip: fixed-point reciprocal of the divisor, with
//!     RECIP_FRACT_BITS fractional bits
//! \return the floored quotient
static inline int16_t recip_multiply(int16_t integer, int16_t recip) {
    int32_t i = integer;
    int32_t r = recip;
    return (int16_t) ((i * r) >> RECIP_FRACT_BITS);
}

//! \brief Do a mapping from pre to post 2D spaces, we use the standard
//! padding, kernel, strides from Convolutional Neural Networks
//! because of the way we're looping through the kernel, we divide the kernel
//! shape by 2.
//! \brief Do a mapping from pre to post 2D spaces
static inline lc_coord_t map_pre_to_post(connector *connector, lc_coord_t pre,
int16_t half_kh, int16_t half_kw) {
lc_coord_t post = pre;
post.col = recip_multiply(post.col, connector->recip_pool_strides.col);
post.row = recip_multiply(post.row, connector->recip_pool_strides.row);
post.col = post.col - half_kw + connector->padding.width;
post.row = post.row - half_kh + connector->padding.height;
post.col = recip_multiply(post.col, connector->recip_strides.col);
post.row = recip_multiply(post.row, connector->recip_strides.row);
lc_coord_t *start_i) {
pre.col = recip_multiply(pre.col, connector->recip_pool_strides.col);
pre.row = recip_multiply(pre.row, connector->recip_pool_strides.row);
pre.col += connector->padding.width;
pre.row += connector->padding.height;
lc_coord_t post;
post.col = recip_multiply(pre.col, connector->recip_strides.col);
post.row = recip_multiply(pre.row, connector->recip_strides.row);
start_i->col = calc_remainder(pre.col, connector->strides.col, post.col);
start_i->row = calc_remainder(pre.row, connector->strides.row, post.row);
return post;
}

Expand All @@ -169,22 +177,34 @@ static inline lc_coord_t map_pre_to_post(connector *connector, lc_coord_t pre,
static inline void do_convolution_operation(
uint32_t time, lc_coord_t pre_coord, connector *connector,
uint16_t *ring_buffers) {
int32_t half_kh = connector->kernel.height / 2;
int32_t half_kw = connector->kernel.width / 2;
lc_coord_t post_coord = map_pre_to_post(connector, pre_coord, half_kh, half_kw);
lc_coord_t start_i;
log_debug("kernel height: %d, kernel width: %d, "
"padding height: %d, padding width: %d, "
"strides row: %d, strides col: %d",
connector->kernel.height, connector->kernel.width,
connector->padding.height, connector->padding.width,
connector->strides.row, connector->strides.col);
lc_coord_t post_coord = map_pre_to_post(connector, pre_coord, &start_i);
log_debug("pre row %d, col %d AS post row %d, col %d",
pre_coord.row, pre_coord.col, post_coord.row, post_coord.col);
lc_weight_t *connector_weights = &weights[connector->kernel_index];

int32_t kw = connector->kernel.width;
for (int32_t r = -half_kh, kr = 0; r <= half_kh; r++, kr++) {
int32_t tmp_row = post_coord.row + r;
for (int32_t i_row = start_i.row, tmp_row = post_coord.row;
i_row < connector->kernel.height; i_row += connector->strides.row, --tmp_row) {
int32_t kr = connector->kernel.height - 1 - i_row;
log_debug("i_row = %u, kr = %u, tmp_row = %u", i_row, kr, tmp_row);

if ((tmp_row < config->post_start.row) || (tmp_row > config->post_end.row)) {
log_debug("tmp_row outside");
continue;
}
for (int32_t c = -half_kw, kc = 0; c <= half_kw; c++, kc++) {
int32_t tmp_col = post_coord.col + c;
for (int32_t i_col = start_i.col, tmp_col = post_coord.col;
i_col < connector->kernel.width; i_col += connector->strides.col, --tmp_col) {
int32_t kc = connector->kernel.width - 1 - i_col;
log_debug("i_col = %u, kc = %u, tmp_col = %u", i_col, kc, tmp_col);
if ((tmp_col < config->post_start.col) || (tmp_col > config->post_end.col)) {
log_debug("tmp_col outside");
continue;
}

Expand All @@ -193,8 +213,10 @@ static inline void do_convolution_operation(
((tmp_row - config->post_start.row) * config->post_shape.width)
+ (tmp_col - config->post_start.col);
uint32_t k = (kr * kw) + kc;
log_debug("weight index = %u", k);
lc_weight_t weight = connector_weights[k];
if (weight == 0) {
log_debug("zero weight");
continue;
}
uint32_t rb_index = 0;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@
#: The number of 16-bit shorts in the connector struct,
#: ignoring the source_key_info struct but including the delay and the
#: 32-bit weight index
CONNECTOR_CONFIG_SHORTS = 16
CONNECTOR_CONFIG_SHORTS = 18


class ConvolutionConnector(AbstractConnector):
Expand Down Expand Up @@ -216,11 +216,10 @@ def get_post_shape(self, shape):
shape = (post_pool_shape // self.__pool_stride) + 1

kernel_shape = numpy.array(self.__kernel_weights.shape)
post_shape = (shape - (kernel_shape - 1) +
(2 * self.__padding_shape))
post_shape = shape - kernel_shape + (2 * self.__padding_shape)

return numpy.clip(
post_shape // self.__strides, 1, numpy.inf).astype('int')
post_shape // self.__strides + 1, 1, numpy.inf).astype('int')

@overrides(AbstractConnector.validate_connection)
def validate_connection(self, application_edge, synapse_info):
Expand All @@ -231,7 +230,10 @@ def validate_connection(self, application_edge, synapse_info):
"The ConvolutionConnector only works where the Populations"
" of a Projection are both 2D. Please ensure that both the"
" Populations use a Grid2D structure.")
expected_post_shape = tuple(self.get_post_shape(pre.atoms_shape))
pre_shape = pre.atoms_shape
expected_post_shape = tuple(self.get_post_shape(
(pre_shape[1], pre_shape[0])))
expected_post_shape = expected_post_shape[1], expected_post_shape[0]
if expected_post_shape != post.atoms_shape:
raise ConfigurationException(
f"With a source population with shape {pre.atoms_shape}, "
Expand Down Expand Up @@ -284,10 +286,22 @@ def get_connected_vertices(self, s_info, source_vertex, target_vertex):
pre_slices = [m_vertex.vertex_slice for m_vertex in pre_vertices]
pre_slices_x = [vtx_slice.get_slice(0) for vtx_slice in pre_slices]
pre_slices_y = [vtx_slice.get_slice(1) for vtx_slice in pre_slices]
pre_ranges = [[[px.start, py.start], [px.stop - 1, py.stop - 1]]
pre_ranges = [[[py.start, px.start], [py.stop - 1, px.stop - 1]]
for px, py in zip(pre_slices_x, pre_slices_y)]
pres_as_posts = self.__pre_as_post(pre_ranges)
hlf_k_w, hlf_k_h = numpy.array(self.__kernel_weights.shape) // 2
pre_vertex_in_post_layer, start_i = self.__pre_as_post(pre_ranges)

pre_vertex_in_post_layer_upper_left = pre_vertex_in_post_layer[:, 0]
pre_vertex_in_post_layer_lower_right = pre_vertex_in_post_layer[:, 1]

kernel_shape = numpy.array(self.__kernel_weights.shape)

j = (kernel_shape - 1 - start_i) // self.__strides
j_upper_left = j[:, 0]

pre_vertex_max_reach_in_post_layer_upper_left = (
pre_vertex_in_post_layer_upper_left - j_upper_left)
pre_vertex_max_reach_in_post_layer_lower_right = (
pre_vertex_in_post_layer_lower_right)

connected = list()
for post in target_vertex.splitter.get_in_coming_vertices(
Expand All @@ -296,18 +310,20 @@ def get_connected_vertices(self, s_info, source_vertex, target_vertex):
post_slice_x = post_slice.get_slice(0)
post_slice_y = post_slice.get_slice(1)

# Get ranges allowed in post
min_x = post_slice_x.start - hlf_k_w
max_x = (post_slice_x.stop + hlf_k_w) - 1
min_y = post_slice_y.start - hlf_k_h
max_y = (post_slice_y.stop + hlf_k_h) - 1
# Get ranges allowed in post vertex
min_x = post_slice_x.start
max_x = post_slice_x.stop - 1
min_y = post_slice_y.start
max_y = post_slice_y.stop - 1

# Test that the start coords are in range i.e. less than max
start_in_range = numpy.logical_not(
numpy.any(pres_as_posts[:, 0] > [max_x, max_y], axis=1))
numpy.any(pre_vertex_max_reach_in_post_layer_upper_left >
[max_y, max_x], axis=1))
# Test that the end coords are in range i.e. more than min
end_in_range = numpy.logical_not(
numpy.any(pres_as_posts[:, 1] < [min_x, min_y], axis=1))
numpy.any(pre_vertex_max_reach_in_post_layer_lower_right <
[min_y, min_x], axis=1))
# When both things are true, we have a vertex in range
pre_in_range = pre_vertices[
numpy.logical_and(start_in_range, end_in_range)]
Expand All @@ -319,28 +335,42 @@ def get_max_n_incoming_slices(self, source_vertex, target_vertex):
pre_slices = list(source_vertex.splitter.get_out_going_slices())
pre_slices_x = [vtx_slice.get_slice(0) for vtx_slice in pre_slices]
pre_slices_y = [vtx_slice.get_slice(1) for vtx_slice in pre_slices]
pre_ranges = [[[px.start, py.start], [px.stop - 1, py.stop - 1]]
pre_ranges = [[[py.start, px.start], [py.stop - 1, px.stop - 1]]
for px, py in zip(pre_slices_x, pre_slices_y)]
pres_as_posts = self.__pre_as_post(pre_ranges)
hlf_k_w, hlf_k_h = numpy.array(self.__kernel_weights.shape) // 2
pre_vertex_in_post_layer, start_i = self.__pre_as_post(pre_ranges)

pre_vertex_in_post_layer_upper_left = pre_vertex_in_post_layer[:, 0]
pre_vertex_in_post_layer_lower_right = pre_vertex_in_post_layer[:, 1]

kernel_shape = numpy.array(self.__kernel_weights.shape)

j = (kernel_shape - 1 - start_i) // self.__strides
j_upper_left = j[:, 0]

pre_vertex_max_reach_in_post_layer_upper_left = (
pre_vertex_in_post_layer_upper_left - j_upper_left)
pre_vertex_max_reach_in_post_layer_lower_right = (
pre_vertex_in_post_layer_lower_right)

max_connected = 0
for post_slice in target_vertex.splitter.get_in_coming_slices():
post_slice_x = post_slice.get_slice(0)
post_slice_y = post_slice.get_slice(1)

# Get ranges allowed in post
min_x = post_slice_x.start - hlf_k_w
max_x = (post_slice_x.stop + hlf_k_w) - 1
min_y = post_slice_y.start - hlf_k_h
max_y = (post_slice_y.stop + hlf_k_h) - 1
min_x = post_slice_x.start
max_x = post_slice_x.stop - 1
min_y = post_slice_y.start
max_y = post_slice_y.stop - 1

# Test that the start coords are in range i.e. less than max
start_in_range = numpy.logical_not(
numpy.any(pres_as_posts[:, 0] > [max_x, max_y], axis=1))
numpy.any(pre_vertex_max_reach_in_post_layer_upper_left >
[max_y, max_x], axis=1))
# Test that the end coords are in range i.e. more than min
end_in_range = numpy.logical_not(
numpy.any(pres_as_posts[:, 1] < [min_x, min_y], axis=1))
numpy.any(pre_vertex_max_reach_in_post_layer_lower_right <
[min_y, min_x], axis=1))
# When both things are true, we have a vertex in range
pre_in_range = numpy.logical_and(start_in_range, end_in_range)
n_connected = pre_in_range.sum()
Expand All @@ -351,17 +381,18 @@ def get_max_n_incoming_slices(self, source_vertex, target_vertex):
def __pre_as_post(self, pre_coords):
""" Write pre coords as post coords.

:param Iterable pre_coords: An iterable of (x, y) coordinates
:param Iterable pre_coords: An iterable of (y, x) coordinates
:rtype: numpy.ndarray
"""
coords = numpy.array(pre_coords)
if self.__pool_stride is not None:
coords //= self.__pool_stride

kernel_shape = numpy.array(self.__kernel_weights.shape)
coords = coords - kernel_shape // 2 + self.__padding_shape
coords //= self.__strides
return coords
coords += self.__padding_shape
coord_by_strides = coords // self.__strides
start_i = coords % self.__strides

return coord_by_strides, start_i

@property
def kernel_n_bytes(self):
Expand All @@ -383,9 +414,9 @@ def get_local_only_data(
delay, weight_index):
# Get info about things
kernel_shape = self.__kernel_weights.shape
ps_x, ps_y = 1, 1
ps_y, ps_x = 1, 1
if self.__pool_stride is not None:
ps_x, ps_y = self.__pool_stride
ps_y, ps_x = self.__pool_stride

# Start with source key info
values = [key, mask, n_colour_bits]
Expand Down Expand Up @@ -413,9 +444,10 @@ def get_local_only_data(
self.__negative_receptor_type)
short_values = numpy.array([
start[1], start[0],
kernel_shape[1], kernel_shape[0],
self.__padding_shape[1], self.__padding_shape[0],
self.__recip(self.__strides[1]), self.__recip(self.__strides[0]),
kernel_shape[0], kernel_shape[1],
self.__padding_shape[0], self.__padding_shape[1],
self.__recip(self.__strides[0]), self.__recip(self.__strides[1]),
self.__strides[0], self.__strides[1],
self.__recip(ps_y), self.__recip(ps_x),
pos_synapse_type, neg_synapse_type], dtype="uint16")

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -147,9 +147,6 @@ def write_parameters(self, spec, region, machine_vertex, weight_scales):
data.extend(s_info.connector.get_local_only_data(
app_edge, source.vertex_slice, source.key, source.mask,
app_edge.pre_vertex.n_colour_bits, self.__delay, weight_index))
n_weights = next_weight_index
if next_weight_index % 2 != 0:
n_weights += 1

# Write the common spec
post_slice = machine_vertex.vertex_slice
Expand Down