From 0ea4e776b4b41cfb94d82974f5f43948f6e194f4 Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Wed, 22 Jan 2020 13:33:19 +0000 Subject: [PATCH 01/12] fiddles to allow result test suite --- .../abstract_generate_connector_on_machine.py | 12 ++++++++++-- spynnaker/pyNN/models/neuron/synaptic_manager.py | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py index 4f39f2e2d3..10c11b02f6 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py @@ -17,6 +17,9 @@ from enum import Enum import numpy from six import with_metaclass + +from spinn_front_end_common.utilities import globals_variables, \ + helpful_functions from spinn_utilities.abstract_base import abstractproperty, AbstractBase from data_specification.enums.data_type import DataType from spinn_front_end_common.utilities.globals_variables import get_simulator @@ -72,7 +75,8 @@ class AbstractGenerateConnectorOnMachine(with_metaclass( __slots__ = [ "__delay_seed", "__weight_seed", - "__connector_seed" + "__connector_seed", + "__use_expander" ] def __init__(self, safe=True, callback=None, verbose=False): @@ -81,6 +85,9 @@ def __init__(self, safe=True, callback=None, verbose=False): self.__delay_seed = dict() self.__weight_seed = dict() self.__connector_seed = dict() + config = globals_variables.get_simulator().config + self.__use_expander = helpful_functions.read_config_boolean( + config, "Synapses", "use_expander") def _generate_lists_on_machine(self, values): """ Checks if the connector should generate lists on machine rather\ @@ -94,7 +101,8 @@ def _generate_lists_on_machine(self, values): # Only certain types of random distributions are supported for\ # generation 
on the machine - if IS_PYNN_8 and get_simulator().is_a_pynn_random(values): + if (IS_PYNN_8 and get_simulator().is_a_pynn_random(values) and + self.__use_expander): return values.name in PARAM_TYPE_BY_NAME return False diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index ecdedf29c1..4f5f54d679 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -19,6 +19,7 @@ import numpy import scipy.stats # @UnresolvedImport from scipy import special # @UnresolvedImport + from spinn_utilities.helpful_functions import get_valid_components from data_specification.enums import DataType from spinn_front_end_common.utilities.helpful_functions import ( From 497b0ce870b8e409f8e7d1b0e764c98cfd4c089a Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Thu, 6 Feb 2020 10:04:31 +0000 Subject: [PATCH 02/12] more hacks to get correct data out --- .../connectors/multapse_connector.py | 2 +- .../algorithms_metadata.xml | 9 ++++++--- .../spynnaker_data_specification_writer.py | 13 +++++++------ 3 files changed, 14 insertions(+), 10 deletions(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py index 88b8539682..0e4961abdf 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/multapse_connector.py @@ -177,7 +177,7 @@ def create_synaptic_block( # Now do the actual random choice from the available connections try: - chosen = numpy.random.choice( + chosen = self._rng.choice( pairs.shape[0], size=n_connections, replace=self.__with_replacement) except Exception as e: # pylint: disable=broad-except diff --git a/spynnaker/pyNN/overridden_pacman_functions/algorithms_metadata.xml b/spynnaker/pyNN/overridden_pacman_functions/algorithms_metadata.xml index f91ff9bd43..90db23b0fd 
100644 --- a/spynnaker/pyNN/overridden_pacman_functions/algorithms_metadata.xml +++ b/spynnaker/pyNN/overridden_pacman_functions/algorithms_metadata.xml @@ -93,6 +93,10 @@ graph_mapper MemoryGraphMapper + + machine_graph + MemoryMachineGraph + placements @@ -101,10 +105,9 @@ write_text_specs machine data_n_timesteps - - graph_mapper - + machine_graph + DataSpecificationTargets RegionSizes diff --git a/spynnaker/pyNN/overridden_pacman_functions/spynnaker_data_specification_writer.py b/spynnaker/pyNN/overridden_pacman_functions/spynnaker_data_specification_writer.py index 63ffb7d42a..c88c890df9 100644 --- a/spynnaker/pyNN/overridden_pacman_functions/spynnaker_data_specification_writer.py +++ b/spynnaker/pyNN/overridden_pacman_functions/spynnaker_data_specification_writer.py @@ -15,7 +15,8 @@ from spinn_front_end_common.interface.interface_functions import ( GraphDataSpecificationWriter) -from spynnaker.pyNN.models.utility_models.delays import DelayExtensionVertex +from spynnaker.pyNN.models.utility_models.delays import DelayExtensionVertex, \ + DelayExtensionMachineVertex class SpynnakerDataSpecificationWriter( @@ -28,16 +29,16 @@ class SpynnakerDataSpecificationWriter( def __call__( self, placements, hostname, report_default_directory, write_text_specs, machine, - data_n_timesteps, graph_mapper=None): + data_n_timesteps, graph_mapper, machine_graph): # pylint: disable=too-many-arguments, signature-differs delay_extensions = list() placement_order = list() - for placement in placements.placements: - associated_vertex = graph_mapper.get_application_vertex( - placement.vertex) - if isinstance(associated_vertex, DelayExtensionVertex): + for machine_vertex in machine_graph.vertices: + placement = placements.get_placement_of_vertex(machine_vertex) + + if isinstance(machine_vertex, DelayExtensionMachineVertex): delay_extensions.append(placement) else: placement_order.append(placement) From 9371644a184f246f18a3aa045393c8729c888f87 Mon Sep 17 00:00:00 2001 From: alan-stokes 
Date: Mon, 13 Jul 2020 10:46:59 +0100 Subject: [PATCH 03/12] whatever --- spynnaker/pyNN/models/neuron/synaptic_manager.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neuron/synaptic_manager.py b/spynnaker/pyNN/models/neuron/synaptic_manager.py index 4bd6672ba4..a53fef7379 100644 --- a/spynnaker/pyNN/models/neuron/synaptic_manager.py +++ b/spynnaker/pyNN/models/neuron/synaptic_manager.py @@ -1100,7 +1100,8 @@ def _retrieve_synaptic_block( monitor_api.load_system_routing_tables( txrx, monitor_cores, placements) monitor_api.set_cores_for_data_streaming( - txrx, monitor_cores, placements) + txrx, monitor_cores, placements, n_channels=8, + intermediate_channel_waits=4) # read in the synaptic block if not is_single: From ae1541f56a604bfb5f4baf5668ce7cc16f936d9b Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Mon, 9 Nov 2020 14:03:36 +0000 Subject: [PATCH 04/12] fixes --- spynnaker/pyNN/spynnaker.cfg | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/spynnaker/pyNN/spynnaker.cfg b/spynnaker/pyNN/spynnaker.cfg index 1ac50c3526..b60e08a716 100644 --- a/spynnaker/pyNN/spynnaker.cfg +++ b/spynnaker/pyNN/spynnaker.cfg @@ -72,3 +72,7 @@ notify_hostname = localhost # Uncomment the following to change from the defaults live_spike_port = 17895 live_spike_host = 0.0.0.0 + +[Synapses] + +use_expander = True \ No newline at end of file From 2ddc31d01527df28364316785cc980d5479c2574 Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Mon, 9 Nov 2020 14:14:39 +0000 Subject: [PATCH 05/12] add new cfg param to the mock sim --- unittests/mocks.py | 1 + 1 file changed, 1 insertion(+) diff --git a/unittests/mocks.py b/unittests/mocks.py index 8842de2233..8c8241f0ff 100644 --- a/unittests/mocks.py +++ b/unittests/mocks.py @@ -107,6 +107,7 @@ def __init__(self): "enable_buffered_recording": "False"} self.config["MasterPopTable"] = {"generator": "BinarySearch"} self.config["Reports"] = {"n_profile_samples": 0} + self.config["Synapses"] = 
{"use_expander": True} def add_population(self, pop): pass From 6b29473b8decaa5a1af2c805554be317543b2e14 Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Tue, 10 Nov 2020 10:14:57 +0000 Subject: [PATCH 06/12] more fixes --- spynnaker/pyNN/extra_algorithms/__init__.py | 5 +- .../extra_algorithms/algorithms_metadata.xml | 92 +++ ...ynnaker_host_execute_data_specification.py | 616 ++++++++++++++++++ .../delays/delay_extension_machine_vertex.py | 4 +- .../delays/delay_extension_vertex.py | 2 +- spynnaker/pyNN/spynnaker.cfg | 2 +- 6 files changed, 716 insertions(+), 5 deletions(-) create mode 100644 spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py diff --git a/spynnaker/pyNN/extra_algorithms/__init__.py b/spynnaker/pyNN/extra_algorithms/__init__.py index 2dc787ce59..bff809c085 100644 --- a/spynnaker/pyNN/extra_algorithms/__init__.py +++ b/spynnaker/pyNN/extra_algorithms/__init__.py @@ -20,8 +20,11 @@ from .spynnaker_machine_bit_field_router_compressor import ( SpynnakerMachineBitFieldPairRouterCompressor, SpynnakerMachineBitFieldUnorderedRouterCompressor) +from .spynnaker_host_execute_data_specification import ( + SpyNNakerHostExecuteDataSpecification) __all__ = ["GraphEdgeWeightUpdater", "OnChipBitFieldGenerator", "SpynnakerDataSpecificationWriter", "SpynnakerMachineBitFieldPairRouterCompressor", - "SpynnakerMachineBitFieldUnorderedRouterCompressor"] + "SpynnakerMachineBitFieldUnorderedRouterCompressor", + "SpyNNakerHostExecuteDataSpecification"] diff --git a/spynnaker/pyNN/extra_algorithms/algorithms_metadata.xml b/spynnaker/pyNN/extra_algorithms/algorithms_metadata.xml index 396fdf12a6..3dc0c64bd2 100644 --- a/spynnaker/pyNN/extra_algorithms/algorithms_metadata.xml +++ b/spynnaker/pyNN/extra_algorithms/algorithms_metadata.xml @@ -18,6 +18,98 @@ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="https://github.com/SpiNNakerManchester/PACMAN 
https://raw.githubusercontent.com/SpiNNakerManchester/PACMAN/master/pacman/operations/algorithms_metadata_schema.xsd"> + + spynnaker.pyNN.extra_algorithms + SpyNNakerHostExecuteDataSpecification + execute_application_data_specs + + + transceiver + MemoryTransceiver + + + machine + MemoryExtendedMachine + + + app_id + APPID + + + dsg_targets + DataSpecificationTargets + + + report_folder + ReportFolder + + + java_caller + JavaCaller + + + processor_to_app_data_base_address + ProcessorToAppDataBaseAddress + + + uses_advanced_monitors + UsingAdvancedMonitorSupport + + + executable_targets + ExecutableTargets + + + placements + MemoryPlacements + + + extra_monitor_cores + MemoryExtraMonitorVertices + + + extra_monitor_cores_to_ethernet_connection_map + MemoryMCGatherVertexToEthernetConnectedChipMapping + + + disable_advanced_monitor_usage + DisableAdvancedMonitorUsageForDataIn + + + region_sizes + RegionSizes + + + + transceiver + machine + region_sizes + app_id + dsg_targets + uses_advanced_monitors + placements + + + report_folder + java_caller + processor_to_app_data_base_address + executable_targets + extra_monitor_cores + extra_monitor_cores_to_ethernet_connection_map + disable_advanced_monitor_usage + DataLoaded + DataLoaded + + + ProcessorToAppDataBaseAddress + TotalSDRAMTracker + MatrixTracker + ExpanderTracker + TimeToUseIO + DataLoaded + DataLoaded + + spynnaker.pyNN.extra_algorithms.synapse_expander synapse_expander diff --git a/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py b/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py new file mode 100644 index 0000000000..14de53bfcb --- /dev/null +++ b/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py @@ -0,0 +1,616 @@ +# Copyright (c) 2017-2019 The University of Manchester +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free 
Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from collections import defaultdict + +import datetime +import logging +import numpy +from six import iteritems, itervalues + +from spinn_front_end_common.interface.interface_functions import \ + HostExecuteDataSpecification +from spinn_front_end_common.interface.interface_functions.host_execute_data_specification import \ + filter_out_system_executables, _MEM_REGIONS, system_cores, \ + filter_out_app_executables +from spinn_utilities.progress_bar import ProgressBar +from spinn_utilities.log import FormatAdapter +from data_specification import DataSpecificationExecutor +from data_specification.exceptions import DataSpecificationException +from spinn_front_end_common.interface.ds.ds_write_info import DsWriteInfo +from spinn_front_end_common.utilities.helpful_functions import ( + write_address_to_user0) +from spinn_front_end_common.utilities.utility_objs import ( + ExecutableType, DataWritten) +from spinn_front_end_common.utilities.helpful_functions import ( + emergency_recover_states_from_failure) +from spinn_front_end_common.utilities import globals_variables, \ + helpful_functions +from spinn_utilities.timer import Timer +from spynnaker.pyNN.models.neuron import PopulationMachineVertex +from spynnaker.pyNN.models.utility_models.delays import \ + DelayExtensionMachineVertex +from spynnaker.pyNN.utilities.constants import POPULATION_BASED_REGIONS + +logger = FormatAdapter(logging.getLogger(__name__)) + + +class SpyNNakerHostExecuteDataSpecification(HostExecuteDataSpecification): + """ Executes the host based data 
specification. + """ + + __slots__ = [ + # the application ID of the simulation + "_app_id", + "_core_to_conn_map", + # The path where the SQLite database holding the data will be placed, + # and where any java provenance can be written. + "_db_folder", + # The support class to run via Java. If None pure python is used. + "_java", + # The python representation of the SpiNNaker machine. + "_machine", + "_monitors", + "_placements", + # The spinnman instance. + "_txrx", + # The write info; a dict of cores to a dict of + # 'start_address', 'memory_used', 'memory_written' + "_write_info_map"] + + first = True + + def __init__(self): + self._app_id = None + self._core_to_conn_map = None + self._db_folder = None + self._java = None + self._machine = None + self._monitors = None + self._placements = None + self._txrx = None + self._write_info_map = None + + def __java_database(self, dsg_targets, progress, region_sizes): + """ + :param DataSpecificationTargets dsg_tagets: + :param ~spinn_utilities.progress_bar.ProgressBar progress: + :param dict(tuple(int,int,int)int) region_sizes: + :rtype: DsWriteInfo + """ + # Copy data from WriteMemoryIOData to database + dw_write_info = DsWriteInfo(dsg_targets.get_database()) + dw_write_info.clear_write_info() + if self._write_info_map is not None: + for core, info in iteritems(self._write_info_map): + (x, y, p) = core + dw_write_info.set_info(x, y, p, info) + del region_sizes[core] + for core in region_sizes: + (x, y, p) = core + dw_write_info.set_size_info(x, y, p, region_sizes[core]) + progress.update() + dsg_targets.set_app_id(self._app_id) + self._java.set_machine(self._machine) + self._java.set_report_folder(self._db_folder) + progress.update() + return dw_write_info + + def __java_all(self, dsg_targets, region_sizes): + """ Does the Data Specification Execution and loading using Java + + :param DataSpecificationTargets dsg_targets: + map of placement to file path + :return: map of of cores to descriptions of what was written + 
:rtype: DsWriteInfo + """ + # create a progress bar for end users + progress = ProgressBar( + 3, "Executing data specifications and loading data using Java") + + # Copy data from WriteMemoryIOData to database + dw_write_info = self.__java_database( + dsg_targets, progress, region_sizes) + + self._java.execute_data_specification() + + progress.end() + return dw_write_info + + def __python_all(self, dsg_targets, region_sizes, placements): + """ Does the Data Specification Execution and loading using Python + + :param Placements placements: placements + :param DataSpecificationTargets dsg_targets: + map of placement to file path + :param dict(tuple(int,int,int),int) region_sizes: + map between vertex and list of region sizes + :return: dict of cores to descriptions of what was written + :rtype: dict(tuple(int,int,int), DataWritten) + """ + # While the database supports having the info in it a python bugs does + # not like iterating over and writing intermingled so using a dict + results = self._write_info_map + if results is None: + results = dict() + + # create a progress bar for end users + progress = ProgressBar( + dsg_targets.n_targets(), + "Executing data specifications and loading data") + + # allocate and set user 0 before loading data + base_addresses = dict() + for core, _ in iteritems(dsg_targets): + base_addresses[core] = self.__malloc_region_storage( + core, region_sizes[core]) + + for core, reader in progress.over(iteritems(dsg_targets)): + x, y, p = core + data_written, _matrix, _connection, _total, _executor, _time = \ + self.__python_execute( + core, reader, self._txrx.write_memory, + base_addresses[core], region_sizes[core], + placements.get_vertex_on_processor(x, y, p)) + results[core] = data_written + + return results + + def execute_application_data_specs( + self, transceiver, machine, app_id, dsg_targets, + uses_advanced_monitors, executable_targets, region_sizes, + placements=None, extra_monitor_cores=None, + 
extra_monitor_cores_to_ethernet_connection_map=None, + report_folder=None, java_caller=None, + processor_to_app_data_base_address=None, + disable_advanced_monitor_usage=False): + """ Execute the data specs for all non-system targets. + + :param ~spinn_machine.Machine machine: + the python representation of the SpiNNaker machine + :param ~spinnman.transceiver.Transceiver transceiver: + the spinnman instance + :param int app_id: the application ID of the simulation + :param dict(tuple(int,int,int),int) region_sizes: + the coord for region sizes for each core + :param DataSpecificationTargets dsg_targets: + map of placement to file path + :param bool uses_advanced_monitors: + whether to use fast data in protocol + :param ~spinnman.model.ExecutableTargets executable_targets: + what core will running what binary + :param ~pacman.model.placements.Placements placements: + where vertices are located + :param list(ExtraMonitorSupportMachineVertex) extra_monitor_cores: + the deployed extra monitors, if any + :param extra_monitor_cores_to_ethernet_connection_map: + how to talk to extra monitor cores + :type extra_monitor_cores_to_ethernet_connection_map: + dict(tuple(int,int), DataSpeedUpPacketGatherMachineVertex) + :param processor_to_app_data_base_address: + map of placement and DSG data + :type processor_to_app_data_base_address: + dict(tuple(int,int,int), DsWriteInfo) + :param bool disable_advanced_monitor_usage: + whether to avoid using advanced monitors even if they're available + :return: map of placement and DSG data + :rtype: dict(tuple(int,int,int),DataWritten) or DsWriteInfo + """ + # pylint: disable=too-many-arguments + if processor_to_app_data_base_address is None: + processor_to_app_data_base_address = dict() + self._write_info_map = processor_to_app_data_base_address + self._db_folder = report_folder + self._java = java_caller + self._machine = machine + self._txrx = transceiver + self._app_id = app_id + self._monitors = extra_monitor_cores + self._placements = 
placements + self._core_to_conn_map = extra_monitor_cores_to_ethernet_connection_map + + # Allow override to disable + if disable_advanced_monitor_usage: + uses_advanced_monitors = False + + impl_method = self.__java_app if java_caller else self.__python_app + try: + return impl_method( + dsg_targets, executable_targets, uses_advanced_monitors, + region_sizes) + except: # noqa: E722 + if uses_advanced_monitors: + emergency_recover_states_from_failure( + self._txrx, self._app_id, executable_targets) + raise + + def __set_router_timeouts(self): + config = globals_variables.get_simulator().config + n_channels = helpful_functions.read_config_int( + config, "SpinnMan", "multi_packets_in_flight_n_channels") + intermediate_channel_waits = helpful_functions.read_config_int( + config, "SpinnMan", "multi_packets_in_flight_channel_waits") + for receiver in itervalues(self._core_to_conn_map): + receiver.load_system_routing_tables( + self._txrx, self._monitors, self._placements) + receiver.set_cores_for_data_streaming( + self._txrx, self._monitors, self._placements, + n_channels, intermediate_channel_waits) + + def __reset_router_timeouts(self): + # reset router timeouts + for receiver in itervalues(self._core_to_conn_map): + receiver.unset_cores_for_data_streaming( + self._txrx, self._monitors, self._placements) + # reset router tables + receiver.load_application_routing_tables( + self._txrx, self._monitors, self._placements) + + def __select_writer(self, x, y): + chip = self._machine.get_chip_at(x, y) + ethernet_chip = self._machine.get_chip_at( + chip.nearest_ethernet_x, chip.nearest_ethernet_y) + gatherer = self._core_to_conn_map[ethernet_chip.x, ethernet_chip.y] + return gatherer.send_data_into_spinnaker + + def __python_app( + self, dsg_targets, executable_targets, use_monitors, + region_sizes): + """ + :param DataSpecificationTargets dsg_targets: + :param ~spinnman.model.ExecutableTargets executable_targets: + :param bool use_monitors: + :param 
dict(tuple(int,int,int),int) region_sizes: + :return: dict of cores to descriptions of what was written + :rtype: dict(tuple(int,int,int),DataWritten) + """ + dsg_targets = filter_out_system_executables( + dsg_targets, executable_targets) + + if use_monitors: + self.__set_router_timeouts() + + # create a progress bar for end users + progress = ProgressBar( + len(dsg_targets) * 2, + "Executing data specifications and loading data for " + "application vertices") + + # allocate and set user 0 before loading data + base_addresses = dict() + for core, _ in progress.over( + iteritems(dsg_targets), finish_at_end=False): + base_addresses[core] = self.__malloc_region_storage( + core, region_sizes[core]) + + total_sizes = defaultdict(int) + matrix_sizes = defaultdict(int) + connection_build_sizes = defaultdict(int) + total_sizes_vertex = defaultdict(int) + + time = datetime.timedelta() + for core, reader in progress.over(iteritems(dsg_targets)): + x, y, p = core + chip = self._machine.get_chip_at(x, y) + data_written, matrix, connection, total, executor, time_taken = \ + self.__python_execute( + core, reader, + self.__select_writer(x, y) + if use_monitors else self._txrx.write_memory, + base_addresses[core], region_sizes[core], + self._placements.get_vertex_on_processor(x, y, p)) + self._write_info_map[core] = data_written + time += time_taken + + vertex = self._placements.get_vertex_on_processor(x, y, p) + + for region_id in _MEM_REGIONS: + region = executor.get_region(region_id) + if region is not None: + total_sizes[(x, y, p, region_id)] = \ + region.max_write_pointer + total_sizes_vertex[(vertex.label, region_id)] = \ + region.max_write_pointer + + # write information for the memory map report + total_sizes[(x, y, p, -1)] = total + total_sizes[ + (0, chip.nearest_ethernet_x, chip.nearest_ethernet_y)] += total + total_sizes[(-1, -1, -1, -1)] += total + + matrix_sizes[(x, y, p)] = matrix + matrix_sizes[ + (0, chip.nearest_ethernet_x, chip.nearest_ethernet_y)] += \ + matrix 
+ matrix_sizes[(-1, -1, -1)] += matrix + + connection_build_sizes[(x, y, p)] = connection + connection_build_sizes[ + (0, chip.nearest_ethernet_x, chip.nearest_ethernet_y)] += \ + connection + connection_build_sizes[(-1, -1, -1)] += connection + + if use_monitors: + self.__reset_router_timeouts() + return ( + self._write_info_map, total_sizes, matrix_sizes, + connection_build_sizes, time.total_seconds()) + + def __java_app( + self, dsg_targets, executable_targets, use_monitors, + region_sizes): + """ + :param DataSpecificationTargets dsg_targets: + :param ~spinnman.model.ExecutableTargets executable_targets: + :param bool use_monitors: + :param dict(tuple(int,int,int),int) region_sizes: + :return: map of cores to descriptions of what was written + :rtype: DsWriteInfo + """ + # create a progress bar for end users + progress = ProgressBar( + 4, "Executing data specifications and loading data for " + "application vertices using Java") + + dsg_targets.mark_system_cores(system_cores(executable_targets)) + progress.update() + + # Copy data from WriteMemoryIOData to database + dw_write_info = self.__java_database( + dsg_targets, progress, region_sizes) + if use_monitors: + self._java.set_placements(self._placements, self._txrx) + + self._java.execute_app_data_specification(use_monitors) + + progress.end() + return ( + dw_write_info, + {(-1, -1, -1, -1): dsg_targets.sum_over_region_sizes()}, + {(-1, -1, -1): 0}, {(-1, -1, -1): 0}, + dsg_targets.time_to_load_in_seconds()) + + def execute_system_data_specs( + self, transceiver, machine, app_id, dsg_targets, region_sizes, + executable_targets, placements, report_folder=None, + java_caller=None, processor_to_app_data_base_address=None): + """ Execute the data specs for all system targets. 
+ + :param ~spinnman.transceiver.Transceiver transceiver: + the spinnman instance + :param ~spinn_machine.Machine machine: + the python representation of the spinnaker machine + :param int app_id: the application ID of the simulation + :param dict(tuple(int,int,int),str) dsg_targets: + map of placement to file path + :param dict(tuple(int,int,int),int) region_sizes: + the coordinates for region sizes for each core + :param ~spinnman.model.ExecutableTargets executable_targets: + the map between binaries and locations and executable types + :param str report_folder: + :param JavaCaller java_caller: + :param processor_to_app_data_base_address: + :type processor_to_app_data_base_address: + dict(tuple(int,int,int),DataWritten) + :return: map of placement and DSG data, and loaded data flag. + :rtype: dict(tuple(int,int,int),DataWritten) or DsWriteInfo + """ + # pylint: disable=too-many-arguments + + if processor_to_app_data_base_address is None: + processor_to_app_data_base_address = dict() + self._write_info_map = processor_to_app_data_base_address + self._machine = machine + self._txrx = transceiver + self._app_id = app_id + self._db_folder = report_folder + self._java = java_caller + self._placements = placements + + impl_method = self.__java_sys if java_caller else self.__python_sys + return impl_method(dsg_targets, executable_targets, region_sizes) + + def __java_sys(self, dsg_targets, executable_targets, region_sizes): + """ Does the Data Specification Execution and loading using Java + + :param DataSpecificationTargets dsg_targets: + map of placement to file path + :param ~spinnman.model.ExecutableTargets executable_targets: + the map between binaries and locations and executable types + :param dict(tuple(int,int,int),int) region_sizes: + the coord for region sizes for each core + :return: map of cores to descriptions of what was written + :rtype: DsWriteInfo + """ + # create a progress bar for end users + progress = ProgressBar( + 4, "Executing data 
specifications and loading data for system " + "vertices using Java") + + dsg_targets.mark_system_cores(system_cores(executable_targets)) + progress.update() + + # Copy data from WriteMemoryIOData to database + dw_write_info = self.__java_database( + dsg_targets, progress, region_sizes) + + self._java.execute_system_data_specification() + + progress.end() + return dw_write_info + + def __python_sys(self, dsg_targets, executable_targets, region_sizes): + """ Does the Data Specification Execution and loading using Python + + :param DataSpecificationTargets dsg_targets: + map of placement to file path + :param ~spinnman.model.ExecutableTargets executable_targets: + the map between binaries and locations and executable types + :param dict(tuple(int,int,int),int) region_sizes: + the coord for region sizes for each core + :return: dict of cores to descriptions of what was written + :rtype: dict(tuple(int,int,int),DataWritten) + """ + # While the database supports having the info in it a python bugs does + # not like iterating over and writing intermingled so using a dict + sys_targets = filter_out_app_executables( + dsg_targets, executable_targets) + + # create a progress bar for end users + progress = ProgressBar( + len(sys_targets) * 2, + "Executing data specifications and loading data for " + "system vertices") + + # allocate and set user 0 before loading data + base_addresses = dict() + for core, _ in progress.over( + iteritems(sys_targets), finish_at_end=False): + base_addresses[core] = self.__malloc_region_storage( + core, region_sizes[core]) + + for core, reader in progress.over(iteritems(sys_targets)): + x, y, p = core + data_written, _matrix, _connection, _total, _ex, _time = \ + self.__python_execute( + core, reader, self._txrx.write_memory, base_addresses[core], + region_sizes[core], + self._placements.get_vertex_on_processor(x, y, p)) + self._write_info_map[core] = data_written + + return self._write_info_map + + def __malloc_region_storage(self, core, size): 
+ """ Allocates the storage for all DSG regions on the core and tells \ + the core and our caller where that storage is. + + :param tuple(int,int,int) core: Which core we're talking about. + :param int size: + The total size of all storage for regions on that core, including + for the header metadata. + :return: address of region header table (not yet filled) + :rtype: int + """ + (x, y, p) = core + + # allocate memory where the app data is going to be written; this + # raises an exception in case there is not enough SDRAM to allocate + start_address = self._txrx.malloc_sdram(x, y, size, self._app_id) + + # set user 0 register appropriately to the application data + write_address_to_user0(self._txrx, x, y, p, start_address) + + return start_address + + def __python_execute( + self, core, reader, writer_func, base_address, size_allocated, + machine_vertex): + """ + :param tuple(int,int,int) core: + :param ~.AbstractDataReader reader: + :param callable(tuple(int,int,int,bytearray),None) writer_func: + :param int base_address: + :param int size_allocated: + :param MachineVertex machine_vertex: the machine vertex + :rtype: DataWritten + """ + x, y, p = core + total_size = 0 + + # Maximum available memory. + # However, system updates the memory available independently, so the + # space available check actually happens when memory is allocated. 
+ memory_available = self._machine.get_chip_at(x, y).sdram.size + + # generate data spec executor + executor = DataSpecificationExecutor(reader, memory_available) + + # run data spec executor + try: + executor.execute() + except DataSpecificationException: + logger.error("Error executing data specification for {}, {}, {}", + x, y, p) + raise + + # Do the actual writing ------------------------------------ + + # Write the header and pointer table + header = executor.get_header() + pointer_table = executor.get_pointer_table(base_address) + data_to_write = numpy.concatenate((header, pointer_table)).tostring() + + # NB: DSE meta-block is always small (i.e., one SDP write) + self._txrx.write_memory(x, y, base_address, data_to_write) + bytes_written = len(data_to_write) + + matrix_size, connection_builder_size = self.__get_extra_sizes( + machine_vertex, executor) + + for region_id in _MEM_REGIONS: + region = executor.get_region(region_id) + if region is not None: + total_size += region.max_write_pointer + + time_total = datetime.timedelta() + timer = Timer() + # Write each region + for region_id in _MEM_REGIONS: + region = executor.get_region(region_id) + if region is None: + continue + max_pointer = region.max_write_pointer + if region.unfilled or max_pointer == 0: + continue + + # Get the data up to what has been written + data = region.region_data[:max_pointer] + + # Write the data to the position + with timer: + writer_func(x, y, pointer_table[region_id], data) + time_total += timer.measured_interval + + bytes_written += len(data) + + return ( + DataWritten(base_address, size_allocated, bytes_written), + matrix_size, connection_builder_size, total_size, executor, + time_total) + + @staticmethod + def __get_extra_sizes(machine_vertex, executor): + matrix_size, connection_builder_size = 0, 0 + + if isinstance(machine_vertex, PopulationMachineVertex): + regions = POPULATION_BASED_REGIONS + region = executor.get_region(regions.SYNAPTIC_MATRIX.value) # 4 + matrix_size += 
region.max_write_pointer + region = executor.get_region(regions.DIRECT_MATRIX.value) # 10 + if region is not None: + matrix_size += region.max_write_pointer + region = executor.get_region(regions.CONNECTOR_BUILDER.value) # 9 + if region is not None: + connection_builder_size += region.max_write_pointer + + if isinstance(machine_vertex, DelayExtensionMachineVertex): + regions = DelayExtensionMachineVertex.DELAY_EXTENSION_REGIONS + region = executor.get_region(regions.DELAY_PARAMS.value) # 1 + matrix_size += region.max_write_pointer + region = executor.get_region(regions.EXPANDER_REGION.value) # 3 + if region is not None: + connection_builder_size += region.max_write_pointer + + return matrix_size, connection_builder_size diff --git a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py index b3facae66d..c228f960e0 100644 --- a/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py +++ b/spynnaker/pyNN/models/utility_models/delays/delay_extension_machine_vertex.py @@ -32,7 +32,7 @@ class DelayExtensionMachineVertex( __slots__ = [ "__resources"] - class _DELAY_EXTENSION_REGIONS(Enum): + class DELAY_EXTENSION_REGIONS(Enum): SYSTEM = 0 DELAY_PARAMS = 1 PROVENANCE_REGION = 2 @@ -74,7 +74,7 @@ def __init__(self, resources_required, label, constraints=None, @property @overrides(ProvidesProvenanceDataFromMachineImpl._provenance_region_id) def _provenance_region_id(self): - return self._DELAY_EXTENSION_REGIONS.PROVENANCE_REGION.value + return self.DELAY_EXTENSION_REGIONS.PROVENANCE_REGION.value @property @overrides( diff --git a/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py b/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py index a60f4e6254..9024d2b2da 100644 --- a/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py +++ 
b/spynnaker/pyNN/models/utility_models/delays/delay_extension_vertex.py @@ -51,7 +51,7 @@ # 6. n_delay_stages _DELAY_PARAM_HEADER_WORDS = 6 # pylint: disable=protected-access -_DELEXT_REGIONS = DelayExtensionMachineVertex._DELAY_EXTENSION_REGIONS +_DELEXT_REGIONS = DelayExtensionMachineVertex.DELAY_EXTENSION_REGIONS _EXPANDER_BASE_PARAMS_SIZE = 3 * BYTES_PER_WORD # The microseconds per timestep will be divided by this for the max offset diff --git a/spynnaker/pyNN/spynnaker.cfg b/spynnaker/pyNN/spynnaker.cfg index b60e08a716..5f073dc345 100644 --- a/spynnaker/pyNN/spynnaker.cfg +++ b/spynnaker/pyNN/spynnaker.cfg @@ -37,7 +37,7 @@ drop_late_spikes = True application_to_machine_graph_algorithms = PartitionAndPlacePartitioner machine_graph_to_machine_algorithms = EdgeToNKeysMapper,OneToOnePlacer,NerRoute,BasicTagAllocator,ProcessPartitionConstraints,MallocBasedRoutingInfoAllocator,BasicRoutingTableGenerator,RouterCollisionPotentialReport machine_graph_to_virtual_machine_algorithms = EdgeToNKeysMapper,OneToOnePlacer,NerRoute,BasicTagAllocator,ProcessPartitionConstraints,MallocBasedRoutingInfoAllocator,BasicRoutingTableGenerator,MundyRouterCompressor -loading_algorithms = SpynnakerMachineBitFieldUnorderedRouterCompressor,BitFieldCompressorReport +loading_algorithms = SpyNNakerHostExecuteDataSpecification,SpynnakerMachineBitFieldUnorderedRouterCompressor,BitFieldCompressorReport #loading_algorithms = HostBasedBitFieldRouterCompressor #loading_algorithms = MundyOnChipRouterCompression From 053e1a9154f75abc05fb0d83cc0c42d50ab80bf9 Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Tue, 10 Nov 2020 11:12:00 +0000 Subject: [PATCH 07/12] fixes --- .../extra_algorithms/algorithms_metadata.xml | 66 +++++++++++++++++++ ...ynnaker_host_execute_data_specification.py | 2 +- spynnaker/pyNN/spynnaker.cfg | 2 +- 3 files changed, 68 insertions(+), 2 deletions(-) diff --git a/spynnaker/pyNN/extra_algorithms/algorithms_metadata.xml b/spynnaker/pyNN/extra_algorithms/algorithms_metadata.xml 
index 3dc0c64bd2..0e2859dca1 100644 --- a/spynnaker/pyNN/extra_algorithms/algorithms_metadata.xml +++ b/spynnaker/pyNN/extra_algorithms/algorithms_metadata.xml @@ -88,6 +88,7 @@ dsg_targets uses_advanced_monitors placements + BinariesLoaded report_folder @@ -110,6 +111,71 @@ DataLoaded + + spynnaker.pyNN.extra_algorithms + SpyNNakerHostExecuteDataSpecification + execute_system_data_specs + + + transceiver + MemoryTransceiver + + + machine + MemoryExtendedMachine + + + app_id + APPID + + + dsg_targets + DataSpecificationTargets + + + executable_targets + ExecutableTargets + + + report_folder + ReportFolder + + + java_caller + JavaCaller + + + processor_to_app_data_base_address + ProcessorToAppDataBaseAddress + + + region_sizes + RegionSizes + + + placements + MemoryPlacements + + + + transceiver + machine + app_id + dsg_targets + executable_targets + region_sizes + placements + + + report_folder + java_caller + processor_to_app_data_base_address + + + ProcessorToAppDataBaseAddress + DataLoaded + + spynnaker.pyNN.extra_algorithms.synapse_expander synapse_expander diff --git a/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py b/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py index 14de53bfcb..bb600f4da4 100644 --- a/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py +++ b/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py @@ -387,7 +387,7 @@ def execute_system_data_specs( executable_targets, placements, report_folder=None, java_caller=None, processor_to_app_data_base_address=None): """ Execute the data specs for all system targets. 
- + :param Placements placements: placements :param ~spinnman.transceiver.Transceiver transceiver: the spinnman instance :param ~spinn_machine.Machine machine: diff --git a/spynnaker/pyNN/spynnaker.cfg b/spynnaker/pyNN/spynnaker.cfg index 5f073dc345..a1592c29da 100644 --- a/spynnaker/pyNN/spynnaker.cfg +++ b/spynnaker/pyNN/spynnaker.cfg @@ -37,7 +37,7 @@ drop_late_spikes = True application_to_machine_graph_algorithms = PartitionAndPlacePartitioner machine_graph_to_machine_algorithms = EdgeToNKeysMapper,OneToOnePlacer,NerRoute,BasicTagAllocator,ProcessPartitionConstraints,MallocBasedRoutingInfoAllocator,BasicRoutingTableGenerator,RouterCollisionPotentialReport machine_graph_to_virtual_machine_algorithms = EdgeToNKeysMapper,OneToOnePlacer,NerRoute,BasicTagAllocator,ProcessPartitionConstraints,MallocBasedRoutingInfoAllocator,BasicRoutingTableGenerator,MundyRouterCompressor -loading_algorithms = SpyNNakerHostExecuteDataSpecification,SpynnakerMachineBitFieldUnorderedRouterCompressor,BitFieldCompressorReport +loading_algorithms = SpyNNakerHostExecuteSystemDataSpecification,SpyNNakerHostExecuteDataSpecification,SpynnakerMachineBitFieldUnorderedRouterCompressor,BitFieldCompressorReport #loading_algorithms = HostBasedBitFieldRouterCompressor #loading_algorithms = MundyOnChipRouterCompression From 9b561158a2247e702c5e46352077150ddfe413ab Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Tue, 10 Nov 2020 11:35:06 +0000 Subject: [PATCH 08/12] fixes --- ...ynnaker_host_execute_data_specification.py | 30 +++++++------------ .../abstract_generate_connector_on_machine.py | 4 +-- 2 files changed, 13 insertions(+), 21 deletions(-) diff --git a/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py b/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py index bb600f4da4..07a0acb065 100644 --- a/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py +++ 
b/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py @@ -20,11 +20,12 @@ import numpy from six import iteritems, itervalues -from spinn_front_end_common.interface.interface_functions import \ - HostExecuteDataSpecification -from spinn_front_end_common.interface.interface_functions.host_execute_data_specification import \ - filter_out_system_executables, _MEM_REGIONS, system_cores, \ - filter_out_app_executables +from spinn_front_end_common.interface.interface_functions import ( + HostExecuteDataSpecification) +from spinn_front_end_common.interface.interface_functions. \ + host_execute_data_specification import ( + filter_out_app_executables, MEM_REGIONS, system_cores, + filter_out_system_executables) from spinn_utilities.progress_bar import ProgressBar from spinn_utilities.log import FormatAdapter from data_specification import DataSpecificationExecutor @@ -32,12 +33,9 @@ from spinn_front_end_common.interface.ds.ds_write_info import DsWriteInfo from spinn_front_end_common.utilities.helpful_functions import ( write_address_to_user0) -from spinn_front_end_common.utilities.utility_objs import ( - ExecutableType, DataWritten) +from spinn_front_end_common.utilities.utility_objs import DataWritten from spinn_front_end_common.utilities.helpful_functions import ( emergency_recover_states_from_failure) -from spinn_front_end_common.utilities import globals_variables, \ - helpful_functions from spinn_utilities.timer import Timer from spynnaker.pyNN.models.neuron import PopulationMachineVertex from spynnaker.pyNN.models.utility_models.delays import \ @@ -237,17 +235,11 @@ def execute_application_data_specs( raise def __set_router_timeouts(self): - config = globals_variables.get_simulator().config - n_channels = helpful_functions.read_config_int( - config, "SpinnMan", "multi_packets_in_flight_n_channels") - intermediate_channel_waits = helpful_functions.read_config_int( - config, "SpinnMan", "multi_packets_in_flight_channel_waits") for receiver in 
itervalues(self._core_to_conn_map): receiver.load_system_routing_tables( self._txrx, self._monitors, self._placements) receiver.set_cores_for_data_streaming( - self._txrx, self._monitors, self._placements, - n_channels, intermediate_channel_waits) + self._txrx, self._monitors, self._placements) def __reset_router_timeouts(self): # reset router timeouts @@ -316,7 +308,7 @@ def __python_app( vertex = self._placements.get_vertex_on_processor(x, y, p) - for region_id in _MEM_REGIONS: + for region_id in MEM_REGIONS: region = executor.get_region(region_id) if region is not None: total_sizes[(x, y, p, region_id)] = \ @@ -559,7 +551,7 @@ def __python_execute( matrix_size, connection_builder_size = self.__get_extra_sizes( machine_vertex, executor) - for region_id in _MEM_REGIONS: + for region_id in MEM_REGIONS: region = executor.get_region(region_id) if region is not None: total_size += region.max_write_pointer @@ -567,7 +559,7 @@ def __python_execute( time_total = datetime.timedelta() timer = Timer() # Write each region - for region_id in _MEM_REGIONS: + for region_id in MEM_REGIONS: region = executor.get_region(region_id) if region is None: continue diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py index d4e9d05689..96d837b755 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py @@ -18,8 +18,8 @@ import numpy from six import with_metaclass -from spinn_front_end_common.utilities import globals_variables, \ - helpful_functions +from spinn_front_end_common.utilities import ( + globals_variables, helpful_functions) from spinn_utilities.abstract_base import abstractproperty, AbstractBase from data_specification.enums.data_type import DataType from 
spinn_front_end_common.utilities.constants import BYTES_PER_WORD From a0858b507a4e74cbcade48fe25980d2ce9d6c455 Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Tue, 10 Nov 2020 11:38:00 +0000 Subject: [PATCH 09/12] pep8 --- .../spynnaker_host_execute_data_specification.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py b/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py index 07a0acb065..144ea02b86 100644 --- a/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py +++ b/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py @@ -371,7 +371,7 @@ def __java_app( return ( dw_write_info, {(-1, -1, -1, -1): dsg_targets.sum_over_region_sizes()}, - {(-1, -1, -1): 0}, {(-1, -1, -1): 0}, + {(-1, -1, -1): 0}, {(-1, -1, -1): 0}, dsg_targets.time_to_load_in_seconds()) def execute_system_data_specs( @@ -475,11 +475,11 @@ def __python_sys(self, dsg_targets, executable_targets, region_sizes): for core, reader in progress.over(iteritems(sys_targets)): x, y, p = core - data_written, _matrix, _connection, _total, _ex, _time = \ + data_written, _matrix, _connection, _total, _ex, _time = ( self.__python_execute( - core, reader, self._txrx.write_memory, base_addresses[core], - region_sizes[core], - self._placements.get_vertex_on_processor(x, y, p)) + core, reader, self._txrx.write_memory, + base_addresses[core], region_sizes[core], + self._placements.get_vertex_on_processor(x, y, p))) self._write_info_map[core] = data_written return self._write_info_map From bbd71e3007a3d4c9f910cf9a233462a685962f19 Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Wed, 18 Nov 2020 10:20:41 +0000 Subject: [PATCH 10/12] fix via @andrewgait --- .../models/neural_projections/connectors/abstract_connector.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py index 3c7bcaa2c2..f1a892bf5e 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_connector.py @@ -148,7 +148,7 @@ def _get_delay_maximum(self, delays, n_connections): if high is None: return max_estimated_delay - # The maximum is the minimum of the possible maximums + # The overall maximum cannot exceed the upper bound, high return min(max_estimated_delay, high) elif numpy.isscalar(delays): return delays From 1920c15188d6d3556d4fa4eb370876eed2b016b4 Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Wed, 18 Nov 2020 11:20:48 +0000 Subject: [PATCH 11/12] docs? --- ...ynnaker_host_execute_data_specification.py | 64 +++++++++---------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py b/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py index 144ea02b86..577dad4964 100644 --- a/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py +++ b/spynnaker/pyNN/extra_algorithms/spynnaker_host_execute_data_specification.py @@ -38,8 +38,8 @@ emergency_recover_states_from_failure) from spinn_utilities.timer import Timer from spynnaker.pyNN.models.neuron import PopulationMachineVertex -from spynnaker.pyNN.models.utility_models.delays import \ - DelayExtensionMachineVertex +from spynnaker.pyNN.models.utility_models.delays import ( + DelayExtensionMachineVertex) from spynnaker.pyNN.utilities.constants import POPULATION_BASED_REGIONS logger = FormatAdapter(logging.getLogger(__name__)) @@ -109,7 +109,7 @@ def __java_database(self, dsg_targets, progress, region_sizes): def __java_all(self, dsg_targets, region_sizes): """ Does the Data Specification Execution and loading using Java - :param
DataSpecificationTargets dsg_targets: + :param DataSpecificationTargets dsg_targets: \ map of placement to file path :return: map of of cores to descriptions of what was written :rtype: DsWriteInfo @@ -131,9 +131,9 @@ def __python_all(self, dsg_targets, region_sizes, placements): """ Does the Data Specification Execution and loading using Python :param Placements placements: placements - :param DataSpecificationTargets dsg_targets: + :param DataSpecificationTargets dsg_targets: \ map of placement to file path - :param dict(tuple(int,int,int),int) region_sizes: + :param dict(tuple(int,int,int),int) region_sizes: \ map between vertex and list of region sizes :return: dict of cores to descriptions of what was written :rtype: dict(tuple(int,int,int), DataWritten) @@ -176,32 +176,32 @@ def execute_application_data_specs( disable_advanced_monitor_usage=False): """ Execute the data specs for all non-system targets. - :param ~spinn_machine.Machine machine: + :param ~spinn_machine.Machine machine: \ the python representation of the SpiNNaker machine - :param ~spinnman.transceiver.Transceiver transceiver: + :param ~spinnman.transceiver.Transceiver transceiver:\ the spinnman instance :param int app_id: the application ID of the simulation - :param dict(tuple(int,int,int),int) region_sizes: + :param dict(tuple(int,int,int),int) region_sizes:\ the coord for region sizes for each core - :param DataSpecificationTargets dsg_targets: + :param DataSpecificationTargets dsg_targets:\ map of placement to file path - :param bool uses_advanced_monitors: + :param bool uses_advanced_monitors:\ whether to use fast data in protocol - :param ~spinnman.model.ExecutableTargets executable_targets: + :param ~spinnman.model.ExecutableTargets executable_targets:\ what core will running what binary - :param ~pacman.model.placements.Placements placements: + :param ~pacman.model.placements.Placements placements:\ where vertices are located - :param list(ExtraMonitorSupportMachineVertex) 
extra_monitor_cores: + :param list(ExtraMonitorSupportMachineVertex) extra_monitor_cores:\ the deployed extra monitors, if any - :param extra_monitor_cores_to_ethernet_connection_map: + :param extra_monitor_cores_to_ethernet_connection_map:\ how to talk to extra monitor cores - :type extra_monitor_cores_to_ethernet_connection_map: + :type extra_monitor_cores_to_ethernet_connection_map:\ dict(tuple(int,int), DataSpeedUpPacketGatherMachineVertex) - :param processor_to_app_data_base_address: + :param processor_to_app_data_base_address:\ map of placement and DSG data - :type processor_to_app_data_base_address: + :type processor_to_app_data_base_address:\ dict(tuple(int,int,int), DsWriteInfo) - :param bool disable_advanced_monitor_usage: + :param bool disable_advanced_monitor_usage:\ whether to avoid using advanced monitors even if they're available :return: map of placement and DSG data :rtype: dict(tuple(int,int,int),DataWritten) or DsWriteInfo @@ -380,21 +380,21 @@ def execute_system_data_specs( java_caller=None, processor_to_app_data_base_address=None): """ Execute the data specs for all system targets. 
:param Placements placements: placements - :param ~spinnman.transceiver.Transceiver transceiver: + :param ~spinnman.transceiver.Transceiver transceiver:\ the spinnman instance - :param ~spinn_machine.Machine machine: + :param ~spinn_machine.Machine machine:\ the python representation of the spinnaker machine :param int app_id: the application ID of the simulation - :param dict(tuple(int,int,int),str) dsg_targets: + :param dict(tuple(int,int,int),str) dsg_targets:\ map of placement to file path - :param dict(tuple(int,int,int),int) region_sizes: + :param dict(tuple(int,int,int),int) region_sizes:\ the coordinates for region sizes for each core - :param ~spinnman.model.ExecutableTargets executable_targets: + :param ~spinnman.model.ExecutableTargets executable_targets:\ the map between binaries and locations and executable types :param str report_folder: :param JavaCaller java_caller: :param processor_to_app_data_base_address: - :type processor_to_app_data_base_address: + :type processor_to_app_data_base_address:\ dict(tuple(int,int,int),DataWritten) :return: map of placement and DSG data, and loaded data flag. 
:rtype: dict(tuple(int,int,int),DataWritten) or DsWriteInfo @@ -417,11 +417,11 @@ def execute_system_data_specs( def __java_sys(self, dsg_targets, executable_targets, region_sizes): """ Does the Data Specification Execution and loading using Java - :param DataSpecificationTargets dsg_targets: + :param DataSpecificationTargets dsg_targets:\ map of placement to file path - :param ~spinnman.model.ExecutableTargets executable_targets: + :param ~spinnman.model.ExecutableTargets executable_targets:\ the map between binaries and locations and executable types - :param dict(tuple(int,int,int),int) region_sizes: + :param dict(tuple(int,int,int),int) region_sizes:\ the coord for region sizes for each core :return: map of cores to descriptions of what was written :rtype: DsWriteInfo @@ -446,11 +446,11 @@ def __java_sys(self, dsg_targets, executable_targets, region_sizes): def __python_sys(self, dsg_targets, executable_targets, region_sizes): """ Does the Data Specification Execution and loading using Python - :param DataSpecificationTargets dsg_targets: + :param DataSpecificationTargets dsg_targets:\ map of placement to file path - :param ~spinnman.model.ExecutableTargets executable_targets: + :param ~spinnman.model.ExecutableTargets executable_targets:\ the map between binaries and locations and executable types - :param dict(tuple(int,int,int),int) region_sizes: + :param dict(tuple(int,int,int),int) region_sizes:\ the coord for region sizes for each core :return: dict of cores to descriptions of what was written :rtype: dict(tuple(int,int,int),DataWritten) @@ -489,8 +489,8 @@ def __malloc_region_storage(self, core, size): the core and our caller where that storage is. :param tuple(int,int,int) core: Which core we're talking about. - :param int size: - The total size of all storage for regions on that core, including + :param int size:\ + The total size of all storage for regions on that core, including\ for the header metadata. 
:return: address of region header table (not yet filled) :rtype: int From 9e7f6b478d0991e0cd4e2f83d3001b5336a29119 Mon Sep 17 00:00:00 2001 From: alan-stokes Date: Fri, 27 Nov 2020 14:14:04 +0000 Subject: [PATCH 12/12] thanks to @rowley who found this one --- .../connectors/abstract_generate_connector_on_machine.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py index 96d837b755..0aeb6d0697 100644 --- a/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py +++ b/spynnaker/pyNN/models/neural_projections/connectors/abstract_generate_connector_on_machine.py @@ -90,13 +90,16 @@ def _generate_lists_on_machine(self, values): :rtype: bool """ + if not self.__use_expander: + return False + # Scalars are fine on the machine if numpy.isscalar(values): return True # Only certain types of random distributions are supported for\ # generation on the machine - if isinstance(values, RandomDistribution) and self.__use_expander: + if isinstance(values, RandomDistribution): return values.name in PARAM_TYPE_BY_NAME return False