From ee3f46b688a5d40b6c76e999d360ca55214833ab Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 16:23:49 +0100 Subject: [PATCH 01/29] Create kspaceFirstOrder1D.py --- kwave/kspaceFirstOrder1D.py | 974 ++++++++++++++++++++++++++++++++++++ 1 file changed, 974 insertions(+) create mode 100644 kwave/kspaceFirstOrder1D.py diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py new file mode 100644 index 00000000..7d2ed643 --- /dev/null +++ b/kwave/kspaceFirstOrder1D.py @@ -0,0 +1,974 @@ +import numpy as np +import scipy.fft +from tqdm import tqdm +from typing import Union + +from kwave.kgrid import kWaveGrid +from kwave.kmedium import kWaveMedium +from kwave.ksensor import kSensor +from kwave.ksource import kSource +from kwave.kWaveSimulation import kWaveSimulation + +from kwave.ktransducer import NotATransducer + +from kwave.utils.data import scale_time +from kwave.utils.math import sinc +from kwave.utils.pml import get_pml +from kwave.utils.tictoc import TicToc +from kwave.utils.dotdictionary import dotdict + +from kwave.options.simulation_options import SimulationOptions + +from kwave.kWaveSimulation_helper import extract_sensor_data + +def kspace_first_order_1D(kgrid: kWaveGrid, + source: kSource, + sensor: Union[NotATransducer, kSensor], + medium: kWaveMedium, + simulation_options: SimulationOptions, + verbose: bool = False): + + """ + KSPACEFIRSTORDER1D 1D time-domain simulation of wave propagation. + + DESCRIPTION: + kspaceFirstOrder1D simulates the time-domain propagation of + compressional waves through a one-dimensional homogeneous or + heterogeneous acoustic medium given four input structures: kgrid, + medium, source, and sensor. The computation is based on a first-order + k-space model which accounts for power law absorption and a + heterogeneous sound speed and density. If medium.BonA is specified, + cumulative nonlinear effects are also modelled. At each time-step + (defined by kgrid.dt and kgrid.Nt or kgrid.t_array), the acoustic + field parameters at the positions defined by sensor.mask are recorded + and stored. If kgrid.t_array is set to 'auto', this array is + automatically generated using the makeTime method of the kWaveGrid + class. An anisotropic absorbing boundary layer called a perfectly + matched layer (PML) is implemented to prevent waves that leave one + side of the domain being reintroduced from the opposite side (a + consequence of using the FFT to compute the spatial derivatives in + the wave equation). This allows infinite domain simulations to be + computed using small computational grids. + + For a homogeneous medium the formulation is exact and the time-steps + are only limited by the effectiveness of the perfectly matched layer. + For a heterogeneous medium, the solution represents a leap-frog + pseudospectral method with a k-space correction that improves the + accuracy of computing the temporal derivatives. This allows larger + time-steps to be taken for the same level of accuracy compared to + conventional pseudospectral time-domain methods. The computational + grids are staggered both spatially and temporally. + + An initial pressure distribution can be specified by assigning a + matrix (the same size as the computational grid) of arbitrary numeric + values to source.p0. A time varying pressure source can similarly be + specified by assigning a binary matrix (i.e., a matrix of 1's and 0's + with the same dimensions as the computational grid) to source.p_mask + where the 1's represent the grid points that form part of the source. + The time varying input signals are then assigned to source.p. This + can be a single time series (in which case it is applied to all + source elements), or a matrix of time series following the source + elements using MATLAB's standard column-wise linear matrix index + ordering. A time varying velocity source can be specified in an + analogous fashion, where the source location is specified by + source.u_mask, and the time varying input velocity is assigned to + source.ux. + + The field values are returned as arrays of time series at the sensor + locations defined by sensor.mask. This can be defined in three + different ways. (1) As a binary matrix (i.e., a matrix of 1's and 0's + with the same dimensions as the computational grid) representing the + grid points within the computational grid that will collect the data. + (2) As the grid coordinates of two opposing ends of a line in the + form [x1; x2]. This is equivalent to using a binary sensor mask + covering the same region, however, the output is indexed differently + as discussed below. (3) As a series of Cartesian coordinates within + the grid which specify the location of the pressure values stored at + each time step. If the Cartesian coordinates don't exactly match the + coordinates of a grid point, the output values are calculated via + interpolation. The Cartesian points must be given as a 1 by N matrix + corresponding to the x positions, where the Cartesian origin is + assumed to be in the center of the grid. If no output is required, + the sensor input can be replaced with an empty array []. + + If sensor.mask is given as a set of Cartesian coordinates, the + computed sensor_data is returned in the same order. If sensor.mask is + given as a binary matrix, sensor_data is returned using MATLAB's + standard column-wise linear matrix index ordering. In both cases, the + recorded data is indexed as sensor_data(sensor_point_index, + time_index). For a binary sensor mask, the field values at a + particular time can be restored to the sensor positions within the + computation grid using unmaskSensorData. If sensor.mask is given as a + list of opposing ends of a line, the recorded data is indexed as + sensor_data(line_index).p(x_index, time_index), where x_index + corresponds to the grid index within the line, and line_index + corresponds to the number of lines if more than one is specified. + + By default, the recorded acoustic pressure field is passed directly + to the output sensor_data. However, other acoustic parameters can + also be recorded by setting sensor.record to a cell array of the form + {'p', 'u', 'p_max', ...}. For example, both the particle velocity and + the acoustic pressure can be returned by setting sensor.record = + {'p', 'u'}. If sensor.record is given, the output sensor_data is + returned as a structure with the different outputs appended as + structure fields. For example, if sensor.record = {'p', 'p_final', + 'p_max', 'u'}, the output would contain fields sensor_data.p, + sensor_data.p_final, sensor_data.p_max, and sensor_data.ux. Most of + the output parameters are recorded at the given sensor positions and + are indexed as sensor_data.field(sensor_point_index, time_index) or + sensor_data(line_index).field(x_index, time_index) if using a sensor + mask defined as opposing ends of a line. The exceptions are the + averaged quantities ('p_max', 'p_rms', 'u_max', 'p_rms', 'I_avg'), + the 'all' quantities ('p_max_all', 'p_min_all', 'u_max_all', + 'u_min_all'), and the final quantities ('p_final', 'u_final'). The + averaged quantities are indexed as + sensor_data.p_max(sensor_point_index) or + sensor_data(line_index).p_max(x_index) if using line ends, while the + final and 'all' quantities are returned over the entire grid and are + always indexed as sensor_data.p_final(nx), regardless of the type of + sensor mask. + + kspaceFirstOrder1D may also be used for time reversal image + reconstruction by assigning the time varying pressure recorded over + an arbitrary sensor surface to the input field + sensor.time_reversal_boundary_data. This data is then enforced in + time reversed order as a time varying Dirichlet boundary condition + over the sensor surface given by sensor.mask. The boundary data must + be indexed as sensor.time_reversal_boundary_data(sensor_point_index, + time_index). If sensor.mask is given as a set of Cartesian + coordinates, the boundary data must be given in the same order. An + equivalent binary sensor mask (computed using nearest neighbour + interpolation) is then used to place the pressure values into the + computational grid at each time step. If sensor.mask is given as a + binary matrix of sensor points, the boundary data must be ordered + using MATLAB's standard column-wise linear matrix indexing. If no + additional inputs are required, the source input can be replaced with + an empty array []. + + Acoustic attenuation compensation can also be included during time + reversal image reconstruction by assigning the absorption parameters + medium.alpha_coeff and medium.alpha_power and reversing the sign of + the absorption term by setting medium.alpha_sign = [-1, 1]. This + forces the propagating waves to grow according to the absorption + parameters instead of decay. The reconstruction should then be + regularised by assigning a filter to medium.alpha_filter (this can be + created using getAlphaFilter). + + Note: To run a simple photoacoustic image reconstruction example + using time reversal (that commits the 'inverse crime' of using the + same numerical parameters and model for data simulation and image + reconstruction), the sensor_data returned from a k-Wave simulation + can be passed directly to sensor.time_reversal_boundary_data with the + input fields source.p0 and source.p removed or set to zero. + + USAGE: + sensor_data = kspaceFirstOrder1D(kgrid, medium, source, sensor) + sensor_data = kspaceFirstOrder1D(kgrid, medium, source, sensor, ...) + + INPUTS: + The minimum fields that must be assigned to run an initial value problem + (for example, a photoacoustic forward simulation) are marked with a *. + + kgrid* - k-Wave grid object returned by kWaveGrid + containing Cartesian and k-space grid fields + kgrid.t_array* - evenly spaced array of time values [s] (set + to 'auto' by kWaveGrid) + + + medium.sound_speed* - sound speed distribution within the acoustic + medium [m/s] + medium.sound_speed_ref - reference sound speed used within the + k-space operator (phase correction term) + [m/s] + medium.density* - density distribution within the acoustic + medium [kg/m^3] + medium.BonA - parameter of nonlinearity + medium.alpha_power - power law absorption exponent + medium.alpha_coeff - power law absorption coefficient + [dB/(MHz^y cm)] + medium.alpha_mode - optional input to force either the + absorption or dispersion terms in the + equation of state to be excluded; valid + inputs are 'no_absorption' or + 'no_dispersion' + medium.alpha_filter - frequency domain filter applied to the + absorption and dispersion terms in the + equation of state + medium.alpha_sign - two element array used to control the sign + of absorption and dispersion terms in the + equation of state + + + source.p0* - initial pressure within the acoustic medium + source.p - time varying pressure at each of the source + positions given by source.p_mask + source.p_mask - binary matrix specifying the positions of + the time varying pressure source + distribution + source.p_mode - optional input to control whether the input + pressure is injected as a mass source or + enforced as a dirichlet boundary condition; + valid inputs are 'additive' (the default) or + 'dirichlet' + source.ux - time varying particle velocity in the + x-direction at each of the source positions + given by source.u_mask + source.u_mask - binary matrix specifying the positions of + the time varying particle velocity + distribution + source.u_mode - optional input to control whether the input + velocity is applied as a force source or + enforced as a dirichlet boundary condition; + valid inputs are 'additive' (the default) or + 'dirichlet' + + + sensor.mask* - binary matrix or a set of Cartesian points + where the pressure is recorded at each + time-step + sensor.record - cell array of the acoustic parameters to + record in the form sensor.record = {'p', + 'u', ...}; valid inputs are: + 'p' (acoustic pressure) + 'p_max' (maximum pressure) + 'p_min' (minimum pressure) + 'p_rms' (RMS pressure) + 'p_final' (final pressure field at all grid points) + 'p_max_all' (maximum pressure at all grid points) + 'p_min_all' (minimum pressure at all grid points) + 'u' (particle velocity) + 'u_max' (maximum particle velocity) + 'u_min' (minimum particle velocity) + 'u_rms' (RMS particle velocity) + 'u_final' (final particle velocity field at all grid points) + 'u_max_all' (maximum particle velocity at all grid points) + 'u_min_all' (minimum particle velocity at all grid points) + 'u_non_staggered' (particle velocity on non-staggered grid) + 'I' (time varying acoustic intensity) + 'I_avg' (average acoustic intensity) + sensor.record_start_index + - time index at which the sensor should start + recording the data specified by + sensor.record (default = 1) + sensor.time_reversal_boundary_data + - time varying pressure enforced as a + Dirichlet boundary condition over + sensor.mask + sensor.frequency_response + - two element array specifying the center + frequency and percentage bandwidth of a + frequency domain Gaussian filter applied to + the sensor_data + + Note: For heterogeneous medium parameters, medium.sound_speed and + medium.density must be given in matrix form with the same dimensions as + kgrid. For homogeneous medium parameters, these can be given as single + numeric values. If the medium is homogeneous and velocity inputs or + outputs are not required, it is not necessary to specify medium.density. + + OPTIONAL INPUTS: + Optional 'string', value pairs that may be used to modify the default + computational settings. + + 'CartInterp' - Interpolation mode used to extract the + pressure when a Cartesian sensor mask is + given. If set to 'nearest' and more than one + Cartesian point maps to the same grid point, + duplicated data points are discarded and + sensor_data will be returned with less + points than that specified by sensor.mask + (default = 'linear'). + 'CreateLog' - Boolean controlling whether the command line + output is saved using the diary function + with a date and time stamped filename + (default = False). + 'DataCast' - String input of the data type that variables + are cast to before computation. For example, + setting to 'single' will speed up the + computation time (due to the improved + efficiency of fftn and ifftn for this data + type) at the expense of a loss in precision. + This variable is also useful for utilising + GPU parallelisation through libraries such + as the Parallel Computing Toolbox by setting + 'DataCast' to 'gpuArray-single' (default = + 'off'). + 'DataRecast' - Boolean controlling whether the output data + is cast back to double precision. If set to + False, sensor_data will be returned in the + data format set using the 'DataCast' option. + 'DisplayMask' - Binary matrix overlaid onto the animated + simulation display. Elements set to 1 within + the display mask are set to black within the + display (default = sensor.mask). + 'LogScale' - Boolean controlling whether the pressure + field is log compressed before display + (default = False). The data is compressed by + scaling both the positive and negative + values between 0 and 1 (truncating the data + to the given plot scale), adding a scalar + value (compression factor) and then using + the corresponding portion of a log10 plot + for the compression (the negative parts are + remapped to be negative thus the default + color scale will appear unchanged). The + amount of compression can be controlled by + adjusting the compression factor which can + be given in place of the Boolean input. The + closer the compression factor is to zero, + the steeper the corresponding part of the + log10 plot used, and the greater the + compression (the default compression factor + is 0.02). + 'MovieArgs' - Settings for VideoWriter. Parameters must be + given as {'param', value, ...} pairs within + a cell array (default = {}), where 'param' + corresponds to a writable property of a + VideoWriter object. + 'MovieName' - Name of the movie produced when + 'RecordMovie' is set to true (default = + 'date-time-kspaceFirstOrder1D'). + 'MovieProfile' - Profile input passed to VideoWriter. + 'PlotFreq' - The number of iterations which must pass + before the simulation plot is updated + (default = 10). + 'PlotLayout' - Boolean controlling whether a four panel + plot of the initial simulation layout is + produced (initial pressure, sensor mask, + sound speed, density) (default = False). + 'PlotPML' - Boolean controlling whether the perfectly + matched layer is shown in the simulation + plots. If set to False, the PML is not + displayed (default = true). + 'PlotScale' - [min, max] values used to control the + scaling for imagesc (visualisation). If set + to 'auto', a symmetric plot scale is chosen + automatically for each plot frame. + 'PlotSim' - Boolean controlling whether the simulation + iterations are progressively plotted + (default = true). + 'PMLAlpha' - Absorption within the perfectly matched + layer in Nepers per grid point (default = + 2). + 'PMLInside' - Boolean controlling whether the perfectly + matched layer is inside or outside the grid. + If set to False, the input grids are + enlarged by PMLSize before running the + simulation (default = true). + 'PMLSize' - Size of the perfectly matched layer in grid + points. To remove the PML, set the + appropriate PMLAlpha to zero rather than + forcing the PML to be of zero size (default + = 20). + 'RecordMovie' - Boolean controlling whether the displayed + image frames are captured and stored as a + movie using VideoWriter (default = False). + 'Smooth' - Boolean controlling whether source.p0, + medium.sound_speed, and medium.density are + smoothed using smooth before computation. + 'Smooth' can either be given as a single + Boolean value or as a 3 element array to + control the smoothing of source.p0, + medium.sound_speed, and medium.density, + independently (default = [true, False, + False]). + + OUTPUTS: + If sensor.record is not defined by the user: + sensor_data - time varying pressure recorded at the sensor + positions given by sensor.mask + + If sensor.record is defined by the user: + sensor_data.p - time varying pressure recorded at the sensor + positions given by sensor.mask (returned if + 'p' is set) + sensor_data.p_max - maximum pressure recorded at the sensor + positions given by sensor.mask (returned if + 'p_max' is set) + sensor_data.p_min - minimum pressure recorded at the sensor + positions given by sensor.mask (returned if + 'p_min' is set) + sensor_data.p_rms - rms of the time varying pressure recorded at + the sensor positions given by sensor.mask + (returned if 'p_rms' is set) + sensor_data.p_final - final pressure field at all grid points + within the domain (returned if 'p_final' is + set) + sensor_data.p_max_all - maximum pressure recorded at all grid points + within the domain (returned if 'p_max_all' + is set) + sensor_data.p_min_all - minimum pressure recorded at all grid points + within the domain (returned if 'p_min_all' + is set) + sensor_data.ux - time varying particle velocity in the + x-direction recorded at the sensor positions + given by sensor.mask (returned if 'u' is + set) + sensor_data.ux_max - maximum particle velocity in the x-direction + recorded at the sensor positions given by + sensor.mask (returned if 'u_max' is set) + sensor_data.ux_min - minimum particle velocity in the x-direction + recorded at the sensor positions given by + sensor.mask (returned if 'u_min' is set) + sensor_data.ux_rms - rms of the time varying particle velocity in + the x-direction recorded at the sensor + positions given by sensor.mask (returned if + 'u_rms' is set) + sensor_data.ux_final - final particle velocity field in the + x-direction at all grid points within the + domain (returned if 'u_final' is set) + sensor_data.ux_max_all - maximum particle velocity in the x-direction + recorded at all grid points within the + domain (returned if 'u_max_all' is set) + sensor_data.ux_min_all - minimum particle velocity in the x-direction + recorded at all grid points within the + domain (returned if 'u_min_all' is set) + sensor_data.ux_non_staggered + - time varying particle velocity in the + x-direction recorded at the sensor positions + given by sensor.mask after shifting to the + non-staggered grid (returned if + 'u_non_staggered' is set) + sensor_data.Ix - time varying acoustic intensity in the + x-direction recorded at the sensor positions + given by sensor.mask (returned if 'I' is + set) + sensor_data.Ix_avg - average acoustic intensity in the + x-direction recorded at the sensor positions + given by sensor.mask (returned if 'I_avg' is + set) + + ABOUT: + author - Bradley Treeby and Ben Cox + date - 22nd April 2009 + last update - 25th July 2019 + + This function is part of the k-Wave Toolbox (http://www.k-wave.org) + Copyright (C) 2009-2019 Bradley Treeby and Ben Cox + + See also kspaceFirstOrderAS, kspaceFirstOrder2D, kspaceFirstOrder3D, + kWaveGrid, kspaceSecondOrder + + This file is part of k-Wave. k-Wave is free software: you can + redistribute it and/or modify it under the terms of the GNU Lesser + General Public License as published by the Free Software Foundation, + either version 3 of the License, or (at your option) any later version. + + k-Wave is distributed in the hope that it will be useful, but WITHOUT ANY + WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for + more details. + + You should have received a copy of the GNU Lesser General Public License + along with k-Wave. If not, see . + + """ + + # ========================================================================= + # CHECK INPUT STRUCTURES AND OPTIONAL INPUTS + # ========================================================================= + + # start the timer and store the start time + timer = TicToc() + timer.tic() + + # run script to check inputs and create the required arrays + k_sim = kWaveSimulation(kgrid=kgrid, source=source, sensor=sensor, medium=medium, + simulation_options=simulation_options) + + # this will create the sensor_data dotdict + k_sim.input_checking("kspaceFirstOrder1D") + + # aliases from simulation + sensor_data = k_sim.sensor_data + options = k_sim.options + record = k_sim.record + + # ========================================================================= + # CALCULATE MEDIUM PROPERTIES ON STAGGERED GRID + # ========================================================================= + + # interpolate the values of the density at the staggered grid locations + # where sgx = (x + dx/2) + rho0 = k_sim.rho0 + m_rho0: int = np.squeeze(rho0).ndim + + if (m_rho0 > 0 and options.use_sg): + + points = np.squeeze(k_sim.kgrid.x_vec) + + # rho0 is heterogeneous and staggered grids are used + rho0_sgx = np.interp(points + k_sim.kgrid.dx / 2.0, points, np.squeeze(k_sim.rho0)) + + # set values outside of the interpolation range to original values + rho0_sgx[np.isnan(rho0_sgx)] = np.squeeze(k_sim.rho0)[np.isnan(rho0_sgx)] + + else: + # rho0 is homogeneous or staggered grids are not used + rho0_sgx = k_sim.rho0 + + # invert rho0 so it doesn't have to be done each time step + rho0_sgx_inv = 1.0 / rho0_sgx + + rho0_sgx_inv = rho0_sgx_inv[:, np.newaxis] + + # clear unused variables + # del rho0_sgx + + # ========================================================================= + # PREPARE DERIVATIVE AND PML OPERATORS + # ========================================================================= + + # get the regular PML operators based on the reference sound speed and PML settings + Nx = k_sim.kgrid.Nx + dx = k_sim.kgrid.dx + dt = k_sim.kgrid.dt + Nt = k_sim.kgrid.Nt + + pml_x_alpha = options.pml_x_alpha + pml_x_size = options.pml_x_size + c_ref = k_sim.c_ref + + kx_vec = np.squeeze(k_sim.kgrid.k_vec[0]) + + c0 = medium.sound_speed + + # get the PML operators based on the reference sound speed and PML settings + pml_x = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, False, 0).T + pml_x_sgx = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, True, 0).T + + # define the k-space derivative operator + ddx_k = scipy.fft.ifftshift(1j * kx_vec) + ddx_k = ddx_k[:, np.newaxis] + + # define the staggered grid shift operators (the option options.use_sg exists for debugging) + if options.use_sg: + ddx_k_shift_pos = scipy.fft.ifftshift( np.exp( 1j * kx_vec * dx / 2.0)) + ddx_k_shift_neg = scipy.fft.ifftshift( np.exp(-1j * kx_vec * dx / 2.0)) + ddx_k_shift_pos = ddx_k_shift_pos[:, np.newaxis] + ddx_k_shift_neg = ddx_k_shift_neg[:, np.newaxis] + else: + ddx_k_shift_pos = 1.0 + ddx_k_shift_neg = 1.0 + + + # create k-space operator (the option options.use_kspace exists for debugging) + if options.use_kspace: + kappa = scipy.fft.ifftshift(sinc(c_ref * kgrid.k * kgrid.dt / 2.0)) + kappa = kappa[:, np.newaxis] + if (hasattr(options, 'source_p') and hasattr(k_sim.source, 'p_mode')) and (k_sim.source.p_mode == 'additive') or \ + (hasattr(options, 'source_ux') and hasattr(k_sim.source, 'u_mode')) and (k_sim.source.u_mode == 'additive'): + source_kappa = scipy.fft.ifftshift(np.cos (c_ref * kgrid.k * kgrid.dt / 2.0)) + source_kappa = source_kappa[:, np.newaxis] + else: + kappa = 1.0 + source_kappa = 1.0 + + + # ========================================================================= + # DATA CASTING + # ========================================================================= + + # preallocate the loop variables using the castZeros anonymous function + # (this creates a matrix of zeros in the data type specified by data_cast) + if not (options.data_cast == 'off'): + myType = np.single + else: + myType = np.double + + grid_shape = (Nx, 1) + + # preallocate the loop variables + p = np.zeros(grid_shape, dtype=myType) + rhox = np.zeros(grid_shape, dtype=myType) + ux_sgx = np.zeros(grid_shape, dtype=myType) + p_k = np.zeros(grid_shape, dtype=myType) + + c0 = c0.astype(myType) + + verbose: bool = False + + # ========================================================================= + # CREATE INDEX VARIABLES + # ========================================================================= + + # setup the time index variable + if (not options.time_rev): + index_start: int = 0 + index_step: int = 1 + index_end: int = Nt + else: + # throw error for unsupported feature + raise TypeError('Time reversal using sensor.time_reversal_boundary_data is not currently supported.') + + # reverse the order of the input data + sensor.time_reversal_boundary_data = np.fliplr(sensor.time_reversal_boundary_data) + index_start = 0 + index_step = 0 + + # stop one time point before the end so the last points are not + # propagated + index_end = kgrid.Nt - 1 + + # These should be zero indexed + if hasattr(k_sim, 's_source_sig_index') and k_sim.s_source_pos_index is not None: + k_sim.s_source_pos_index = np.squeeze(k_sim.s_source_pos_index) - int(1) + + if hasattr(k_sim, 'u_source_pos_index') and k_sim.u_source_pos_index is not None: + k_sim.u_source_pos_index = np.squeeze(k_sim.u_source_pos_index) - int(1) + + if hasattr(k_sim, 'p_source_pos_index') and k_sim.p_source_pos_index is not None: + k_sim.p_source_pos_index = np.squeeze(k_sim.p_source_pos_index) - int(1) + + if hasattr(k_sim, 's_source_sig_index') and k_sim.s_source_sig_index is not None: + k_sim.s_source_sig_index = np.squeeze(k_sim.s_source_sig_index) - int(1) + + if hasattr(k_sim, 'u_source_sig_index') and k_sim.u_source_sig_index is not None: + k_sim.u_source_sig_index = np.squeeze(k_sim.u_source_sig_index) - int(1) + + if hasattr(k_sim, 'p_source_sig_index') and k_sim.p_source_sig_index is not None: + k_sim.p_source_sig_index = np.squeeze(k_sim.p_source_sig_index) - int(1) + + # # ========================================================================= + # # PREPARE VISUALISATIONS + # # ========================================================================= + + # # pre-compute suitable axes scaling factor + # if options.plot_layout or options.plot_sim + # [x_sc, scale, prefix] = scaleSI(max(kgrid.x)); ##ok + # end + + # # run subscript to plot the simulation layout if 'PlotLayout' is set to true + # if options.plot_layout + # kspaceFirstOrder_plotLayout; + # end + + # # initialise the figure used for animation if 'PlotSim' is set to 'true' + # if options.plot_sim + # kspaceFirstOrder_initialiseFigureWindow; + # end + + # # initialise movie parameters if 'RecordMovie' is set to 'true' + # if options.record_movie + # kspaceFirstOrder_initialiseMovieParameters; + # end + + # ========================================================================= + # LOOP THROUGH TIME STEPS + # ========================================================================= + + # update command line status + t0 = timer.toc() + t0_scale = scale_time(t0) + print('\tprecomputation completed in', t0_scale) + print('\tstarting time loop...') + + # start time loop + for t_index in tqdm(np.arange(index_start, index_end, index_step, dtype=int)): + + # print("0.", np.shape(p)) + + # enforce time reversal bounday condition + # if options.time_rev: + # # load pressure value and enforce as a Dirichlet boundary condition + # p[k_sim.sensor_mask_index] = sensor.time_reversal_boundary_data[:, t_index] + # # update p_k + # p_k = scipy.fft.fft(p) + # # compute rhox using an adiabatic equation of state + # rhox_mod = p / c0**2 + # rhox[k_sim.sensor_mask_index] = rhox_mod[k_sim.sensor_mask_index] + + + # print("1.", np.shape(p)) + + # calculate ux at the next time step using dp/dx at the current time step + if not options.nonuniform_grid and not options.use_finite_difference: + + if verbose: + print("Here 1-----.", np.shape(pml_x), np.shape(pml_x_sgx), np.shape(ux_sgx), + '......', np.shape(ddx_k), np.shape(ddx_k_shift_pos), np.shape(kappa), + ',,,,,,', np.shape(p_k), np.shape(rho0_sgx_inv)) + + + + # calculate gradient using the k-space method on a regular grid + ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - + dt * rho0_sgx_inv * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_pos * kappa * p_k, axes=(0,)))) + + elif options.use_finite_difference: + print("\t\tEXIT! options.use_finite_difference") + match options.use_finite_difference: + case 2: + + # calculate gradient using second-order accurate finite + # difference scheme (including half step forward) + dpdx = (np.append(p[1:], 0.0) - p) / kgrid.dx + # dpdx = ([p(2:end); 0] - p) / kgrid.dx; + ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * dpdx ) + + case 4: + + # calculate gradient using fourth-order accurate finite + # difference scheme (including half step forward) + # dpdx = ([0; p(1:(end-1))] - 27*p + 27*[p(2:end); 0] - [p(3:end); 0; 0])/(24*kgrid.dx); + dpdx = (np.insert(p[:-1], 0, 0) - 27.0 * p + 27 * np.append(p[1:], 0.0) - np.append(p[2:], [0, 0])) / (24.0 * kgrid.dx) + ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * dpdx ) + + else: + print("\t\tEXIT! else", ) + + # calculate gradient using the k-space method on a non-uniform grid + # via the mapped pseudospectral method + ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - + dt * rho0_sgx_inv * k_sim.kgrid.dxudxn_sgx * np.real(scipy.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) + + + # print("2.", np.shape(p)) + + # # add in the velocity source term + # if (k_sim.source_ux is not False and t_index < np.shape(source.ux)[1]): + # #if options.source_ux >= t_index: + # match source.u_mode: + # case 'dirichlet': + # # enforce the source values as a dirichlet boundary condition + # ux_sgx[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] + # case 'additive': + # # extract the source values into a matrix + # source_mat = np.zeros([kgrid.Nx, 1]) + # source_mat[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] + # # apply the k-space correction + # source_mat = np.real(scipy.fft.ifft(source_kappa * scipy.fft.fft(source_mat))) + # # add the source values to the existing field values including the k-space correction + # ux_sgx = ux_sgx + source_mat + # case 'additive-no-correction': + # # add the source values to the existing field values + # ux_sgx[k_sim.u_source_pos_index] = ux_sgx[k_sim.u_source_pos_index] + source.ux[k_sim.u_source_sig_index, t_index] + + + # print("3.", np.shape(p)) + + # calculate du/dx at the next time step + if not options.nonuniform_grid and not options.use_finite_difference: + + if verbose: + print("Here 1.", np.shape(p), np.shape(ux_sgx), np.shape(ddx_k), np.shape(ddx_k_shift_neg), np.shape(kappa)) + + # calculate gradient using the k-space method on a regular grid + duxdx = np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * scipy.fft.fftn(ux_sgx, axes=(0,)), axes=(0,) ) ) + + if verbose: + print("Here 1(end). duxdx:", np.shape(duxdx)) + + elif options.use_finite_difference: + print("\t\tEXIT! options.use_finite_difference") + match options.use_finite_difference: + case 2: + + # calculate gradient using second-order accurate finite difference scheme (including half step backward) + # duxdx = (ux_sgx - [0; ux_sgx(1:end - 1)]) / kgrid.dx; + duxdx = (ux_sgx - np.append(ux_sgx[:-1], 0)) / kgrid.dx + + case 4: + + # calculate gradient using fourth-order accurate finite difference scheme (including half step backward) + duxdx = (np.append([0, 0], ux_sgx[:-2]) - 27.0 * np.append(0, ux_sgx[:-1]) + 27.0 * ux_sgx - np.append(ux_sgx[1:], 0)) / (24.0 * kgrid.dx) + # duxdx = ([0; 0; ux_sgx(1:(end - 2))] - 27 * [0; ux_sgx(1:(end - 1))] + 27 * ux_sgx - [ux_sgx(2:end); 0]) / (24 * kgrid.dx); + + else: + # calculate gradients using a non-uniform grid via the mapped + # pseudospectral method + duxdx = kgrid.dxudxn * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * scipy.fft.fftn(ux_sgx, axes=(0,)), axes=(0,))) + + + # print("4.", np.shape(p)) + + # calculate rhox at the next time step + if not k_sim.is_nonlinear: + # use linearised mass conservation equation + + if verbose: + print("pre:", pml_x.shape, rhox.shape, duxdx.shape, dt, rho0.shape) + + rhox = pml_x * (pml_x * rhox - dt * rho0 * duxdx) + + if verbose: + print("post:", pml_x.shape, rhox.shape, duxdx.shape, dt, rho0.shape) + + else: + # use nonlinear mass conservation equation (explicit calculation) + rhox = pml_x * (pml_x * rhox - dt * (2.0 * rhox + rho0) * duxdx) + + # print("5.", np.shape(p)) + + # add in the pre-scaled pressure source term as a mass source + # if options.source_p >= t_index: + # if (k_sim.source_p is not False and t_index < np.shape(source.p)[1]): + # print("??????????") + # match source.p_mode: + # case 'dirichlet': + # # enforce source values as a dirichlet boundary condition + # rhox[k_sim.p_source_pos_index] = source.p[k_sim.p_source_sig_index, t_index] + # case 'additive': + # # extract the source values into a matrix + # source_mat = np.zeros((kgrid.Nx, 1), dtype=myType) + # source_mat[k_sim.p_source_pos_index] = source.p[k_sim.p_source_sig_index, t_index] + # # apply the k-space correction + # source_mat = np.real(scipy.fft.ifft(source_kappa * scipy.fft.fft(source_mat))) + # # add the source values to the existing field values + # # including the k-space correction + # rhox = rhox + source_mat + # case 'additive-no-correction': + # # add the source values to the existing field values + # rhox[k_sim.p_source_pos_index] = rhox[k_sim.p_source_pos_index] + source.p[k_sim.p_source_sig_index, t_index] + + + # print("6.", np.shape(p)) + + # equation of state + if not k_sim.is_nonlinear: + # print("is linear", k_sim.equation_of_state, type(k_sim.equation_of_state)) + match k_sim.equation_of_state: + case 'lossless': + + # print("Here 2. lossless / linear", np.shape(p)) + + # calculate p using a linear adiabatic equation of state + p = np.squeeze(c0**2) * np.squeeze(rhox) + + # print("3.", np.shape(p), np.squeeze(c0**2).shape, np.squeeze(rhox).shape) + + case 'absorbing': + + # print("Here 2. absorbing / linear", np.shape(p)) + + # calculate p using a linear absorbing equation of state + p = np.squeeze(c0**2 * (rhox + + medium.absorb_tau * np.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,) )) + - medium.absorb_eta * np.real(scipy.fft.ifftn(medium.absorb_nabla2 * scipy.fft.fftn(rhox, axes=(0,)), axes=(0,))) ) ) + + + case 'stokes': + + # print("Here 2. stokes / linear") + + # calculate p using a linear absorbing equation of state + # assuming alpha_power = 2 + p = c0**2 * (rhox + medium.absorb_tau * rho0 * duxdx) + + + else: + match k_sim.equation_of_state: + case 'lossless': + + print("Here 2. lossless / nonlinear") + + # calculate p using a nonlinear adiabatic equation of state + p = c0**2 * (rhox + medium.BonA * rhox**2 / (2.0 * rho0)) + + case 'absorbing': + + print("Here 2. absorbing / nonlinear") + + # calculate p using a nonlinear absorbing equation of state + p = c0**2 * ( rhox + + medium.absorb_tau * np.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,))) + - medium.absorb_eta * np.real(scipy.fft.ifftn(medium.absorb_nabla2 * scipy.fft.fftn(rhox, axes=(0,)), axes=(0,))) + + medium.BonA * rhox**2 / (2.0 * rho0) ) + + case 'stokes': + + print("Here 2. stokes / nonlinear") + + # calculate p using a nonlinear absorbing equation of state + # assuming alpha_power = 2 + p = c0**2 * (rhox + + medium.absorb_tau * rho0 * duxdx + + medium.BonA * rhox**2 / (2.0 * rho0) ) + + + # print("7.", np.shape(p), k_sim.source.p0.shape) + + # enforce initial conditions if source.p0 is defined instead of time varying sources + if t_index == 0 and k_sim.source_p0: + + # print(np.shape(rhox)) + + if k_sim.source.p0.ndim == 1: + p0 = k_sim.source.p0[:, np.newaxis] + else: + p0 = k_sim.source.p0 + + # add the initial pressure to rho as a mass source + p = p0 + rhox = p0 / c0**2 + + # compute u(t = t1 - dt/2) based on u(dt/2) = -u(-dt/2) which forces u(t = t1) = 0 + if not options.use_finite_difference: + + # calculate gradient using the k-space method on a regular grid + ux_sgx = dt * rho0_sgx_inv * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_pos * kappa * scipy.fft.fftn(p, axes=(0,)), axes=(0,) )) / 2.0 + + p_k = scipy.fft.fftn(p, axes=(0,)) + + + else: + match options.use_finite_difference: + case 2: + + # calculate gradient using second-order accurate finite difference scheme (including half step forward) + # dpdx = ([p(2:end); 0] - p) / kgrid.dx; + + dpdx = (np.append(p[1:], 0.0) - p) / kgrid.dx + ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0 + + case 4: + + # calculate gradient using fourth-order accurate finite difference scheme (including half step backward) + # dpdx = ([p(3:end); 0; 0] - 27 * [p(2:end); 0] + 27 * p - [0; p(1:(end-1))]) / (24 * kgrid.dx) + dpdx = (np.append(p[2:], [0, 0]) - 27.0 * np.append(p[1:], 0) + 27.0 * p - np.append(0, p[:-1])) / (24.0 * kgrid.dx) + ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0 + + + else: + # precompute fft of p here so p can be modified for visualisation + p_k = scipy.fft.fftn(p, axes=(0,)) + p_k = p_k[:, np.newaxis] + + # extract required sensor data from the pressure and particle velocity + # fields if the number of time steps elapsed is greater than + # sensor.record_start_index (defaults to 1) + if options.use_sensor and not options.time_rev and (t_index >= sensor.record_start_index): + + # update index for data storage + file_index: int = t_index - sensor.record_start_index + + # run sub-function to extract the required data + extract_options = dotdict({'record_u_non_staggered': k_sim.record.u_non_staggered, + 'record_u_split_field': k_sim.record.u_split_field, + 'record_I': k_sim.record.I, + 'record_I_avg': k_sim.record.I_avg, + 'binary_sensor_mask': k_sim.binary_sensor_mask, + 'record_p': k_sim.record.p, + 'record_p_max': k_sim.record.p_max, + 'record_p_min': k_sim.record.p_min, + 'record_p_rms': k_sim.record.p_rms, + 'record_p_max_all': k_sim.record.p_max_all, + 'record_p_min_all': k_sim.record.p_min_all, + 'record_u': k_sim.record.u, + 'record_u_max': k_sim.record.u_max, + 'record_u_min': k_sim.record.u_min, + 'record_u_rms': k_sim.record.u_rms, + 'record_u_max_all': k_sim.record.u_max_all, + 'record_u_min_all': k_sim.record.u_min_all, + 'compute_directivity': False}) + + # run sub-function to extract the required data from the acoustic variables + sensor_data = extract_sensor_data(kgrid.dim, sensor_data, file_index, k_sim.sensor_mask_index, extract_options, record, p, ux_sgx) + + + + return sensor_data + + + + From 2cbbfcfd04cd73c69cb33a207cf141a046571371 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 16:30:13 +0100 Subject: [PATCH 02/29] Add 1D simulation example folder This script simulates and visualizes pressure fields in a one-dimensional heterogeneous medium using k-Wave. --- .../ivp_1D_simulation/ivp_1D_simulation.py | 96 +++++++++++++++++++ 1 file changed, 96 insertions(+) create mode 100644 examples/ivp_1D_simulation/ivp_1D_simulation.py diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.py b/examples/ivp_1D_simulation/ivp_1D_simulation.py new file mode 100644 index 00000000..6bfb9e16 --- /dev/null +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.py @@ -0,0 +1,96 @@ +""" +Simulations In One Dimension Example + +This example provides a simple demonstration of using k-Wave for the +simulation and detection of the pressure field generated by an initial +pressure distribution within a one-dimensional heterogeneous propagation +medium. It builds on the Homogeneous Propagation Medium and Heterogeneous +Propagation Medium examples. +""" + +import numpy as np +import matplotlib.pyplot as plt + +from kwave.data import Vector + +from kwave.kgrid import kWaveGrid +from kwave.kmedium import kWaveMedium +from kwave.ksensor import kSensor +from kwave.ksource import kSource + +from kwave.kspaceFirstOrder1D import kspace_first_order_1D + +from kwave.options.simulation_options import SimulationOptions + + +# ========================================================================= +# SIMULATION +# ========================================================================= + +# create the computational grid +Nx: int = 512 # number of grid points in the x (row) direction +dx: float = 0.05e-3 # grid point spacing in the x direction [m] + +grid_size_points = Vector([Nx, ]) +grid_spacing_meters = Vector([dx, ]) + +# create the k-space grid +kgrid = kWaveGrid(grid_size_points, grid_spacing_meters) + +# define the properties of the propagation medium +sound_speed = 1500.0 * np.ones((Nx, 1)) # [m/s] +sound_speed[:np.round(Nx / 3).astype(int) - 1] = 2000.0 # [m/s] +density = 1000.0 * np.ones((Nx, 1)) # [kg/m^3] +density[np.round(4 * Nx / 5).astype(int) - 1:] = 1500.0 # [kg/m^3] +medium = kWaveMedium(sound_speed=sound_speed, density=density) + +# Create the source object +source = kSource() + +# create initial pressure distribution using a smoothly shaped sinusoid +x_pos: int = 280 # [grid points] +width: int = 100 # [grid points] +height: int = 1 # [au] +p0 = np.linspace(0.0, 2.0 * np.pi, width + 1) + +part1 = np.zeros(x_pos).astype(float) +part2 = (height / 2.0) * np.sin(p0 - np.pi / 2.0) + (height / 2.0) +part3 = np.zeros(Nx - x_pos - width - 1).astype(float) +source.p0 = np.concatenate([part1, part2, part3]) + +# create a Cartesian sensor mask recording the pressure +sensor = kSensor() +sensor.record = ["p"] + +# this hack is needed to ensure that the sensor is in [1,2] dimensions +mask = np.array([-10e-3, 10e-3]) # [mm] +mask = mask[:, np.newaxis].T +sensor.mask = mask + +# set the simulation time to capture the reflections +c_max = np.max(medium.sound_speed.flatten()) # [m/s] +t_end = 2.5 * kgrid.x_size / c_max # [s] + +# define the time array +kgrid.makeTime(c_max, t_end=t_end) + +# define the simulation options +simulation_options = SimulationOptions(data_cast="off", save_to_disk=False) + +# run the simulation +sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options) + +# ========================================================================= +# VISUALISATION +# ========================================================================= + +# plot the recorded time signals +_, ax1 = plt.subplots() +ax1.plot(sensor_data['p'][0, :], 'b-') +ax1.plot(sensor_data['p'][1, :], 'r-') +ax1.grid(True) +ax1.set_ylim(-0.1, 0.7) +ax1.set_ylabel('Pressure') +ax1.set_xlabel('Time Step') +ax1.legend(['Sensor Position 1', 'Sensor Position 2']) +plt.show() From 00fa6c60e2395a00795a57ea83c6ac65580f8f94 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 16:37:50 +0100 Subject: [PATCH 03/29] Add files via upload --- .../1d-demo-simulation.ipynb | 422 ++++++++++++++++++ 1 file changed, 422 insertions(+) create mode 100644 examples/ivp_1D_simulation/1d-demo-simulation.ipynb diff --git a/examples/ivp_1D_simulation/1d-demo-simulation.ipynb b/examples/ivp_1D_simulation/1d-demo-simulation.ipynb new file mode 100644 index 00000000..8cacccc6 --- /dev/null +++ b/examples/ivp_1D_simulation/1d-demo-simulation.ipynb @@ -0,0 +1,422 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "b9KS_6atASoi" + }, + "outputs": [], + "source": [ + "!pip install git+https://github.com/djps/k-wave-python@1d-demo" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VCcwZsQ2ASoj" + }, + "outputs": [], + "source": [ + "from copy import deepcopy\n", + "\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "from kwave.data import Vector\n", + "from kwave.kgrid import kWaveGrid\n", + "from kwave.kmedium import kWaveMedium\n", + "from kwave.ksensor import kSensor\n", + "from kwave.ksource import kSource\n", + "from kwave.kspaceFirstOrderAS import kspaceFirstOrderASC\n", + "from kwave.options.simulation_execution_options import SimulationExecutionOptions\n", + "from kwave.options.simulation_options import SimulationOptions, SimulationType\n", + "from kwave.utils.filters import extract_amp_phase\n", + "from kwave.utils.kwave_array import kWaveArray\n", + "from kwave.utils.mapgen import focused_bowl_oneil\n", + "from kwave.utils.math import round_even\n", + "from kwave.utils.signals import create_cw_signals" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xUqsDgayASoj" + }, + "source": [ + "Set parameters" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "FwbGsg_gASok" + }, + "outputs": [], + "source": [ + "# medium parameters\n", + "c0: float = 1500.0 # sound speed [m/s]\n", + "rho0: float = 1000.0 # density [kg/m^3]\n", + "\n", + "# source parameters\n", + "source_f0 = 1.0e6 # source frequency [Hz]\n", + "source_roc = 30e-3 # bowl radius of curvature [m]\n", + "source_diameter = 30e-3 # bowl aperture diameter [m]\n", + "source_amp = np.array([1.0e6]) # source pressure [Pa]\n", + "source_phase = np.array([0.0]) # source phase [radians]\n", + "\n", + "# grid parameters\n", + "axial_size: float = 50.0e-3 # total grid size in the axial dimension [m]\n", + "lateral_size: float = 45.0e-3 # total grid size in the lateral dimension [m]\n", + "\n", + "# computational parameters\n", + "ppw: int = 3 # number of points per wavelength\n", + "t_end: float = 40e-6 # total compute time [s] (this must be long enough to reach steady state)\n", + "record_periods: int = 1 # number of periods to record\n", + "cfl: float = 0.05 # CFL number\n", + "source_x_offset: int = 20 # grid points to offset the source\n", + "bli_tolerance: float = 0.01 # tolerance for truncation of the off-grid source points\n", + "upsampling_rate: int = 10 # density of integration points relative to grid" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "misFfiLaASok" + }, + "source": [ + "Set grid" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "maEowzD6ASok" + }, + "outputs": [], + "source": [ + "# calculate the grid spacing based on the PPW and F0\n", + "dx: float = c0 / (ppw * source_f0) # [m]\n", + "\n", + "# compute the size of the grid\n", + "Nx: int= round_even(axial_size / dx) + source_x_offset\n", + "Ny: int = round_even(lateral_size / dx)\n", + "\n", + "grid_size_points = Vector([Nx, Ny])\n", + "grid_spacing_meters = Vector([dx, dx])\n", + "\n", + "# create the k-space grid\n", + "kgrid = kWaveGrid(grid_size_points, grid_spacing_meters)\n", + "\n", + "# compute points per temporal period\n", + "ppp: int = round(ppw / cfl)\n", + "\n", + "# compute corresponding time spacing\n", + "dt: float = 1.0 / (ppp * source_f0)\n", + "\n", + "# create the time array using an integer number of points per period\n", + "Nt: int = round(t_end / dt)\n", + "kgrid.setTime(Nt, dt)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a4WRzAZ_ASol" + }, + "source": [ + "Define source" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "s4ZO890kASol" + }, + "outputs": [], + "source": [ + "source = kSource()\n", + "\n", + "# create time varying source\n", + "source_sig = create_cw_signals(np.squeeze(kgrid.t_array), source_f0, source_amp, source_phase)\n", + "\n", + "# set arc position and orientation\n", + "arc_pos = [kgrid.x_vec[0].item() + source_x_offset * kgrid.dx, 0]\n", + "focus_pos = [kgrid.x_vec[-1].item(), 0]\n", + "\n", + "# create empty kWaveArray\n", + "karray = kWaveArray(axisymmetric=True,\n", + " bli_tolerance=bli_tolerance,\n", + " upsampling_rate=upsampling_rate,\n", + " single_precision=True)\n", + "\n", + "# add bowl shaped element\n", + "karray.add_arc_element(arc_pos, source_roc, source_diameter, focus_pos)\n", + "\n", + "# assign binary mask\n", + "source.p_mask = karray.get_array_binary_mask(kgrid)\n", + "\n", + "# assign source signals\n", + "source.p = karray.get_distributed_source_signal(kgrid, source_sig)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BsRaUQKaASol" + }, + "source": [ + "Set up medium" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "keTsJqIcASol" + }, + "outputs": [], + "source": [ + "medium = kWaveMedium(sound_speed=c0, density=rho0)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "d1L-HjQfASol" + }, + "source": [ + "Set up sensor" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "eUdyQIObASol" + }, + "outputs": [], + "source": [ + "sensor = kSensor()\n", + "\n", + "# set sensor mask to record central plane, not including the source point\n", + "sensor.mask = np.zeros((Nx, Ny), dtype=bool)\n", + "sensor.mask[(source_x_offset + 1):, :] = True\n", + "\n", + "# record the pressure\n", + "sensor.record = ['p']\n", + "\n", + "# record only the final few periods when the field is in steady state\n", + "sensor.record_start_index = kgrid.Nt - (record_periods * ppp) + 1" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "uOdMyqagASom" + }, + "source": [ + "Define simulation options" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "nJ5iCYwYASom" + }, + "outputs": [], + "source": [ + "simulation_options = SimulationOptions(\n", + " simulation_type=SimulationType.AXISYMMETRIC,\n", + " data_cast='single',\n", + " data_recast=False,\n", + " save_to_disk=True,\n", + " save_to_disk_exit=False,\n", + " pml_inside=False)\n", + "\n", + "execution_options = SimulationExecutionOptions(\n", + " is_gpu_simulation=False,\n", + " delete_data=False,\n", + " verbose_level=2)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "2Pg9FCSoASom" + }, + "source": [ + "Run simulation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uCJcEDABASom" + }, + "outputs": [], + "source": [ + "sensor_data = kspaceFirstOrderASC(medium=deepcopy(medium),\n", + " kgrid=deepcopy(kgrid),\n", + " source=deepcopy(source),\n", + " sensor=deepcopy(sensor),\n", + " simulation_options=simulation_options,\n", + " execution_options=execution_options)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "in-JqhZaASom" + }, + "source": [ + "Post-processing" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "acNlmnj1ASom" + }, + "outputs": [], + "source": [ + "# extract amplitude from the sensor data\n", + "amp, _, _ = extract_amp_phase(sensor_data['p'].T, 1.0 / kgrid.dt, source_f0,\n", + " dim=1, fft_padding=1, window='Rectangular')\n", + "\n", + "# reshape data\n", + "amp = np.reshape(amp, (Nx - (source_x_offset+1), Ny), order='F')\n", + "\n", + "# extract pressure on axis\n", + "amp_on_axis = amp[:, 0]\n", + "\n", + "# define axis vectors for plotting\n", + "x_vec = np.squeeze(kgrid.x_vec[(source_x_offset + 1):, :] - kgrid.x_vec[source_x_offset])\n", + "y_vec = kgrid.y_vec\n", + "\n", + "# =========================================================================\n", + "# ANALYTICAL SOLUTION\n", + "# =========================================================================\n", + "\n", + "# calculate the wavenumber\n", + "knumber = 2.0 * np.pi * source_f0 / c0\n", + "\n", + "# define radius and axis\n", + "x_max = (Nx-1) * dx\n", + "delta_x = x_max / 10000.0\n", + "x_ref = np.arange(0.0, x_max + delta_x, delta_x)\n", + "\n", + "# calculate analytical solution\n", + "p_ref_axial, _, _ = focused_bowl_oneil(source_roc,\n", + " source_diameter,\n", + " source_amp[0] / (c0 * rho0),\n", + " source_f0,\n", + " c0,\n", + " rho0,\n", + " axial_positions=x_ref)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZchMY9PnASom" + }, + "source": [ + "Plot solution" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "P1bgt__WASom" + }, + "outputs": [], + "source": [ + "# plot the pressure along the focal axis of the piston\n", + "fig1, ax1 = plt.subplots(1, 1)\n", + "ax1.plot(1e3 * x_ref, 1e-6 * p_ref_axial, 'k-', label='Exact')\n", + "ax1.plot(1e3 * x_vec, 1e-6 * amp_on_axis, 'b.', label='k-Wave')\n", + "ax1.legend()\n", + "ax1.set(xlabel='Axial Position [mm]',\n", + " ylabel='Pressure [MPa]',\n", + " title='Axial Pressure')\n", + "ax1.set_xlim(0.0, 1e3 * axial_size)\n", + "ax1.set_ylim(0.0, 20)\n", + "ax1.grid()\n", + "\n", + "# get grid weights\n", + "grid_weights = karray.get_array_grid_weights(kgrid)\n", + "\n", + "# define axis vectors for plotting\n", + "yvec = np.squeeze(kgrid.y_vec) - kgrid.y_vec[0].item()\n", + "y_vec = np.hstack((-np.flip(yvec)[:-1], yvec))\n", + "x_vec = np.squeeze(kgrid.x_vec[(source_x_offset + 1):, :] - kgrid.x_vec[source_x_offset])\n", + "\n", + "data = np.hstack((np.fliplr(amp[:, :-1]), amp)) / 1e6\n", + "sp = np.hstack((np.fliplr(source.p_mask[:, :-1]), source.p_mask))\n", + "gw = np.hstack((np.fliplr(grid_weights[:, :-1]), grid_weights))\n", + "\n", + "fig3, (ax3a, ax3b) = plt.subplots(1, 2)\n", + "ax3a.pcolormesh(1e3 * np.squeeze(y_vec),\n", + " 1e3 * np.squeeze(kgrid.x_vec[:, :] - kgrid.x_vec[source_x_offset]),\n", + " sp,\n", + " shading='gouraud')\n", + "ax3a.set(xlabel='y [mm]',\n", + " ylabel='x [mm]',\n", + " title='Source Mask')\n", + "ax3b.pcolormesh(1e3 * np.squeeze(y_vec),\n", + " 1e3 * np.squeeze(kgrid.x_vec[:, :] - kgrid.x_vec[source_x_offset]),\n", + " gw,\n", + " shading='gouraud')\n", + "ax3b.set(xlabel='y [mm]',\n", + " ylabel='x [mm]',\n", + " title='Off-Grid Source Weights')\n", + "fig3.tight_layout(pad=1.2)\n", + "\n", + "# plot the pressure field\n", + "fig4, ax4 = plt.subplots(1, 1)\n", + "ax4.pcolormesh(1e3 * np.squeeze(y_vec),\n", + " 1e3 * np.squeeze(x_vec),\n", + " data,\n", + " shading='gouraud')\n", + "ax4.set(xlabel='Lateral Position [mm]',\n", + " ylabel='Axial Position [mm]',\n", + " title='Pressure Field')\n", + "ax4.invert_yaxis()\n", + "\n", + "# show figures\n", + "plt.show()" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file From e7265c5eb331031a61e64155dbc579fa706faba1 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 16:38:35 +0100 Subject: [PATCH 04/29] Rename 1d-demo-simulation to ivp_1D_simulation --- .../{1d-demo-simulation.ipynb => ivp_1D_simulation.ipynb} | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename examples/ivp_1D_simulation/{1d-demo-simulation.ipynb => ivp_1D_simulation.ipynb} (99%) diff --git a/examples/ivp_1D_simulation/1d-demo-simulation.ipynb b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb similarity index 99% rename from examples/ivp_1D_simulation/1d-demo-simulation.ipynb rename to examples/ivp_1D_simulation/ivp_1D_simulation.ipynb index 8cacccc6..9a896d7f 100644 --- a/examples/ivp_1D_simulation/1d-demo-simulation.ipynb +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb @@ -419,4 +419,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} From 23d0d528168433770ac0017a63076cdfa68825c2 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 16:49:54 +0100 Subject: [PATCH 05/29] Created using Colab --- .../ivp_1D_simulation/ivp_1D_simulation.ipynb | 513 +++++++----------- 1 file changed, 207 insertions(+), 306 deletions(-) diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb index 9a896d7f..71bdb4c7 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb @@ -2,41 +2,172 @@ "cells": [ { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { - "id": "b9KS_6atASoi" + "id": "b9KS_6atASoi", + "collapsed": true, + "outputId": "0e709cc8-7858-4540-91f6-21e241f2cddf", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + } }, - "outputs": [], + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Collecting git+https://github.com/djps/k-wave-python@1d-demo\n", + " Cloning https://github.com/djps/k-wave-python (to revision 1d-demo) to /tmp/pip-req-build-n_4rh2qu\n", + " Running command git clone --filter=blob:none --quiet https://github.com/djps/k-wave-python /tmp/pip-req-build-n_4rh2qu\n", + " Running command git checkout -b 1d-demo --track origin/1d-demo\n", + " Switched to a new branch '1d-demo'\n", + " Branch '1d-demo' set up to track remote branch '1d-demo' from 'origin'.\n", + " Resolved https://github.com/djps/k-wave-python to commit e7265c5eb331031a61e64155dbc579fa706faba1\n", + " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", + " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", + " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", + "Collecting beartype==0.22.4 (from k-Wave-python==0.4.1)\n", + " Downloading beartype-0.22.4-py3-none-any.whl.metadata (36 kB)\n", + "Collecting deepdiff==8.6.1 (from k-Wave-python==0.4.1)\n", + " Downloading deepdiff-8.6.1-py3-none-any.whl.metadata (8.6 kB)\n", + "Collecting deprecated>=1.2.14 (from k-Wave-python==0.4.1)\n", + " Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n", + "Requirement already satisfied: h5py==3.15.1 in /usr/local/lib/python3.12/dist-packages (from k-Wave-python==0.4.1) (3.15.1)\n", + "Collecting jaxtyping==0.3.2 (from k-Wave-python==0.4.1)\n", + " Downloading jaxtyping-0.3.2-py3-none-any.whl.metadata (7.0 kB)\n", + "Collecting matplotlib==3.10.3 (from k-Wave-python==0.4.1)\n", + " Downloading matplotlib-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (11 kB)\n", + "Requirement already satisfied: numpy<2.3.0,>=1.22.2 in /usr/local/lib/python3.12/dist-packages (from k-Wave-python==0.4.1) (2.0.2)\n", + "Collecting opencv-python==4.11.0.86 (from k-Wave-python==0.4.1)\n", + " Downloading opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (20 kB)\n", + "Collecting scipy==1.15.3 (from k-Wave-python==0.4.1)\n", + " Downloading scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (61 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.0/62.0 kB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hCollecting orderly-set<6,>=5.4.1 (from deepdiff==8.6.1->k-Wave-python==0.4.1)\n", + " Downloading orderly_set-5.5.0-py3-none-any.whl.metadata (6.6 kB)\n", + "Collecting wadler-lindig>=0.1.3 (from jaxtyping==0.3.2->k-Wave-python==0.4.1)\n", + " Downloading wadler_lindig-0.1.7-py3-none-any.whl.metadata (17 kB)\n", + "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (1.3.3)\n", + "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (0.12.1)\n", + "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (4.60.1)\n", + "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (1.4.9)\n", + "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (25.0)\n", + "Requirement already satisfied: pillow>=8 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (11.3.0)\n", + "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (3.2.5)\n", + "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (2.9.0.post0)\n", + "Requirement already satisfied: wrapt<3,>=1.10 in /usr/local/lib/python3.12/dist-packages (from deprecated>=1.2.14->k-Wave-python==0.4.1) (2.0.1)\n", + "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.7->matplotlib==3.10.3->k-Wave-python==0.4.1) (1.17.0)\n", + "Downloading beartype-0.22.4-py3-none-any.whl (1.3 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m23.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading deepdiff-8.6.1-py3-none-any.whl (91 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m91.4/91.4 kB\u001b[0m \u001b[31m6.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading jaxtyping-0.3.2-py3-none-any.whl (55 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m55.4/55.4 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading matplotlib-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (8.6 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m8.6/8.6 MB\u001b[0m \u001b[31m99.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (63.0 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m63.0/63.0 MB\u001b[0m \u001b[31m13.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (37.3 MB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m37.3/37.3 MB\u001b[0m \u001b[31m23.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading deprecated-1.3.1-py2.py3-none-any.whl (11 kB)\n", + "Downloading orderly_set-5.5.0-py3-none-any.whl (13 kB)\n", + "Downloading wadler_lindig-0.1.7-py3-none-any.whl (20 kB)\n", + "Building wheels for collected packages: k-Wave-python\n", + " Building wheel for k-Wave-python (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", + " Created wheel for k-Wave-python: filename=k_wave_python-0.4.1-py3-none-any.whl size=205572 sha256=043823da9c43851a2c587e0472799f9d7560384b19f78970b56cbdd074380e90\n", + " Stored in directory: /tmp/pip-ephem-wheel-cache-agme1_65/wheels/34/51/59/3b5b5fca32858fe6767c81f208a87c7ae2f45bf492d1b10e45\n", + "Successfully built k-Wave-python\n", + "Installing collected packages: wadler-lindig, scipy, orderly-set, opencv-python, deprecated, beartype, matplotlib, jaxtyping, deepdiff, k-Wave-python\n", + " Attempting uninstall: scipy\n", + " Found existing installation: scipy 1.16.3\n", + " Uninstalling scipy-1.16.3:\n", + " Successfully uninstalled scipy-1.16.3\n", + " Attempting uninstall: opencv-python\n", + " Found existing installation: opencv-python 4.12.0.88\n", + " Uninstalling opencv-python-4.12.0.88:\n", + " Successfully uninstalled opencv-python-4.12.0.88\n", + " Attempting uninstall: beartype\n", + " Found existing installation: beartype 0.22.6\n", + " Uninstalling beartype-0.22.6:\n", + " Successfully uninstalled beartype-0.22.6\n", + " Attempting uninstall: matplotlib\n", + " Found existing installation: matplotlib 3.10.0\n", + " Uninstalling matplotlib-3.10.0:\n", + " Successfully uninstalled matplotlib-3.10.0\n", + "Successfully installed beartype-0.22.4 deepdiff-8.6.1 deprecated-1.3.1 jaxtyping-0.3.2 k-Wave-python-0.4.1 matplotlib-3.10.3 opencv-python-4.11.0.86 orderly-set-5.5.0 scipy-1.15.3 wadler-lindig-0.1.7\n" + ] + }, + { + "output_type": "display_data", + "data": { + "application/vnd.colab-display-data+json": { + "pip_warning": { + "packages": [ + "matplotlib", + "mpl_toolkits" + ] + }, + "id": "6af6b7c3a3304b1c9f22d9ff43cc9587" + } + }, + "metadata": {} + } + ], "source": [ "!pip install git+https://github.com/djps/k-wave-python@1d-demo" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": { - "id": "VCcwZsQ2ASoj" + "id": "VCcwZsQ2ASoj", + "outputId": "fe64876b-ec1b-43cd-f2ab-b781f6c0824d", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 505 + } }, - "outputs": [], + "outputs": [ + { + "output_type": "error", + "ename": "ImportError", + "evalue": "cannot import name 'extract_sensor_data' from 'kwave.kWaveSimulation_helper' (/usr/local/lib/python3.12/dist-packages/kwave/kWaveSimulation_helper/__init__.py)", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipython-input-3394989936.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkwave\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mksource\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mkSource\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mkwave\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkspaceFirstOrder1D\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mkspace_first_order_1D\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkwave\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msimulation_options\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSimulationOptions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/kwave/kspaceFirstOrder1D.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkwave\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msimulation_options\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSimulationOptions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 22\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mkwave\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkWaveSimulation_helper\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mextract_sensor_data\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 23\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 24\u001b[0m def kspace_first_order_1D(kgrid: kWaveGrid,\n", + "\u001b[0;31mImportError\u001b[0m: cannot import name 'extract_sensor_data' from 'kwave.kWaveSimulation_helper' (/usr/local/lib/python3.12/dist-packages/kwave/kWaveSimulation_helper/__init__.py)", + "", + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n" + ], + "errorDetails": { + "actions": [ + { + "action": "open_url", + "actionText": "Open Examples", + "url": "/notebooks/snippets/importing_libraries.ipynb" + } + ] + } + } + ], "source": [ - "from copy import deepcopy\n", - "\n", - "import matplotlib.pyplot as plt\n", "import numpy as np\n", + "import matplotlib.pyplot as plt\n", "\n", "from kwave.data import Vector\n", + "\n", "from kwave.kgrid import kWaveGrid\n", "from kwave.kmedium import kWaveMedium\n", "from kwave.ksensor import kSensor\n", "from kwave.ksource import kSource\n", - "from kwave.kspaceFirstOrderAS import kspaceFirstOrderASC\n", - "from kwave.options.simulation_execution_options import SimulationExecutionOptions\n", - "from kwave.options.simulation_options import SimulationOptions, SimulationType\n", - "from kwave.utils.filters import extract_amp_phase\n", - "from kwave.utils.kwave_array import kWaveArray\n", - "from kwave.utils.mapgen import focused_bowl_oneil\n", - "from kwave.utils.math import round_even\n", - "from kwave.utils.signals import create_cw_signals" + "\n", + "from kwave.kspaceFirstOrder1D import kspace_first_order_1D\n", + "\n", + "from kwave.options.simulation_options import SimulationOptions" ] }, { @@ -56,343 +187,113 @@ }, "outputs": [], "source": [ - "# medium parameters\n", - "c0: float = 1500.0 # sound speed [m/s]\n", - "rho0: float = 1000.0 # density [kg/m^3]\n", - "\n", - "# source parameters\n", - "source_f0 = 1.0e6 # source frequency [Hz]\n", - "source_roc = 30e-3 # bowl radius of curvature [m]\n", - "source_diameter = 30e-3 # bowl aperture diameter [m]\n", - "source_amp = np.array([1.0e6]) # source pressure [Pa]\n", - "source_phase = np.array([0.0]) # source phase [radians]\n", - "\n", - "# grid parameters\n", - "axial_size: float = 50.0e-3 # total grid size in the axial dimension [m]\n", - "lateral_size: float = 45.0e-3 # total grid size in the lateral dimension [m]\n", - "\n", - "# computational parameters\n", - "ppw: int = 3 # number of points per wavelength\n", - "t_end: float = 40e-6 # total compute time [s] (this must be long enough to reach steady state)\n", - "record_periods: int = 1 # number of periods to record\n", - "cfl: float = 0.05 # CFL number\n", - "source_x_offset: int = 20 # grid points to offset the source\n", - "bli_tolerance: float = 0.01 # tolerance for truncation of the off-grid source points\n", - "upsampling_rate: int = 10 # density of integration points relative to grid" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "misFfiLaASok" - }, - "source": [ - "Set grid" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "maEowzD6ASok" - }, - "outputs": [], - "source": [ - "# calculate the grid spacing based on the PPW and F0\n", - "dx: float = c0 / (ppw * source_f0) # [m]\n", + "# create the computational grid\n", + "Nx: int = 512 # number of grid points in the x (row) direction\n", + "dx: float = 0.05e-3 # grid point spacing in the x direction [m]\n", "\n", - "# compute the size of the grid\n", - "Nx: int= round_even(axial_size / dx) + source_x_offset\n", - "Ny: int = round_even(lateral_size / dx)\n", - "\n", - "grid_size_points = Vector([Nx, Ny])\n", - "grid_spacing_meters = Vector([dx, dx])\n", + "grid_size_points = Vector([Nx, ])\n", + "grid_spacing_meters = Vector([dx, ])\n", "\n", "# create the k-space grid\n", "kgrid = kWaveGrid(grid_size_points, grid_spacing_meters)\n", "\n", - "# compute points per temporal period\n", - "ppp: int = round(ppw / cfl)\n", + "# define the properties of the propagation medium\n", + "sound_speed = 1500.0 * np.ones((Nx, 1)) # [m/s]\n", + "sound_speed[:np.round(Nx / 3).astype(int) - 1] = 2000.0\t # [m/s]\n", + "density = 1000.0 * np.ones((Nx, 1)) # [kg/m^3]\n", + "density[np.round(4 * Nx / 5).astype(int) - 1:] = 1500.0 # [kg/m^3]\n", + "medium = kWaveMedium(sound_speed=sound_speed, density=density)\n", "\n", - "# compute corresponding time spacing\n", - "dt: float = 1.0 / (ppp * source_f0)\n", + "# Create the source object\n", + "source = kSource()\n", "\n", - "# create the time array using an integer number of points per period\n", - "Nt: int = round(t_end / dt)\n", - "kgrid.setTime(Nt, dt)" + "# create initial pressure distribution using a smoothly shaped sinusoid\n", + "x_pos: int = 280 # [grid points]\n", + "width: int = 100 # [grid points]\n", + "height: int = 1 # [au]\n", + "p0 = np.linspace(0.0, 2.0 * np.pi, width + 1)" ] }, { "cell_type": "markdown", "metadata": { - "id": "a4WRzAZ_ASol" + "id": "misFfiLaASok" }, "source": [ - "Define source" + "Set grid" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "id": "s4ZO890kASol" + "id": "maEowzD6ASok" }, "outputs": [], "source": [ + "# Create the source object\n", "source = kSource()\n", "\n", - "# create time varying source\n", - "source_sig = create_cw_signals(np.squeeze(kgrid.t_array), source_f0, source_amp, source_phase)\n", + "# create initial pressure distribution using a smoothly shaped sinusoid\n", + "x_pos: int = 280 # [grid points]\n", + "width: int = 100 # [grid points]\n", + "height: int = 1 # [au]\n", + "p0 = np.linspace(0.0, 2.0 * np.pi, width + 1)\n", "\n", - "# set arc position and orientation\n", - "arc_pos = [kgrid.x_vec[0].item() + source_x_offset * kgrid.dx, 0]\n", - "focus_pos = [kgrid.x_vec[-1].item(), 0]\n", + "part1 = np.zeros(x_pos).astype(float)\n", + "part2 = (height / 2.0) * np.sin(p0 - np.pi / 2.0) + (height / 2.0)\n", + "part3 = np.zeros(Nx - x_pos - width - 1).astype(float)\n", + "source.p0 = np.concatenate([part1, part2, part3])\n", "\n", - "# create empty kWaveArray\n", - "karray = kWaveArray(axisymmetric=True,\n", - " bli_tolerance=bli_tolerance,\n", - " upsampling_rate=upsampling_rate,\n", - " single_precision=True)\n", - "\n", - "# add bowl shaped element\n", - "karray.add_arc_element(arc_pos, source_roc, source_diameter, focus_pos)\n", - "\n", - "# assign binary mask\n", - "source.p_mask = karray.get_array_binary_mask(kgrid)\n", - "\n", - "# assign source signals\n", - "source.p = karray.get_distributed_source_signal(kgrid, source_sig)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "BsRaUQKaASol" - }, - "source": [ - "Set up medium" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "keTsJqIcASol" - }, - "outputs": [], - "source": [ - "medium = kWaveMedium(sound_speed=c0, density=rho0)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "d1L-HjQfASol" - }, - "source": [ - "Set up sensor" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "eUdyQIObASol" - }, - "outputs": [], - "source": [ + "# create a Cartesian sensor mask recording the pressure\n", "sensor = kSensor()\n", + "sensor.record = [\"p\"]\n", "\n", - "# set sensor mask to record central plane, not including the source point\n", - "sensor.mask = np.zeros((Nx, Ny), dtype=bool)\n", - "sensor.mask[(source_x_offset + 1):, :] = True\n", - "\n", - "# record the pressure\n", - "sensor.record = ['p']\n", - "\n", - "# record only the final few periods when the field is in steady state\n", - "sensor.record_start_index = kgrid.Nt - (record_periods * ppp) + 1" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "uOdMyqagASom" - }, - "source": [ - "Define simulation options" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "nJ5iCYwYASom" - }, - "outputs": [], - "source": [ - "simulation_options = SimulationOptions(\n", - " simulation_type=SimulationType.AXISYMMETRIC,\n", - " data_cast='single',\n", - " data_recast=False,\n", - " save_to_disk=True,\n", - " save_to_disk_exit=False,\n", - " pml_inside=False)\n", - "\n", - "execution_options = SimulationExecutionOptions(\n", - " is_gpu_simulation=False,\n", - " delete_data=False,\n", - " verbose_level=2)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "2Pg9FCSoASom" - }, - "source": [ - "Run simulation" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "uCJcEDABASom" - }, - "outputs": [], - "source": [ - "sensor_data = kspaceFirstOrderASC(medium=deepcopy(medium),\n", - " kgrid=deepcopy(kgrid),\n", - " source=deepcopy(source),\n", - " sensor=deepcopy(sensor),\n", - " simulation_options=simulation_options,\n", - " execution_options=execution_options)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "in-JqhZaASom" - }, - "source": [ - "Post-processing" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "acNlmnj1ASom" - }, - "outputs": [], - "source": [ - "# extract amplitude from the sensor data\n", - "amp, _, _ = extract_amp_phase(sensor_data['p'].T, 1.0 / kgrid.dt, source_f0,\n", - " dim=1, fft_padding=1, window='Rectangular')\n", - "\n", - "# reshape data\n", - "amp = np.reshape(amp, (Nx - (source_x_offset+1), Ny), order='F')\n", - "\n", - "# extract pressure on axis\n", - "amp_on_axis = amp[:, 0]\n", - "\n", - "# define axis vectors for plotting\n", - "x_vec = np.squeeze(kgrid.x_vec[(source_x_offset + 1):, :] - kgrid.x_vec[source_x_offset])\n", - "y_vec = kgrid.y_vec\n", + "# this hack is needed to ensure that the sensor is in [1,2] dimensions\n", + "mask = np.array([-10e-3, 10e-3]) # [mm]\n", + "mask = mask[:, np.newaxis].T\n", + "sensor.mask = mask\n", "\n", - "# =========================================================================\n", - "# ANALYTICAL SOLUTION\n", - "# =========================================================================\n", + "# set the simulation time to capture the reflections\n", + "c_max = np.max(medium.sound_speed.flatten()) # [m/s]\n", + "t_end = 2.5 * kgrid.x_size / c_max # [s]\n", "\n", - "# calculate the wavenumber\n", - "knumber = 2.0 * np.pi * source_f0 / c0\n", + "# define the time array\n", + "kgrid.makeTime(c_max, t_end=t_end)\n", "\n", - "# define radius and axis\n", - "x_max = (Nx-1) * dx\n", - "delta_x = x_max / 10000.0\n", - "x_ref = np.arange(0.0, x_max + delta_x, delta_x)\n", + "# define the simulation options\n", + "simulation_options = SimulationOptions(data_cast=\"off\", save_to_disk=False)\n", "\n", - "# calculate analytical solution\n", - "p_ref_axial, _, _ = focused_bowl_oneil(source_roc,\n", - " source_diameter,\n", - " source_amp[0] / (c0 * rho0),\n", - " source_f0,\n", - " c0,\n", - " rho0,\n", - " axial_positions=x_ref)" + "# run the simulation\n", + "sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options)" ] }, { "cell_type": "markdown", "metadata": { - "id": "ZchMY9PnASom" + "id": "a4WRzAZ_ASol" }, "source": [ - "Plot solution" + "Visualisations" ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "id": "P1bgt__WASom" + "id": "s4ZO890kASol" }, "outputs": [], "source": [ - "# plot the pressure along the focal axis of the piston\n", - "fig1, ax1 = plt.subplots(1, 1)\n", - "ax1.plot(1e3 * x_ref, 1e-6 * p_ref_axial, 'k-', label='Exact')\n", - "ax1.plot(1e3 * x_vec, 1e-6 * amp_on_axis, 'b.', label='k-Wave')\n", - "ax1.legend()\n", - "ax1.set(xlabel='Axial Position [mm]',\n", - " ylabel='Pressure [MPa]',\n", - " title='Axial Pressure')\n", - "ax1.set_xlim(0.0, 1e3 * axial_size)\n", - "ax1.set_ylim(0.0, 20)\n", - "ax1.grid()\n", - "\n", - "# get grid weights\n", - "grid_weights = karray.get_array_grid_weights(kgrid)\n", - "\n", - "# define axis vectors for plotting\n", - "yvec = np.squeeze(kgrid.y_vec) - kgrid.y_vec[0].item()\n", - "y_vec = np.hstack((-np.flip(yvec)[:-1], yvec))\n", - "x_vec = np.squeeze(kgrid.x_vec[(source_x_offset + 1):, :] - kgrid.x_vec[source_x_offset])\n", - "\n", - "data = np.hstack((np.fliplr(amp[:, :-1]), amp)) / 1e6\n", - "sp = np.hstack((np.fliplr(source.p_mask[:, :-1]), source.p_mask))\n", - "gw = np.hstack((np.fliplr(grid_weights[:, :-1]), grid_weights))\n", - "\n", - "fig3, (ax3a, ax3b) = plt.subplots(1, 2)\n", - "ax3a.pcolormesh(1e3 * np.squeeze(y_vec),\n", - " 1e3 * np.squeeze(kgrid.x_vec[:, :] - kgrid.x_vec[source_x_offset]),\n", - " sp,\n", - " shading='gouraud')\n", - "ax3a.set(xlabel='y [mm]',\n", - " ylabel='x [mm]',\n", - " title='Source Mask')\n", - "ax3b.pcolormesh(1e3 * np.squeeze(y_vec),\n", - " 1e3 * np.squeeze(kgrid.x_vec[:, :] - kgrid.x_vec[source_x_offset]),\n", - " gw,\n", - " shading='gouraud')\n", - "ax3b.set(xlabel='y [mm]',\n", - " ylabel='x [mm]',\n", - " title='Off-Grid Source Weights')\n", - "fig3.tight_layout(pad=1.2)\n", - "\n", - "# plot the pressure field\n", - "fig4, ax4 = plt.subplots(1, 1)\n", - "ax4.pcolormesh(1e3 * np.squeeze(y_vec),\n", - " 1e3 * np.squeeze(x_vec),\n", - " data,\n", - " shading='gouraud')\n", - "ax4.set(xlabel='Lateral Position [mm]',\n", - " ylabel='Axial Position [mm]',\n", - " title='Pressure Field')\n", - "ax4.invert_yaxis()\n", - "\n", - "# show figures\n", + "# plot the recorded time signals\n", + "_, ax1 = plt.subplots()\n", + "ax1.plot(sensor_data['p'][0, :], 'b-')\n", + "ax1.plot(sensor_data['p'][1, :], 'r-')\n", + "ax1.grid(True)\n", + "ax1.set_ylim(-0.1, 0.7)\n", + "ax1.set_ylabel('Pressure')\n", + "ax1.set_xlabel('Time Step')\n", + "ax1.legend(['Sensor Position 1', 'Sensor Position 2'])\n", "plt.show()" ] } @@ -419,4 +320,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From 9765aea1a2aeb819f8a5d7177b270d6e04d9f64a Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 16:56:34 +0100 Subject: [PATCH 06/29] include extra file and tidy example --- .../extract_sensor_data.py | 714 ++++++++++++++++++ kwave/kspaceFirstOrder1D.py | 67 +- 2 files changed, 719 insertions(+), 62 deletions(-) create mode 100644 kwave/kWaveSimulation_helper/extract_sensor_data.py diff --git a/kwave/kWaveSimulation_helper/extract_sensor_data.py b/kwave/kWaveSimulation_helper/extract_sensor_data.py new file mode 100644 index 00000000..4442141b --- /dev/null +++ b/kwave/kWaveSimulation_helper/extract_sensor_data.py @@ -0,0 +1,714 @@ +import numpy as np + +def extract_sensor_data(dim: int, sensor_data, file_index, sensor_mask_index, + flags, record, p, ux_sgx, uy_sgy=None, uz_sgz=None): + """ + extract_sensor_data Sample field variables at the sensor locations. + """ + + # ========================================================================= + # GRID STAGGERING + # ========================================================================= + + # shift the components of the velocity field onto the non-staggered + # grid if required for output + if (flags.record_u_non_staggered or flags.record_I or flags.record_I_avg): + if (dim == 1): + ux_shifted = np.real(np.fft.ifft(record.x_shift_neg * np.fft.fft(ux_sgx, axis=0), axis=0)) + elif (dim == 2): + ux_shifted = np.real(np.fft.ifft(record.x_shift_neg * np.fft.fft(ux_sgx, axis=0), axis=0)) + uy_shifted = np.real(np.fft.ifft(record.y_shift_neg * np.fft.fft(uy_sgy, axis=1), axis=1)) + elif (dim == 3): + ux_shifted = np.real(np.fft.ifft(record.x_shift_neg * np.fft.fft(ux_sgx, axis=0), axis=0)) + uy_shifted = np.real(np.fft.ifft(record.y_shift_neg * np.fft.fft(uy_sgy, axis=1), axis=1)) + uz_shifted = np.real(np.fft.ifft(record.z_shift_neg * np.fft.fft(uz_sgz, axis=2), axis=2)) + else: + raise RuntimeError("Wrong dimensions") + + # ========================================================================= + # BINARY SENSOR MASK + # ========================================================================= + + if flags.binary_sensor_mask and not flags.use_cuboid_corners: + + # store the time history of the acoustic pressure + if (flags.record_p or flags.record_I or flags.record_I_avg): + if not flags.compute_directivity: + sensor_data.p[:, file_index] = np.squeeze(p[np.unravel_index(sensor_mask_index, np.shape(p), order='F')]) + else: + raise NotImplementedError('directivity not used at the moment') + + # store the maximum acoustic pressure + if flags.record_p_max: + if file_index == 0: + sensor_data.p_max = p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')] + else: + sensor_data.p_max = np.maximum(sensor_data.p_max, + p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')]) + + # store the minimum acoustic pressure + if flags.record_p_min: + if file_index == 0: + sensor_data.p_min = p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')] + else: + sensor_data.p_min = np.minimum(sensor_data.p_min, + p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')]) + + # store the rms acoustic pressure + if flags.record_p_rms: + if file_index == 0: + sensor_data.p_rms = p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')]**2 + else: + sensor_data.p_rms = np.sqrt((sensor_data.p_rms**2 * file_index + + p[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(p), order='F')]**2) / (file_index + 1) ) + + # store the time history of the particle velocity on the staggered grid + if flags.record_u: + if (dim == 1): + sensor_data.ux[:, file_index] = ux_sgx[sensor_mask_index] + elif (dim == 2): + sensor_data.ux[:, file_index] = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')] + sensor_data.uy[:, file_index] = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')] + elif (dim == 3): + sensor_data.ux[:, file_index] = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(ux_sgx), order='F')] + sensor_data.uy[:, file_index] = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(uy_sgy), order='F')] + sensor_data.uz[:, file_index] = uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), np.shape(uz_sgz), order='F')] + else: + raise RuntimeError("Wrong dimensions") + + # store the time history of the particle velocity + if flags.record_u_non_staggered or flags.record_I or flags.record_I_avg: + if (dim == 1): + sensor_data.ux_non_staggered[:, file_index] = ux_shifted[sensor_mask_index] + elif (dim == 2): + sensor_data.ux_non_staggered[:, file_index] = ux_shifted[np.unravel_index(np.squeeze(sensor_mask_index), ux_shifted.shape, order='F')] + sensor_data.uy_non_staggered[:, file_index] = uy_shifted[np.unravel_index(np.squeeze(sensor_mask_index), uy_shifted.shape, order='F')] + elif (dim == 3): + sensor_data.ux_non_staggered[:, file_index] = ux_shifted[np.unravel_index(np.squeeze(sensor_mask_index), ux_shifted.shape, order='F')] + sensor_data.uy_non_staggered[:, file_index] = uy_shifted[np.unravel_index(np.squeeze(sensor_mask_index), uy_shifted.shape, order='F')] + sensor_data.uz_non_staggered[:, file_index] = uz_shifted[np.unravel_index(np.squeeze(sensor_mask_index), uz_shifted.shape, order='F')] + else: + raise RuntimeError("Wrong dimensions") + + # store the split components of the particle velocity + if flags.record_u_split_field: + if (dim == 2): + + # compute forward FFTs + ux_k = record.x_shift_neg * np.fft.fftn(ux_sgx) + uy_k = record.y_shift_neg * np.fft.fftn(uy_sgy) + + # ux compressional + split_field = np.real(np.fft.ifftn(record.kx_norm**2 * ux_k + record.kx_norm * record.ky_norm * uy_k)) + sensor_data.ux_split_p[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # ux shear + split_field = np.real(np.fft.ifftn((1.0 - record.kx_norm**2) * ux_k - record.kx_norm * record.ky_norm * uy_k)) + sensor_data.ux_split_s[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uy compressional + split_field = np.real(np.fft.ifftn(record.ky_norm * record.kx_norm * ux_k + record.ky_norm **2 * uy_k)) + sensor_data.uy_split_p[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uy shear + split_field = np.real(np.fft.ifftn(-record.ky_norm * record.kx_norm * ux_k + (1.0 - record.ky_norm**2) * uy_k)) + sensor_data.uy_split_s[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + elif (dim == 3): + + # compute forward FFTs + ux_k = np.multiply(record.x_shift_neg, np.fft.fftn(ux_sgx), order='F') + uy_k = np.multiply(record.y_shift_neg, np.fft.fftn(uy_sgy), order='F') + uz_k = np.multiply(record.z_shift_neg, np.fft.fftn(uz_sgz), order='F') + + # ux compressional + split_field = np.real(np.fft.ifftn(record.kx_norm**2 * ux_k + + record.kx_norm * record.ky_norm * uy_k + + record.kx_norm * record.kz_norm * uz_k)) + sensor_data.ux_split_p[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # ux shear + split_field = np.real(np.fft.ifftn((1.0 - record.kx_norm**2) * ux_k - + record.kx_norm * record.ky_norm * uy_k - + record.kx_norm * record.kz_norm * uz_k)) + sensor_data.ux_split_s[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uy compressional + split_field = np.real(np.fft.ifftn(record.ky_norm * record.kx_norm * ux_k + + record.ky_norm**2 * uy_k + + record.ky_norm * record.kz_norm * uz_k)) + sensor_data.uy_split_p[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uy shear + split_field = np.real(np.fft.ifftn(- record.ky_norm * record.kx_norm * ux_k + + (1.0 - record.ky_norm**2) * uy_k - + record.ky_norm * record.kz_norm * uz_k)) + sensor_data.uy_split_s[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uz compressional + split_field = np.real(np.fft.ifftn(record.kz_norm * record.kx_norm * ux_k + + record.kz_norm * record.ky_norm * uy_k + + record.kz_norm**2 * uz_k)) + sensor_data.uz_split_p[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uz shear + split_field = np.real(np.fft.ifftn( -record.kz_norm * record.kx_norm * ux_k - + record.kz_norm * record.ky_norm * uy_k + + (1.0 - record.kz_norm**2) * uz_k)) + sensor_data.uz_split_s[:, file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + else: + raise RuntimeError("Wrong dimensions") + + # store the maximum particle velocity + if flags.record_u_max: + if file_index == 0: + if (dim == 1): + sensor_data.ux_max = ux_sgx[sensor_mask_index] + elif (dim == 2): + sensor_data.ux_max = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')] + sensor_data.uy_max = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')] + elif (dim == 3): + sensor_data.ux_max = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')] + sensor_data.uy_max = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')] + sensor_data.uz_max = uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), uz_sgz.shape, order='F')] + else: + raise RuntimeError("Wrong dimensions") + else: + if (dim == 1): + sensor_data.ux_max = np.maximum(sensor_data.ux_max, ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]) + elif (dim == 2): + sensor_data.ux_max = np.maximum(sensor_data.ux_max, ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]) + sensor_data.uy_max = np.maximum(sensor_data.uy_max, uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]) + elif (dim == 3): + sensor_data.ux_max = np.maximum(sensor_data.ux_max, ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]) + sensor_data.uy_max = np.maximum(sensor_data.uy_max, uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]) + sensor_data.uz_max = np.maximum(sensor_data.uz_max, uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), uz_sgz.shape, order='F')]) + else: + raise RuntimeError("Wrong dimensions") + + # store the minimum particle velocity + if flags.record_u_min: + if file_index == 0: + if (dim == 1): + sensor_data.ux_min = ux_sgx[sensor_mask_index] + elif (dim == 2): + sensor_data.ux_min = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')] + sensor_data.uy_min = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')] + elif (dim == 3): + sensor_data.ux_min = ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')] + sensor_data.uy_min = uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')] + sensor_data.uz_min = uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), uz_sgz.shape, order='F')] + else: + raise RuntimeError("Wrong dimensions") + else: + if (dim == 1): + sensor_data.ux_min = np.minimum(sensor_data.ux_min, ux_sgx[sensor_mask_index]) + elif (dim == 2): + sensor_data.ux_min = np.minimum(sensor_data.ux_min, ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]) + sensor_data.uy_min = np.minimum(sensor_data.uy_min, uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]) + elif (dim == 3): + sensor_data.ux_min = np.minimum(sensor_data.ux_min, ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]) + sensor_data.uy_min = np.minimum(sensor_data.uy_min, uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]) + sensor_data.uz_min = np.minimum(sensor_data.uz_min, uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), uz_sgz.shape, order='F')]) + else: + raise RuntimeError("Wrong dimensions") + + # store the rms particle velocity + if flags.record_u_rms: + if (dim == 1): + sensor_data.ux_rms = np.sqrt((sensor_data.ux_rms**2 * (file_index - 0) + + ux_sgx[sensor_mask_index]**2) / (file_index +1)) + elif (dim == 2): + sensor_data.ux_rms = np.sqrt((sensor_data.ux_rms**2 * (file_index - 0) + + ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]**2) / (file_index +1)) + sensor_data.uy_rms = np.sqrt((sensor_data.uy_rms**2 * (file_index - 0) + + uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]**2) / (file_index +1)) + elif (dim == 3): + sensor_data.ux_rms = np.sqrt((sensor_data.ux_rms**2 * (file_index - 0) + + ux_sgx[np.unravel_index(np.squeeze(sensor_mask_index), ux_sgx.shape, order='F')]**2) / (file_index +1)) + sensor_data.uy_rms = np.sqrt((sensor_data.uy_rms**2 * (file_index - 0) + + uy_sgy[np.unravel_index(np.squeeze(sensor_mask_index), uy_sgy.shape, order='F')]**2) / (file_index +1)) + sensor_data.uz_rms = np.sqrt((sensor_data.uz_rms**2 * (file_index - 0) + + uz_sgz[np.unravel_index(np.squeeze(sensor_mask_index), uz_sgz.shape, order='F')]**2) / (file_index +1)) + + + # ========================================================================= + # CARTESIAN SENSOR MASK + # ========================================================================= + + elif flags.use_cuboid_corners: + + n_cuboids: int = np.shape(record.cuboid_corners_list)[1] + + # s_start: int = 0 + + # for each cuboid + for cuboid_index in np.arange(n_cuboids): + + # get size of cuboid for indexing regions of computational grid + if dim == 1: + cuboid_size_x = [record.cuboid_corners_list[1, cuboid_index] - record.cuboid_corners_list[0, cuboid_index], 1] + elif dim == 2: + cuboid_size_x = [record.cuboid_corners_list[2, cuboid_index] - record.cuboid_corners_list[0, cuboid_index], + record.cuboid_corners_list[3, cuboid_index] - record.cuboid_corners_list[1, cuboid_index]] + elif dim == 3: + cuboid_size_x = [record.cuboid_corners_list[3, cuboid_index] + 1 - record.cuboid_corners_list[0, cuboid_index], + record.cuboid_corners_list[4, cuboid_index] + 1 - record.cuboid_corners_list[1, cuboid_index], + record.cuboid_corners_list[5, cuboid_index] + 1 - record.cuboid_corners_list[2, cuboid_index]] + + # cuboid_num_points: int = np.prod(cuboid_size_x) + + # s_end: int = s_start + cuboid_num_points + + # sensor_mask_sub_index = sensor_mask_index[s_start:s_end] + + # s_start = s_end + + x_indices = np.arange(record.cuboid_corners_list[0, cuboid_index], record.cuboid_corners_list[3, cuboid_index]+1, dtype=int) + y_indices = np.arange(record.cuboid_corners_list[1, cuboid_index], record.cuboid_corners_list[4, cuboid_index]+1, dtype=int) + z_indices = np.arange(record.cuboid_corners_list[2, cuboid_index], record.cuboid_corners_list[5, cuboid_index]+1, dtype=int) + + # Create a meshgrid + xx, yy, zz = np.meshgrid(x_indices, y_indices, z_indices, indexing='ij') + + # Combine into a list of indices + cuboid_indices = np.array([xx.flatten(), yy.flatten(), zz.flatten()]).T + + result = p[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + + # store the time history of the acoustic pressure + if (flags.record_p or flags.record_I or flags.record_I_avg): + if not flags.compute_directivity: + # Use the indices to index into p + sensor_data[cuboid_index].p[..., file_index] = result + else: + raise NotImplementedError('directivity not implemented at the moment') + + # store the maximum acoustic pressure + if flags.record_p_max: + if file_index == 0: + sensor_data[cuboid_index].p_max = result + else: + sensor_data[cuboid_index].p_max = np.maximum(sensor_data[cuboid_index].p_max, result) + + # store the minimum acoustic pressure + if flags.record_p_min: + if file_index == 0: + sensor_data[cuboid_index].p_min = result + else: + sensor_data[cuboid_index].p_min = np.maximum(sensor_data[cuboid_index].p_min, result) + + # store the rms acoustic pressure + if flags.record_p_rms: + if file_index == 0: + sensor_data[cuboid_index].p_rms = result**2 + else: + sensor_data[cuboid_index].p_rms = np.sqrt((sensor_data[cuboid_index].p_rms**2 * file_index + result**2) / (file_index + 1) ) + + # store the time history of the particle velocity on the staggered grid + if flags.record_u: + if (dim == 1): + sensor_data[cuboid_index].ux[..., file_index] = ux_sgx[cuboid_indices] + elif (dim == 2): + sensor_data[cuboid_index].ux[..., file_index] = ux_sgx[cuboid_indices] + sensor_data[cuboid_index].uy[..., file_index] = uy_sgy[cuboid_indices] + elif (dim == 3): + sensor_data[cuboid_index].ux[..., file_index] = ux_sgx[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + sensor_data[cuboid_index].uy[..., file_index] = uy_sgy[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + sensor_data[cuboid_index].uz[..., file_index] = uz_sgz[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + else: + raise RuntimeError("Wrong dimensions") + + # store the time history of the particle velocity + if flags.record_u_non_staggered or flags.record_I or flags.record_I_avg: + if (dim == 1): + sensor_data[cuboid_index].ux_non_staggered[..., file_index] = ux_shifted[cuboid_indices] + elif (dim == 2): + sensor_data[cuboid_index].ux_non_staggered[..., file_index] = ux_shifted[cuboid_indices] + sensor_data[cuboid_index].uy_non_staggered[..., file_index] = uy_shifted[cuboid_indices] + elif (dim == 3): + sensor_data[cuboid_index].ux_non_staggered[..., file_index] = ux_shifted[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + sensor_data[cuboid_index].uy_non_staggered[..., file_index] = uy_shifted[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + sensor_data[cuboid_index].uz_non_staggered[..., file_index] = uz_shifted[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + else: + raise RuntimeError("Wrong dimensions") + + # store the split components of the particle velocity + if flags.record_u_split_field: + if (dim == 2): + + # compute forward FFTs + ux_k = record.x_shift_neg * np.fft.fftn(ux_sgx) + uy_k = record.y_shift_neg * np.fft.fftn(uy_sgy) + + # ux compressional + split_field = np.real(np.fft.ifftn(record.kx_norm**2 * ux_k + record.kx_norm * record.ky_norm * uy_k)) + sensor_data.ux_split_p[..., file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # ux shear + split_field = np.real(np.fft.ifftn((1.0 - record.kx_norm**2) * ux_k - record.kx_norm * record.ky_norm * uy_k)) + sensor_data[cuboid_index].ux_split_s[..., file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uy compressional + split_field = np.real(np.fft.ifftn(record.ky_norm * record.kx_norm * ux_k + record.ky_norm **2 * uy_k)) + sensor_data[cuboid_index].uy_split_p[..., file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + # uy shear + split_field = np.real(np.fft.ifftn(-record.ky_norm * record.kx_norm * ux_k + (1.0 - record.ky_norm**2) * uy_k)) + sensor_data[cuboid_index].uy_split_s[..., file_index] = split_field[np.unravel_index(np.squeeze(sensor_mask_index), split_field.shape, order='F')] + + elif (dim == 3): + + # compute forward FFTs + ux_k = np.multiply(record.x_shift_neg, np.fft.fftn(ux_sgx), order='F') + uy_k = np.multiply(record.y_shift_neg, np.fft.fftn(uy_sgy), order='F') + uz_k = np.multiply(record.z_shift_neg, np.fft.fftn(uz_sgz), order='F') + + # ux compressional + split_field = np.real(np.fft.ifftn(record.kx_norm**2 * ux_k + + record.kx_norm * record.ky_norm * uy_k + + record.kx_norm * record.kz_norm * uz_k)) + sensor_data[cuboid_index].ux_split_p[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + # ux shear + split_field = np.real(np.fft.ifftn((1.0 - record.kx_norm**2) * ux_k - + record.kx_norm * record.ky_norm * uy_k - + record.kx_norm * record.kz_norm * uz_k)) + sensor_data[cuboid_index].ux_split_s[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + + # uy compressional + split_field = np.real(np.fft.ifftn(record.ky_norm * record.kx_norm * ux_k + + record.ky_norm**2 * uy_k + + record.ky_norm * record.kz_norm * uz_k)) + sensor_data[cuboid_index].uy_split_p[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + + # uy shear + split_field = np.real(np.fft.ifftn(- record.ky_norm * record.kx_norm * ux_k + + (1.0 - record.ky_norm**2) * uy_k - + record.ky_norm * record.kz_norm * uz_k)) + sensor_data[cuboid_index].uy_split_s[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + + # uz compressional + split_field = np.real(np.fft.ifftn(record.kz_norm * record.kx_norm * ux_k + + record.kz_norm * record.ky_norm * uy_k + + record.kz_norm**2 * uz_k)) + sensor_data[cuboid_index].uz_split_p[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + + # uz shear + split_field = np.real(np.fft.ifftn( -record.kz_norm * record.kx_norm * ux_k - + record.kz_norm * record.ky_norm * uy_k + + (1.0 - record.kz_norm**2) * uz_k)) + sensor_data[cuboid_index].uz_split_s[..., file_index] = split_field[cuboid_indices[:, 0], cuboid_indices[:, 1], cuboid_indices[:, 2]].reshape(cuboid_size_x) + else: + raise RuntimeError("Wrong dimensions") + + + + + + + + + + # ========================================================================= + # CARTESIAN SENSOR MASK + # ========================================================================= + + # extract data from specified Cartesian coordinates using interpolation + # (record.tri and record.bc are the Delaunay triangulation and Barycentric coordinates + # returned by gridDataFast3D) + else: + + # store the time history of the acoustic pressure + if flags.record_p or flags.record_I or flags.record_I_avg: + if dim == 1: + # i = np.argmin(np.abs(record.grid_x - record.sensor_x[0])).astype(int) + # j = np.argmin(np.abs(record.grid_x - record.sensor_x[1])).astype(int) + # print("THIS:", p.shape, file_index, i, j, record.sensor_x, record.sensor_x[0], record.sensor_x[1], record.grid_x[i], record.grid_x[j], + # p[i], p[j], + # record.grid_x[0], record.grid_x[-1], p.max()) + sensor_data.p[:, file_index] = np.interp(np.squeeze(record.sensor_x), np.squeeze(record.grid_x), p) + else: + sensor_data.p[:, file_index] = np.sum(p[record.tri] * record.bc, axis=1) + + # store the maximum acoustic pressure + if flags.record_p_max: + if dim == 1: + if file_index == 0: + sensor_data.p_max = np.interp(record.grid_x, p, record.sensor_x) + else: + sensor_data.p_max = np.maximum(sensor_data.p_max, np.interp(record.grid_x, p, record.sensor_x)) + else: + if file_index == 0: + sensor_data.p_max = np.sum(p[record.tri] * record.bc, axis=1) + else: + sensor_data.p_max = np.maximum(sensor_data.p_max, np.sum(p[record.tri] * record.bc, axis=1)) + + # store the minimum acoustic pressure + if flags.record_p_min: + if dim == 1: + if file_index == 0: + sensor_data.p_min = np.interp(record.grid_x, p, record.sensor_x) + else: + sensor_data.p_min = np.minimum(sensor_data.p_min, np.interp(record.grid_x, p, record.sensor_x)) + else: + if file_index == 0: + sensor_data.p_min = np.sum(p[record.tri] * record.bc, axis=1) + else: + sensor_data.p_min = np.minimum(sensor_data.p_min, np.sum(p[record.tri] * record.bc, axis=1)) + + # store the rms acoustic pressure + if flags.record_p_rms: + if dim == 1: + sensor_data.p_rms = np.sqrt((sensor_data.p_rms**2 * (file_index - 0) + (np.interp(record.grid_x, p, record.sensor_x))**2) / (file_index +1)) + else: + sensor_data.p_rms[:] = np.sqrt((sensor_data.p_rms[:]**2 * (file_index - 0) + + (np.sum(p[record.tri] * record.bc, axis=1))**2) / (file_index +1)) + + # store the time history of the particle velocity on the staggered grid + if flags.record_u: + if (dim == 1): + sensor_data.ux[:, file_index] = np.interp(record.grid_x, ux_sgx, record.sensor_x) + elif (dim == 2): + sensor_data.ux[:, file_index] = np.sum(ux_sgx[record.tri] * record.bc, axis=1) + sensor_data.uy[:, file_index] = np.sum(uy_sgy[record.tri] * record.bc, axis=1) + elif (dim == 3): + sensor_data.ux[:, file_index] = np.sum(ux_sgx[record.tri] * record.bc, axis=1) + sensor_data.uy[:, file_index] = np.sum(uy_sgy[record.tri] * record.bc, axis=1) + sensor_data.uz[:, file_index] = np.sum(uz_sgz[record.tri] * record.bc, axis=1) + else: + raise RuntimeError("Wrong dimensions") + + # store the time history of the particle velocity + if flags.record_u_non_staggered or flags.record_I or flags.record_I_avg: + if (dim == 1): + sensor_data.ux_non_staggered[:, file_index] = np.interp(record.grid_x, ux_shifted, record.sensor_x) + elif (dim == 2): + sensor_data.ux_non_staggered[:, file_index] = np.sum(ux_shifted[record.tri] * record.bc, axis=1) + sensor_data.uy_non_staggered[:, file_index] = np.sum(uy_shifted[record.tri] * record.bc, axis=1) + elif (dim == 3): + sensor_data.ux_non_staggered[:, file_index] = np.sum(ux_shifted[record.tri] * record.bc, axis=1) + sensor_data.uy_non_staggered[:, file_index] = np.sum(uy_shifted[record.tri] * record.bc, axis=1) + sensor_data.uz_non_staggered[:, file_index] = np.sum(uz_shifted[record.tri] * record.bc, axis=1) + else: + raise RuntimeError("Wrong dimensions") + + # store the maximum particle velocity + if flags.record_u_max: + if file_index == 0: + if (dim == 1): + sensor_data.ux_max = np.interp(record.grid_x, ux_sgx, record.sensor_x) + elif (dim == 2): + sensor_data.ux_max = np.sum(ux_sgx[record.tri] * record.bc, axis=1) + sensor_data.uy_max = np.sum(uy_sgy[record.tri] * record.bc, axis=1) + elif (dim == 3): + sensor_data.ux_max = np.sum(ux_sgx[record.tri] * record.bc, axis=1) + sensor_data.uy_max = np.sum(uy_sgy[record.tri] * record.bc, axis=1) + sensor_data.uz_max = np.sum(uz_sgz[record.tri] * record.bc, axis=1) + else: + raise RuntimeError("Wrong dimensions") + else: + if (dim == 1): + sensor_data.ux_max = np.maximum(sensor_data.ux_max, np.interp(record.grid_x, ux_sgx, record.sensor_x)) + elif (dim == 2): + sensor_data.ux_max = np.maximum(sensor_data.ux_max, np.sum(ux_sgx[record.tri] * record.bc, axis=1)) + sensor_data.uy_max = np.maximum(sensor_data.uy_max, np.sum(uy_sgy[record.tri] * record.bc, axis=1)) + elif (dim == 3): + sensor_data.ux_max = np.maximum(sensor_data.ux_max, np.sum(ux_sgx[record.tri] * record.bc, axis=1)) + sensor_data.uy_max = np.maximum(sensor_data.uy_max, np.sum(uy_sgy[record.tri] * record.bc, axis=1)) + sensor_data.uz_max = np.maximum(sensor_data.uz_max, np.sum(uz_sgz[record.tri] * record.bc, axis=1)) + else: + raise RuntimeError("Wrong dimensions") + + # store the minimum particle velocity + if flags.record_u_min: + if file_index == 0: + if (dim == 1): + sensor_data.ux_min = np.interp(record.grid_x, ux_sgx, record.sensor_x) + elif (dim == 2): + sensor_data.ux_min = np.sum(ux_sgx[record.tri] * record.bc, axis=1) + sensor_data.uy_min = np.sum(uy_sgy[record.tri] * record.bc, axis=1) + elif (dim == 3): + sensor_data.ux_min = np.sum(ux_sgx[record.tri] * record.bc, axis=1) + sensor_data.uy_min = np.sum(uy_sgy[record.tri] * record.bc, axis=1) + sensor_data.uz_min = np.sum(uz_sgz[record.tri] * record.bc, axis=1) + else: + raise RuntimeError("Wrong dimensions") + + else: + if (dim == 1): + sensor_data.ux_min = np.minimum(sensor_data.ux_min, np.interp(record.grid_x, ux_sgx, record.sensor_x)) + elif (dim == 2): + sensor_data.ux_min = np.minimum(sensor_data.ux_min, np.sum(ux_sgx[record.tri] * record.bc, axis=1)) + sensor_data.uy_min = np.minimum(sensor_data.uy_min, np.sum(uy_sgy[record.tri] * record.bc, axis=1)) + elif (dim == 3): + sensor_data.ux_min = np.minimum(sensor_data.ux_min, np.sum(ux_sgx[record.tri] * record.bc, axis=1)) + sensor_data.uy_min = np.minimum(sensor_data.uy_min, np.sum(uy_sgy[record.tri] * record.bc, axis=1)) + else: + raise RuntimeError("Wrong dimensions") + + # store the rms particle velocity + if flags.record_u_rms: + if (dim == 1): + sensor_data.ux_rms = np.sqrt((sensor_data.ux_rms**2 * (file_index - 0) + (np.interp(record.grid_x, ux_sgx, record.sensor_x))**2) / (file_index +1)) + elif (dim == 2): + sensor_data.ux_rms[:] = np.sqrt((sensor_data.ux_rms[:]**2 * (file_index - 0) + (np.sum(ux_sgx[record.tri] * record.bc, axis=1))**2) / (file_index +1)) + sensor_data.uy_rms[:] = np.sqrt((sensor_data.uy_rms[:]**2 * (file_index - 0) + (np.sum(uy_sgy[record.tri] * record.bc, axis=1))**2) / (file_index +1)) + elif (dim == 3): + sensor_data.ux_rms[:] = np.sqrt((sensor_data.ux_rms[:]**2 * (file_index - 0) + (np.sum(ux_sgx[record.tri] * record.bc, axis=1))**2) / (file_index +1)) + sensor_data.uy_rms[:] = np.sqrt((sensor_data.uy_rms[:]**2 * (file_index - 0) + (np.sum(uy_sgy[record.tri] * record.bc, axis=1))**2) / (file_index +1)) + sensor_data.uz_rms[:] = np.sqrt((sensor_data.uz_rms[:]**2 * (file_index - 0) + (np.sum(uz_sgz[record.tri] * record.bc, axis=1))**2) / (file_index +1)) + else: + raise RuntimeError("Wrong dimensions") + + # ========================================================================= + # RECORDED VARIABLES OVER ENTIRE GRID + # ========================================================================= + + # store the maximum acoustic pressure over all the grid elements + if flags.record_p_max_all and (not flags.use_cuboid_corners): + if (dim == 1): + if file_index == 0: + sensor_data.p_max_all = p[record.x1_inside:record.x2_inside] + else: + sensor_data.p_max_all = np.maximum(sensor_data.p_max_all, p[record.x1_inside:record.x2_inside]) + + elif (dim == 2): + if file_index == 0: + sensor_data.p_max_all = p[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + else: + sensor_data.p_max_all = np.maximum(sensor_data.p_max_all, p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside]) + + elif (dim == 3): + if file_index == 0: + sensor_data.p_max_all = p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + else: + sensor_data.p_max_all = np.maximum(sensor_data.p_max_all, + p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + else: + raise RuntimeError("Wrong dimensions") + + # store the minimum acoustic pressure over all the grid elements + if flags.record_p_min_all and (not flags.use_cuboid_corners): + if (dim == 1): + if file_index == 0: + sensor_data.p_min_all = p[record.x1_inside:record.x2_inside] + else: + sensor_data.p_min_all = np.minimum(sensor_data.p_min_all, p[record.x1_inside:record.x2_inside]) + + elif (dim == 2): + if file_index == 0: + sensor_data.p_min_all = p[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + else: + sensor_data.p_min_all = np.minimum(sensor_data.p_min_all, p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside]) + + elif (dim == 3): + if file_index == 0: + sensor_data.p_min_all = p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + else: + sensor_data.p_min_all = np.minimum(sensor_data.p_min_all, + p[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + else: + raise RuntimeError("Wrong dimensions") + + # store the maximum particle velocity over all the grid elements + if flags.record_u_max_all and (not flags.use_cuboid_corners): + if (dim == 1): + if file_index == 0: + sensor_data.ux_max_all = ux_sgx[record.x1_inside:record.x2_inside] + else: + sensor_data.ux_max_all = np.maximum(sensor_data.ux_max_all, ux_sgx[record.x1_inside:record.x2_inside]) + + elif (dim == 2): + if file_index == 0: + sensor_data.ux_max_all = ux_sgx[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + sensor_data.uy_max_all = uy_sgy[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + else: + sensor_data.ux_max_all = np.maximum(sensor_data.ux_max_all, + ux_sgx[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside]) + sensor_data.uy_max_all = np.maximum(sensor_data.uy_max_all, + uy_sgy[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside]) + + elif (dim == 3): + if file_index == 0: + sensor_data.ux_max_all = ux_sgx[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + sensor_data.uy_max_all = uy_sgy[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + sensor_data.uz_max_all = uz_sgz[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + else: + sensor_data.ux_max_all = np.maximum(sensor_data.ux_max_all, + ux_sgx[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + sensor_data.uy_max_all = np.maximum(sensor_data.uy_max_all, + uy_sgy[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + sensor_data.uz_max_all = np.maximum(sensor_data.uz_max_all, + uz_sgz[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + + else: + raise RuntimeError("Wrong dimensions") + + # store the minimum particle velocity over all the grid elements + if flags.record_u_min_all and (not flags.use_cuboid_corners): + if (dim == 1): + if file_index == 0: + sensor_data.ux_min_all = ux_sgx[record.x1_inside:record.x2_inside] + else: + sensor_data.ux_min_all = np.minimum(sensor_data.ux_min_all, ux_sgx[record.x1_inside:record.x2_inside]) + + elif (dim == 2): + if file_index == 0: + sensor_data.ux_min_all = ux_sgx[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + sensor_data.uy_min_all = uy_sgy[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside] + else: + sensor_data.ux_min_all = np.minimum(sensor_data.ux_min_all, + ux_sgx[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside]) + sensor_data.uy_min_all = np.minimum(sensor_data.uy_min_all, + uy_sgy[record.x1_inside:record.x2_inside, record.y1_inside:record.y2_inside]) + + elif (dim == 3): + if file_index == 0: + sensor_data.ux_min_all = ux_sgx[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + sensor_data.uy_min_all = uy_sgy[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + sensor_data.uz_min_all = uz_sgz[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside] + else: + sensor_data.ux_min_all = np.minimum(sensor_data.ux_min_all, + ux_sgx[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + sensor_data.uy_min_all = np.minimum(sensor_data.uy_min_all, + uy_sgy[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + sensor_data.uz_min_all = np.minimum(sensor_data.uz_min_all, + uz_sgz[record.x1_inside:record.x2_inside, + record.y1_inside:record.y2_inside, + record.z1_inside:record.z2_inside]) + else: + raise RuntimeError("Wrong dimensions") + + return sensor_data \ No newline at end of file diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index 7d2ed643..2092e9ba 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -25,8 +25,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, source: kSource, sensor: Union[NotATransducer, kSensor], medium: kWaveMedium, - simulation_options: SimulationOptions, - verbose: bool = False): + simulation_options: SimulationOptions): """ KSPACEFIRSTORDER1D 1D time-domain simulation of wave propagation. @@ -586,8 +585,6 @@ def kspace_first_order_1D(kgrid: kWaveGrid, c0 = c0.astype(myType) - verbose: bool = False - # ========================================================================= # CREATE INDEX VARIABLES # ========================================================================= @@ -666,37 +663,14 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # start time loop for t_index in tqdm(np.arange(index_start, index_end, index_step, dtype=int)): - # print("0.", np.shape(p)) - - # enforce time reversal bounday condition - # if options.time_rev: - # # load pressure value and enforce as a Dirichlet boundary condition - # p[k_sim.sensor_mask_index] = sensor.time_reversal_boundary_data[:, t_index] - # # update p_k - # p_k = scipy.fft.fft(p) - # # compute rhox using an adiabatic equation of state - # rhox_mod = p / c0**2 - # rhox[k_sim.sensor_mask_index] = rhox_mod[k_sim.sensor_mask_index] - - - # print("1.", np.shape(p)) - # calculate ux at the next time step using dp/dx at the current time step if not options.nonuniform_grid and not options.use_finite_difference: - if verbose: - print("Here 1-----.", np.shape(pml_x), np.shape(pml_x_sgx), np.shape(ux_sgx), - '......', np.shape(ddx_k), np.shape(ddx_k_shift_pos), np.shape(kappa), - ',,,,,,', np.shape(p_k), np.shape(rho0_sgx_inv)) - - - # calculate gradient using the k-space method on a regular grid ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_pos * kappa * p_k, axes=(0,)))) elif options.use_finite_difference: - print("\t\tEXIT! options.use_finite_difference") match options.use_finite_difference: case 2: @@ -714,13 +688,11 @@ def kspace_first_order_1D(kgrid: kWaveGrid, dpdx = (np.insert(p[:-1], 0, 0) - 27.0 * p + 27 * np.append(p[1:], 0.0) - np.append(p[2:], [0, 0])) / (24.0 * kgrid.dx) ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * dpdx ) - else: - print("\t\tEXIT! else", ) - + else: # calculate gradient using the k-space method on a non-uniform grid # via the mapped pseudospectral method ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - - dt * rho0_sgx_inv * k_sim.kgrid.dxudxn_sgx * np.real(scipy.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) + dt * rho0_sgx_inv * k_sim.kgrid.dxudxn_sgx * np.real(scipy.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) # print("2.", np.shape(p)) @@ -744,23 +716,15 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # # add the source values to the existing field values # ux_sgx[k_sim.u_source_pos_index] = ux_sgx[k_sim.u_source_pos_index] + source.ux[k_sim.u_source_sig_index, t_index] - - # print("3.", np.shape(p)) # calculate du/dx at the next time step if not options.nonuniform_grid and not options.use_finite_difference: - if verbose: - print("Here 1.", np.shape(p), np.shape(ux_sgx), np.shape(ddx_k), np.shape(ddx_k_shift_neg), np.shape(kappa)) - # calculate gradient using the k-space method on a regular grid duxdx = np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * scipy.fft.fftn(ux_sgx, axes=(0,)), axes=(0,) ) ) - if verbose: - print("Here 1(end). duxdx:", np.shape(duxdx)) elif options.use_finite_difference: - print("\t\tEXIT! options.use_finite_difference") match options.use_finite_difference: case 2: @@ -780,20 +744,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, duxdx = kgrid.dxudxn * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * scipy.fft.fftn(ux_sgx, axes=(0,)), axes=(0,))) - # print("4.", np.shape(p)) # calculate rhox at the next time step if not k_sim.is_nonlinear: # use linearised mass conservation equation - - if verbose: - print("pre:", pml_x.shape, rhox.shape, duxdx.shape, dt, rho0.shape) - rhox = pml_x * (pml_x * rhox - dt * rho0 * duxdx) - if verbose: - print("post:", pml_x.shape, rhox.shape, duxdx.shape, dt, rho0.shape) - else: # use nonlinear mass conservation equation (explicit calculation) rhox = pml_x * (pml_x * rhox - dt * (2.0 * rhox + rho0) * duxdx) @@ -821,16 +777,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # # add the source values to the existing field values # rhox[k_sim.p_source_pos_index] = rhox[k_sim.p_source_pos_index] + source.p[k_sim.p_source_sig_index, t_index] - - # print("6.", np.shape(p)) # equation of state if not k_sim.is_nonlinear: # print("is linear", k_sim.equation_of_state, type(k_sim.equation_of_state)) match k_sim.equation_of_state: case 'lossless': - - # print("Here 2. lossless / linear", np.shape(p)) # calculate p using a linear adiabatic equation of state p = np.squeeze(c0**2) * np.squeeze(rhox) @@ -839,8 +791,6 @@ def kspace_first_order_1D(kgrid: kWaveGrid, case 'absorbing': - # print("Here 2. absorbing / linear", np.shape(p)) - # calculate p using a linear absorbing equation of state p = np.squeeze(c0**2 * (rhox + medium.absorb_tau * np.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,) )) @@ -848,8 +798,6 @@ def kspace_first_order_1D(kgrid: kWaveGrid, case 'stokes': - - # print("Here 2. stokes / linear") # calculate p using a linear absorbing equation of state # assuming alpha_power = 2 @@ -859,16 +807,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, else: match k_sim.equation_of_state: case 'lossless': - - print("Here 2. lossless / nonlinear") # calculate p using a nonlinear adiabatic equation of state p = c0**2 * (rhox + medium.BonA * rhox**2 / (2.0 * rho0)) case 'absorbing': - print("Here 2. absorbing / nonlinear") - # calculate p using a nonlinear absorbing equation of state p = c0**2 * ( rhox + medium.absorb_tau * np.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,))) @@ -876,8 +820,6 @@ def kspace_first_order_1D(kgrid: kWaveGrid, + medium.BonA * rhox**2 / (2.0 * rho0) ) case 'stokes': - - print("Here 2. stokes / nonlinear") # calculate p using a nonlinear absorbing equation of state # assuming alpha_power = 2 @@ -963,7 +905,8 @@ def kspace_first_order_1D(kgrid: kWaveGrid, 'compute_directivity': False}) # run sub-function to extract the required data from the acoustic variables - sensor_data = extract_sensor_data(kgrid.dim, sensor_data, file_index, k_sim.sensor_mask_index, extract_options, record, p, ux_sgx) + sensor_data = extract_sensor_data(kgrid.dim, sensor_data, file_index, k_sim.sensor_mask_index, + extract_options, record, p, ux_sgx) From 9b2d95cf8ad1d0a7f09c35189a0592669cf4064f Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 17:04:47 +0100 Subject: [PATCH 07/29] add into init --- kwave/kWaveSimulation_helper/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/kwave/kWaveSimulation_helper/__init__.py b/kwave/kWaveSimulation_helper/__init__.py index a2eb6e82..bfeacce1 100644 --- a/kwave/kWaveSimulation_helper/__init__.py +++ b/kwave/kWaveSimulation_helper/__init__.py @@ -1,6 +1,7 @@ from kwave.kWaveSimulation_helper.create_absorption_variables import create_absorption_variables from kwave.kWaveSimulation_helper.display_simulation_params import display_simulation_params from kwave.kWaveSimulation_helper.expand_grid_matrices import expand_grid_matrices +from kwave.kWaveSimulation_helper.extract_sensor_data import extract_sensor_data from kwave.kWaveSimulation_helper.retract_transducer_grid_size import retract_transducer_grid_size from kwave.kWaveSimulation_helper.save_to_disk_func import save_to_disk_func from kwave.kWaveSimulation_helper.scale_source_terms_func import scale_source_terms_func From e78dcaa6035b1591f5d0adab4d61a253b9f800ee Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 17:52:23 +0100 Subject: [PATCH 08/29] imports --- kwave/kWaveSimulation_helper/__init__.py | 4 ++++ kwave/kspaceFirstOrder1D.py | 4 +--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/kwave/kWaveSimulation_helper/__init__.py b/kwave/kWaveSimulation_helper/__init__.py index bfeacce1..cdf3e71c 100644 --- a/kwave/kWaveSimulation_helper/__init__.py +++ b/kwave/kWaveSimulation_helper/__init__.py @@ -6,3 +6,7 @@ from kwave.kWaveSimulation_helper.save_to_disk_func import save_to_disk_func from kwave.kWaveSimulation_helper.scale_source_terms_func import scale_source_terms_func from kwave.kWaveSimulation_helper.set_sound_speed_ref import set_sound_speed_ref + +__all__ = ["create_absorption_variables", "display_simulation_params", "expand_grid_matrices", + "extract_sensor_data", "retract_transducer_grid_size", "save_to_disk_func", + "scale_source_terms_func", "set_sound_speed_ref"] \ No newline at end of file diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index 2092e9ba..6fc8c484 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -8,7 +8,7 @@ from kwave.ksensor import kSensor from kwave.ksource import kSource from kwave.kWaveSimulation import kWaveSimulation - +from kwave.kWaveSimulation_helper import extract_sensor_data from kwave.ktransducer import NotATransducer from kwave.utils.data import scale_time @@ -19,8 +19,6 @@ from kwave.options.simulation_options import SimulationOptions -from kwave.kWaveSimulation_helper import extract_sensor_data - def kspace_first_order_1D(kgrid: kWaveGrid, source: kSource, sensor: Union[NotATransducer, kSensor], From 05be99307414a96372bd7e6e6141b70d0897dec8 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 18:06:59 +0100 Subject: [PATCH 09/29] formatting --- kwave/kspaceFirstOrder1D.py | 308 +----------------------------------- 1 file changed, 5 insertions(+), 303 deletions(-) diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index 6fc8c484..cc61985e 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -20,13 +20,13 @@ from kwave.options.simulation_options import SimulationOptions def kspace_first_order_1D(kgrid: kWaveGrid, - source: kSource, - sensor: Union[NotATransducer, kSensor], - medium: kWaveMedium, - simulation_options: SimulationOptions): + source: kSource, + sensor: Union[NotATransducer, kSensor], + medium: kWaveMedium, + simulation_options: SimulationOptions): """ - KSPACEFIRSTORDER1D 1D time-domain simulation of wave propagation. + 1D time-domain simulation of wave propagation. DESCRIPTION: kspaceFirstOrder1D simulates the time-domain propagation of @@ -161,304 +161,6 @@ def kspace_first_order_1D(kgrid: kWaveGrid, USAGE: sensor_data = kspaceFirstOrder1D(kgrid, medium, source, sensor) sensor_data = kspaceFirstOrder1D(kgrid, medium, source, sensor, ...) - - INPUTS: - The minimum fields that must be assigned to run an initial value problem - (for example, a photoacoustic forward simulation) are marked with a *. - - kgrid* - k-Wave grid object returned by kWaveGrid - containing Cartesian and k-space grid fields - kgrid.t_array* - evenly spaced array of time values [s] (set - to 'auto' by kWaveGrid) - - - medium.sound_speed* - sound speed distribution within the acoustic - medium [m/s] - medium.sound_speed_ref - reference sound speed used within the - k-space operator (phase correction term) - [m/s] - medium.density* - density distribution within the acoustic - medium [kg/m^3] - medium.BonA - parameter of nonlinearity - medium.alpha_power - power law absorption exponent - medium.alpha_coeff - power law absorption coefficient - [dB/(MHz^y cm)] - medium.alpha_mode - optional input to force either the - absorption or dispersion terms in the - equation of state to be excluded; valid - inputs are 'no_absorption' or - 'no_dispersion' - medium.alpha_filter - frequency domain filter applied to the - absorption and dispersion terms in the - equation of state - medium.alpha_sign - two element array used to control the sign - of absorption and dispersion terms in the - equation of state - - - source.p0* - initial pressure within the acoustic medium - source.p - time varying pressure at each of the source - positions given by source.p_mask - source.p_mask - binary matrix specifying the positions of - the time varying pressure source - distribution - source.p_mode - optional input to control whether the input - pressure is injected as a mass source or - enforced as a dirichlet boundary condition; - valid inputs are 'additive' (the default) or - 'dirichlet' - source.ux - time varying particle velocity in the - x-direction at each of the source positions - given by source.u_mask - source.u_mask - binary matrix specifying the positions of - the time varying particle velocity - distribution - source.u_mode - optional input to control whether the input - velocity is applied as a force source or - enforced as a dirichlet boundary condition; - valid inputs are 'additive' (the default) or - 'dirichlet' - - - sensor.mask* - binary matrix or a set of Cartesian points - where the pressure is recorded at each - time-step - sensor.record - cell array of the acoustic parameters to - record in the form sensor.record = {'p', - 'u', ...}; valid inputs are: - 'p' (acoustic pressure) - 'p_max' (maximum pressure) - 'p_min' (minimum pressure) - 'p_rms' (RMS pressure) - 'p_final' (final pressure field at all grid points) - 'p_max_all' (maximum pressure at all grid points) - 'p_min_all' (minimum pressure at all grid points) - 'u' (particle velocity) - 'u_max' (maximum particle velocity) - 'u_min' (minimum particle velocity) - 'u_rms' (RMS particle velocity) - 'u_final' (final particle velocity field at all grid points) - 'u_max_all' (maximum particle velocity at all grid points) - 'u_min_all' (minimum particle velocity at all grid points) - 'u_non_staggered' (particle velocity on non-staggered grid) - 'I' (time varying acoustic intensity) - 'I_avg' (average acoustic intensity) - sensor.record_start_index - - time index at which the sensor should start - recording the data specified by - sensor.record (default = 1) - sensor.time_reversal_boundary_data - - time varying pressure enforced as a - Dirichlet boundary condition over - sensor.mask - sensor.frequency_response - - two element array specifying the center - frequency and percentage bandwidth of a - frequency domain Gaussian filter applied to - the sensor_data - - Note: For heterogeneous medium parameters, medium.sound_speed and - medium.density must be given in matrix form with the same dimensions as - kgrid. For homogeneous medium parameters, these can be given as single - numeric values. If the medium is homogeneous and velocity inputs or - outputs are not required, it is not necessary to specify medium.density. - - OPTIONAL INPUTS: - Optional 'string', value pairs that may be used to modify the default - computational settings. - - 'CartInterp' - Interpolation mode used to extract the - pressure when a Cartesian sensor mask is - given. If set to 'nearest' and more than one - Cartesian point maps to the same grid point, - duplicated data points are discarded and - sensor_data will be returned with less - points than that specified by sensor.mask - (default = 'linear'). - 'CreateLog' - Boolean controlling whether the command line - output is saved using the diary function - with a date and time stamped filename - (default = False). - 'DataCast' - String input of the data type that variables - are cast to before computation. For example, - setting to 'single' will speed up the - computation time (due to the improved - efficiency of fftn and ifftn for this data - type) at the expense of a loss in precision. - This variable is also useful for utilising - GPU parallelisation through libraries such - as the Parallel Computing Toolbox by setting - 'DataCast' to 'gpuArray-single' (default = - 'off'). - 'DataRecast' - Boolean controlling whether the output data - is cast back to double precision. If set to - False, sensor_data will be returned in the - data format set using the 'DataCast' option. - 'DisplayMask' - Binary matrix overlaid onto the animated - simulation display. Elements set to 1 within - the display mask are set to black within the - display (default = sensor.mask). - 'LogScale' - Boolean controlling whether the pressure - field is log compressed before display - (default = False). The data is compressed by - scaling both the positive and negative - values between 0 and 1 (truncating the data - to the given plot scale), adding a scalar - value (compression factor) and then using - the corresponding portion of a log10 plot - for the compression (the negative parts are - remapped to be negative thus the default - color scale will appear unchanged). The - amount of compression can be controlled by - adjusting the compression factor which can - be given in place of the Boolean input. The - closer the compression factor is to zero, - the steeper the corresponding part of the - log10 plot used, and the greater the - compression (the default compression factor - is 0.02). - 'MovieArgs' - Settings for VideoWriter. Parameters must be - given as {'param', value, ...} pairs within - a cell array (default = {}), where 'param' - corresponds to a writable property of a - VideoWriter object. - 'MovieName' - Name of the movie produced when - 'RecordMovie' is set to true (default = - 'date-time-kspaceFirstOrder1D'). - 'MovieProfile' - Profile input passed to VideoWriter. - 'PlotFreq' - The number of iterations which must pass - before the simulation plot is updated - (default = 10). - 'PlotLayout' - Boolean controlling whether a four panel - plot of the initial simulation layout is - produced (initial pressure, sensor mask, - sound speed, density) (default = False). - 'PlotPML' - Boolean controlling whether the perfectly - matched layer is shown in the simulation - plots. If set to False, the PML is not - displayed (default = true). - 'PlotScale' - [min, max] values used to control the - scaling for imagesc (visualisation). If set - to 'auto', a symmetric plot scale is chosen - automatically for each plot frame. - 'PlotSim' - Boolean controlling whether the simulation - iterations are progressively plotted - (default = true). - 'PMLAlpha' - Absorption within the perfectly matched - layer in Nepers per grid point (default = - 2). - 'PMLInside' - Boolean controlling whether the perfectly - matched layer is inside or outside the grid. - If set to False, the input grids are - enlarged by PMLSize before running the - simulation (default = true). - 'PMLSize' - Size of the perfectly matched layer in grid - points. To remove the PML, set the - appropriate PMLAlpha to zero rather than - forcing the PML to be of zero size (default - = 20). - 'RecordMovie' - Boolean controlling whether the displayed - image frames are captured and stored as a - movie using VideoWriter (default = False). - 'Smooth' - Boolean controlling whether source.p0, - medium.sound_speed, and medium.density are - smoothed using smooth before computation. - 'Smooth' can either be given as a single - Boolean value or as a 3 element array to - control the smoothing of source.p0, - medium.sound_speed, and medium.density, - independently (default = [true, False, - False]). - - OUTPUTS: - If sensor.record is not defined by the user: - sensor_data - time varying pressure recorded at the sensor - positions given by sensor.mask - - If sensor.record is defined by the user: - sensor_data.p - time varying pressure recorded at the sensor - positions given by sensor.mask (returned if - 'p' is set) - sensor_data.p_max - maximum pressure recorded at the sensor - positions given by sensor.mask (returned if - 'p_max' is set) - sensor_data.p_min - minimum pressure recorded at the sensor - positions given by sensor.mask (returned if - 'p_min' is set) - sensor_data.p_rms - rms of the time varying pressure recorded at - the sensor positions given by sensor.mask - (returned if 'p_rms' is set) - sensor_data.p_final - final pressure field at all grid points - within the domain (returned if 'p_final' is - set) - sensor_data.p_max_all - maximum pressure recorded at all grid points - within the domain (returned if 'p_max_all' - is set) - sensor_data.p_min_all - minimum pressure recorded at all grid points - within the domain (returned if 'p_min_all' - is set) - sensor_data.ux - time varying particle velocity in the - x-direction recorded at the sensor positions - given by sensor.mask (returned if 'u' is - set) - sensor_data.ux_max - maximum particle velocity in the x-direction - recorded at the sensor positions given by - sensor.mask (returned if 'u_max' is set) - sensor_data.ux_min - minimum particle velocity in the x-direction - recorded at the sensor positions given by - sensor.mask (returned if 'u_min' is set) - sensor_data.ux_rms - rms of the time varying particle velocity in - the x-direction recorded at the sensor - positions given by sensor.mask (returned if - 'u_rms' is set) - sensor_data.ux_final - final particle velocity field in the - x-direction at all grid points within the - domain (returned if 'u_final' is set) - sensor_data.ux_max_all - maximum particle velocity in the x-direction - recorded at all grid points within the - domain (returned if 'u_max_all' is set) - sensor_data.ux_min_all - minimum particle velocity in the x-direction - recorded at all grid points within the - domain (returned if 'u_min_all' is set) - sensor_data.ux_non_staggered - - time varying particle velocity in the - x-direction recorded at the sensor positions - given by sensor.mask after shifting to the - non-staggered grid (returned if - 'u_non_staggered' is set) - sensor_data.Ix - time varying acoustic intensity in the - x-direction recorded at the sensor positions - given by sensor.mask (returned if 'I' is - set) - sensor_data.Ix_avg - average acoustic intensity in the - x-direction recorded at the sensor positions - given by sensor.mask (returned if 'I_avg' is - set) - - ABOUT: - author - Bradley Treeby and Ben Cox - date - 22nd April 2009 - last update - 25th July 2019 - - This function is part of the k-Wave Toolbox (http://www.k-wave.org) - Copyright (C) 2009-2019 Bradley Treeby and Ben Cox - - See also kspaceFirstOrderAS, kspaceFirstOrder2D, kspaceFirstOrder3D, - kWaveGrid, kspaceSecondOrder - - This file is part of k-Wave. k-Wave is free software: you can - redistribute it and/or modify it under the terms of the GNU Lesser - General Public License as published by the Free Software Foundation, - either version 3 of the License, or (at your option) any later version. - - k-Wave is distributed in the hope that it will be useful, but WITHOUT ANY - WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS - FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for - more details. - - You should have received a copy of the GNU Lesser General Public License - along with k-Wave. If not, see . - """ # ========================================================================= From 5aaa3461859c410ddfd1048e7cdf11990a692f74 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 18:10:32 +0100 Subject: [PATCH 10/29] update ipynb as test colab is working --- .../ivp_1D_simulation/ivp_1D_simulation.ipynb | 46 +++++++++---------- 1 file changed, 22 insertions(+), 24 deletions(-) diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb index 71bdb4c7..dfaed2b1 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb @@ -4,18 +4,18 @@ "cell_type": "code", "execution_count": 1, "metadata": { - "id": "b9KS_6atASoi", - "collapsed": true, - "outputId": "0e709cc8-7858-4540-91f6-21e241f2cddf", "colab": { "base_uri": "https://localhost:8080/", "height": 1000 - } + }, + "collapsed": true, + "id": "b9KS_6atASoi", + "outputId": "0e709cc8-7858-4540-91f6-21e241f2cddf" }, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Collecting git+https://github.com/djps/k-wave-python@1d-demo\n", " Cloning https://github.com/djps/k-wave-python (to revision 1d-demo) to /tmp/pip-req-build-n_4rh2qu\n", @@ -99,41 +99,48 @@ ] }, { - "output_type": "display_data", "data": { "application/vnd.colab-display-data+json": { + "id": "6af6b7c3a3304b1c9f22d9ff43cc9587", "pip_warning": { "packages": [ "matplotlib", "mpl_toolkits" ] - }, - "id": "6af6b7c3a3304b1c9f22d9ff43cc9587" + } } }, - "metadata": {} + "metadata": {}, + "output_type": "display_data" } ], "source": [ "!pip install git+https://github.com/djps/k-wave-python@1d-demo" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now import dependencies" + ] + }, { "cell_type": "code", "execution_count": 1, "metadata": { - "id": "VCcwZsQ2ASoj", - "outputId": "fe64876b-ec1b-43cd-f2ab-b781f6c0824d", "colab": { "base_uri": "https://localhost:8080/", "height": 505 - } + }, + "id": "VCcwZsQ2ASoj", + "outputId": "fe64876b-ec1b-43cd-f2ab-b781f6c0824d" }, "outputs": [ { - "output_type": "error", "ename": "ImportError", "evalue": "cannot import name 'extract_sensor_data' from 'kwave.kWaveSimulation_helper' (/usr/local/lib/python3.12/dist-packages/kwave/kWaveSimulation_helper/__init__.py)", + "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", @@ -142,16 +149,7 @@ "\u001b[0;31mImportError\u001b[0m: cannot import name 'extract_sensor_data' from 'kwave.kWaveSimulation_helper' (/usr/local/lib/python3.12/dist-packages/kwave/kWaveSimulation_helper/__init__.py)", "", "\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n" - ], - "errorDetails": { - "actions": [ - { - "action": "open_url", - "actionText": "Open Examples", - "url": "/notebooks/snippets/importing_libraries.ipynb" - } - ] - } + ] } ], "source": [ @@ -320,4 +318,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} From b27d7233752b57ab556641a6c3ac1a1d7066e54e Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 18:18:13 +0100 Subject: [PATCH 11/29] Created using Colab --- .../ivp_1D_simulation/ivp_1D_simulation.ipynb | 34 +++++-------------- 1 file changed, 8 insertions(+), 26 deletions(-) diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb index dfaed2b1..2c3b7a04 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { "colab": { "base_uri": "https://localhost:8080/", @@ -120,38 +120,20 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "id": "7lktiAv5XtsS" + }, "source": [ "Now import dependencies" ] }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 505 - }, - "id": "VCcwZsQ2ASoj", - "outputId": "fe64876b-ec1b-43cd-f2ab-b781f6c0824d" + "id": "VCcwZsQ2ASoj" }, - "outputs": [ - { - "ename": "ImportError", - "evalue": "cannot import name 'extract_sensor_data' from 'kwave.kWaveSimulation_helper' (/usr/local/lib/python3.12/dist-packages/kwave/kWaveSimulation_helper/__init__.py)", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipython-input-3394989936.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkwave\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mksource\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mkSource\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mkwave\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkspaceFirstOrder1D\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mkspace_first_order_1D\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkwave\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msimulation_options\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSimulationOptions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.12/dist-packages/kwave/kspaceFirstOrder1D.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mkwave\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msimulation_options\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSimulationOptions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 21\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 22\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mkwave\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mkWaveSimulation_helper\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mextract_sensor_data\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 23\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 24\u001b[0m def kspace_first_order_1D(kgrid: kWaveGrid,\n", - "\u001b[0;31mImportError\u001b[0m: cannot import name 'extract_sensor_data' from 'kwave.kWaveSimulation_helper' (/usr/local/lib/python3.12/dist-packages/kwave/kWaveSimulation_helper/__init__.py)", - "", - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n" - ] - } - ], + "outputs": [], "source": [ "import numpy as np\n", "import matplotlib.pyplot as plt\n", @@ -318,4 +300,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file From 82845663c7e5897502c3eca92a0ea57176e46f15 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 18:20:32 +0100 Subject: [PATCH 12/29] Update pyproject.toml --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index a1d29f3f..72ffb94e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -38,7 +38,7 @@ dependencies = [ [project.urls] Homepage = "http://www.k-wave.org/" Documentation = "https://waltersimson.com/k-wave-python/" -Repository = "https://github.com/waltsims/k-wave-python" +Repository = "https://github.com/djps/k-wave-python" Bug-tracker = "https://github.com/waltsims/k-wave-python/issues" [project.optional-dependencies] From 3fd2e979bb971e7430058c95fbe590f641109e13 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 4 Dec 2025 18:21:00 +0100 Subject: [PATCH 13/29] Bump version from 0.4.1 to 0.4.2 --- kwave/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kwave/__init__.py b/kwave/__init__.py index 93f8aee3..1885928c 100644 --- a/kwave/__init__.py +++ b/kwave/__init__.py @@ -9,7 +9,7 @@ # Test installation with: # python3 -m pip install -i https://test.pypi.org/simple/ --extra-index-url=https://pypi.org/simple/ k-Wave-python==0.3.0 -__version__ = "0.4.1" +__version__ = "0.4.2" # Constants and Configurations URL_BASE = "https://github.com/waltsims/" From b71fbec8bf1f5f09279ad49f05c97e2700a0e01c Mon Sep 17 00:00:00 2001 From: David Sinden Date: Fri, 5 Dec 2025 10:36:26 +0100 Subject: [PATCH 14/29] Created using Colab. Refactoring --- .../ivp_1D_simulation/ivp_1D_simulation.ipynb | 177 +++++------------- 1 file changed, 44 insertions(+), 133 deletions(-) diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb index 2c3b7a04..3ba83862 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb @@ -1,122 +1,24 @@ { "cells": [ { - "cell_type": "code", - "execution_count": null, - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/", - "height": 1000 - }, - "collapsed": true, - "id": "b9KS_6atASoi", - "outputId": "0e709cc8-7858-4540-91f6-21e241f2cddf" - }, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Collecting git+https://github.com/djps/k-wave-python@1d-demo\n", - " Cloning https://github.com/djps/k-wave-python (to revision 1d-demo) to /tmp/pip-req-build-n_4rh2qu\n", - " Running command git clone --filter=blob:none --quiet https://github.com/djps/k-wave-python /tmp/pip-req-build-n_4rh2qu\n", - " Running command git checkout -b 1d-demo --track origin/1d-demo\n", - " Switched to a new branch '1d-demo'\n", - " Branch '1d-demo' set up to track remote branch '1d-demo' from 'origin'.\n", - " Resolved https://github.com/djps/k-wave-python to commit e7265c5eb331031a61e64155dbc579fa706faba1\n", - " Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n", - " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n", - " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", - "Collecting beartype==0.22.4 (from k-Wave-python==0.4.1)\n", - " Downloading beartype-0.22.4-py3-none-any.whl.metadata (36 kB)\n", - "Collecting deepdiff==8.6.1 (from k-Wave-python==0.4.1)\n", - " Downloading deepdiff-8.6.1-py3-none-any.whl.metadata (8.6 kB)\n", - "Collecting deprecated>=1.2.14 (from k-Wave-python==0.4.1)\n", - " Downloading deprecated-1.3.1-py2.py3-none-any.whl.metadata (5.9 kB)\n", - "Requirement already satisfied: h5py==3.15.1 in /usr/local/lib/python3.12/dist-packages (from k-Wave-python==0.4.1) (3.15.1)\n", - "Collecting jaxtyping==0.3.2 (from k-Wave-python==0.4.1)\n", - " Downloading jaxtyping-0.3.2-py3-none-any.whl.metadata (7.0 kB)\n", - "Collecting matplotlib==3.10.3 (from k-Wave-python==0.4.1)\n", - " Downloading matplotlib-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (11 kB)\n", - "Requirement already satisfied: numpy<2.3.0,>=1.22.2 in /usr/local/lib/python3.12/dist-packages (from k-Wave-python==0.4.1) (2.0.2)\n", - "Collecting opencv-python==4.11.0.86 (from k-Wave-python==0.4.1)\n", - " Downloading opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (20 kB)\n", - "Collecting scipy==1.15.3 (from k-Wave-python==0.4.1)\n", - " Downloading scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (61 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.0/62.0 kB\u001b[0m \u001b[31m2.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hCollecting orderly-set<6,>=5.4.1 (from deepdiff==8.6.1->k-Wave-python==0.4.1)\n", - " Downloading orderly_set-5.5.0-py3-none-any.whl.metadata (6.6 kB)\n", - "Collecting wadler-lindig>=0.1.3 (from jaxtyping==0.3.2->k-Wave-python==0.4.1)\n", - " Downloading wadler_lindig-0.1.7-py3-none-any.whl.metadata (17 kB)\n", - "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (1.3.3)\n", - "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (0.12.1)\n", - "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (4.60.1)\n", - "Requirement already satisfied: kiwisolver>=1.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (1.4.9)\n", - "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (25.0)\n", - "Requirement already satisfied: pillow>=8 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (11.3.0)\n", - "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (3.2.5)\n", - "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.12/dist-packages (from matplotlib==3.10.3->k-Wave-python==0.4.1) (2.9.0.post0)\n", - "Requirement already satisfied: wrapt<3,>=1.10 in /usr/local/lib/python3.12/dist-packages (from deprecated>=1.2.14->k-Wave-python==0.4.1) (2.0.1)\n", - "Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.12/dist-packages (from python-dateutil>=2.7->matplotlib==3.10.3->k-Wave-python==0.4.1) (1.17.0)\n", - "Downloading beartype-0.22.4-py3-none-any.whl (1.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m23.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hDownloading deepdiff-8.6.1-py3-none-any.whl (91 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m91.4/91.4 kB\u001b[0m \u001b[31m6.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hDownloading jaxtyping-0.3.2-py3-none-any.whl (55 kB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m55.4/55.4 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hDownloading matplotlib-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (8.6 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m8.6/8.6 MB\u001b[0m \u001b[31m99.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hDownloading opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (63.0 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m63.0/63.0 MB\u001b[0m \u001b[31m13.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hDownloading scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (37.3 MB)\n", - "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m37.3/37.3 MB\u001b[0m \u001b[31m23.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", - "\u001b[?25hDownloading deprecated-1.3.1-py2.py3-none-any.whl (11 kB)\n", - "Downloading orderly_set-5.5.0-py3-none-any.whl (13 kB)\n", - "Downloading wadler_lindig-0.1.7-py3-none-any.whl (20 kB)\n", - "Building wheels for collected packages: k-Wave-python\n", - " Building wheel for k-Wave-python (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n", - " Created wheel for k-Wave-python: filename=k_wave_python-0.4.1-py3-none-any.whl size=205572 sha256=043823da9c43851a2c587e0472799f9d7560384b19f78970b56cbdd074380e90\n", - " Stored in directory: /tmp/pip-ephem-wheel-cache-agme1_65/wheels/34/51/59/3b5b5fca32858fe6767c81f208a87c7ae2f45bf492d1b10e45\n", - "Successfully built k-Wave-python\n", - "Installing collected packages: wadler-lindig, scipy, orderly-set, opencv-python, deprecated, beartype, matplotlib, jaxtyping, deepdiff, k-Wave-python\n", - " Attempting uninstall: scipy\n", - " Found existing installation: scipy 1.16.3\n", - " Uninstalling scipy-1.16.3:\n", - " Successfully uninstalled scipy-1.16.3\n", - " Attempting uninstall: opencv-python\n", - " Found existing installation: opencv-python 4.12.0.88\n", - " Uninstalling opencv-python-4.12.0.88:\n", - " Successfully uninstalled opencv-python-4.12.0.88\n", - " Attempting uninstall: beartype\n", - " Found existing installation: beartype 0.22.6\n", - " Uninstalling beartype-0.22.6:\n", - " Successfully uninstalled beartype-0.22.6\n", - " Attempting uninstall: matplotlib\n", - " Found existing installation: matplotlib 3.10.0\n", - " Uninstalling matplotlib-3.10.0:\n", - " Successfully uninstalled matplotlib-3.10.0\n", - "Successfully installed beartype-0.22.4 deepdiff-8.6.1 deprecated-1.3.1 jaxtyping-0.3.2 k-Wave-python-0.4.1 matplotlib-3.10.3 opencv-python-4.11.0.86 orderly-set-5.5.0 scipy-1.15.3 wadler-lindig-0.1.7\n" - ] - }, - { - "data": { - "application/vnd.colab-display-data+json": { - "id": "6af6b7c3a3304b1c9f22d9ff43cc9587", - "pip_warning": { - "packages": [ - "matplotlib", - "mpl_toolkits" - ] - } - } - }, - "metadata": {}, - "output_type": "display_data" - } + "cell_type": "markdown", + "source": [ + "First install the pacakage using the latests version." ], + "metadata": { + "id": "RQYRmOfG27FV" + } + }, + { + "cell_type": "code", "source": [ "!pip install git+https://github.com/djps/k-wave-python@1d-demo" - ] + ], + "metadata": { + "id": "5bq8_I2z29l6" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", @@ -124,7 +26,7 @@ "id": "7lktiAv5XtsS" }, "source": [ - "Now import dependencies" + "Now import dependencies, and party of library which are required." ] }, { @@ -156,7 +58,7 @@ "id": "xUqsDgayASoj" }, "source": [ - "Set parameters" + "Set parameters to build the grid" ] }, { @@ -184,14 +86,12 @@ "density[np.round(4 * Nx / 5).astype(int) - 1:] = 1500.0 # [kg/m^3]\n", "medium = kWaveMedium(sound_speed=sound_speed, density=density)\n", "\n", - "# Create the source object\n", - "source = kSource()\n", + "# set the simulation time to capture the reflections\n", + "c_max = np.max(medium.sound_speed.flatten()) # [m/s]\n", + "t_end = 2.5 * kgrid.x_size / c_max # [s]\n", "\n", - "# create initial pressure distribution using a smoothly shaped sinusoid\n", - "x_pos: int = 280 # [grid points]\n", - "width: int = 100 # [grid points]\n", - "height: int = 1 # [au]\n", - "p0 = np.linspace(0.0, 2.0 * np.pi, width + 1)" + "# define the time array\n", + "kgrid.makeTime(c_max, t_end=t_end)" ] }, { @@ -200,7 +100,7 @@ "id": "misFfiLaASok" }, "source": [ - "Set grid" + "Build source and sensor" ] }, { @@ -232,21 +132,32 @@ "# this hack is needed to ensure that the sensor is in [1,2] dimensions\n", "mask = np.array([-10e-3, 10e-3]) # [mm]\n", "mask = mask[:, np.newaxis].T\n", - "sensor.mask = mask\n", - "\n", - "# set the simulation time to capture the reflections\n", - "c_max = np.max(medium.sound_speed.flatten()) # [m/s]\n", - "t_end = 2.5 * kgrid.x_size / c_max # [s]\n", - "\n", - "# define the time array\n", - "kgrid.makeTime(c_max, t_end=t_end)\n", - "\n", + "sensor.mask = mask" + ] + }, + { + "cell_type": "markdown", + "source": [ + "Set options and run simulation" + ], + "metadata": { + "id": "ZSmPpm6X3fm5" + } + }, + { + "cell_type": "code", + "source": [ "# define the simulation options\n", "simulation_options = SimulationOptions(data_cast=\"off\", save_to_disk=False)\n", "\n", "# run the simulation\n", "sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options)" - ] + ], + "metadata": { + "id": "BiFT0YEE3fyE" + }, + "execution_count": null, + "outputs": [] }, { "cell_type": "markdown", From 1d42993a5f1a7d5290b0ff16a76c2915471a21dc Mon Sep 17 00:00:00 2001 From: David Sinden Date: Wed, 7 Jan 2026 13:03:50 +0100 Subject: [PATCH 15/29] Add create_storage_variables and improve sensor data handling * Introduces the `create_storage_variables` to define the creation of sensor data storage structure in kWaveSimulation which is passed to the simulator * updates `kWaveSimulation` to to use `create_storage_variables` also adds setting for nonlinearity * fixes shape handling in `ksource`, * improves 1D simulation logic in kspaceFirstOrder1D, * addresses minor issues in filters and signals utilities. Also updates dependencies to include tqdm. --- kwave/kWaveSimulation.py | 96 ++- kwave/kWaveSimulation_helper/__init__.py | 4 +- .../create_absorption_variables.py | 2 + .../create_storage_variables.py | 591 ++++++++++++++++++ .../extract_sensor_data.py | 5 - kwave/ksource.py | 3 +- kwave/kspaceFirstOrder1D.py | 122 ++-- kwave/utils/filters.py | 4 + kwave/utils/signals.py | 6 +- pyproject.toml | 3 +- 10 files changed, 736 insertions(+), 100 deletions(-) create mode 100644 kwave/kWaveSimulation_helper/create_storage_variables.py diff --git a/kwave/kWaveSimulation.py b/kwave/kWaveSimulation.py index 8cd31e03..e7ab3b0a 100644 --- a/kwave/kWaveSimulation.py +++ b/kwave/kWaveSimulation.py @@ -2,7 +2,9 @@ import warnings from dataclasses import dataclass +import copy import numpy as np + from deprecated import deprecated from kwave.data import Vector @@ -13,6 +15,7 @@ from kwave.ktransducer import NotATransducer from kwave.kWaveSimulation_helper import ( create_absorption_variables, + create_storage_variables, display_simulation_params, expand_grid_matrices, scale_source_terms_func, @@ -72,13 +75,13 @@ def __init__( self.sensor = kSensor(mask=np.ones((Nx, Ny, max(1, Nz))), record=["p_final"]) # set time reversal flag - self.userarg_time_rev = True + self.time_rev = True self.record = Recorder() self.record.p = False else: # set time reversal flag - self.userarg_time_rev = False + # self.time_rev = False #: Whether sensor.mask should be re-ordered. #: True if sensor.mask is Cartesian with nearest neighbour interpolation which is calculated using a binary mask @@ -142,6 +145,14 @@ def __init__( self.c0 = None #: Alias to medium.sound_speed self.index_data_type = None + @property + def is_nonlinear(self): + """ + Returns: + Set simulation to nonlinear if medium is nonlinear. + """ + return self.medium.is_nonlinear() + @property def equation_of_state(self): """ @@ -154,7 +165,7 @@ def equation_of_state(self): else: return "absorbing" else: - return "loseless" + return "lossless" @property def use_sensor(self): @@ -200,7 +211,7 @@ def nonuniform_grid(self): def time_rev(self) -> bool: if self.sensor and not isinstance(self.sensor, NotATransducer): return not self.options.simulation_type.is_elastic_simulation() and self.sensor.time_reversal_boundary_data is not None - return self.userarg_time_rev + return self.time_rev @property @deprecated(version="0.4.1", reason="Use TimeReversal class instead") @@ -515,6 +526,80 @@ def input_checking(self, calling_func_name) -> None: self.create_absorption_vars() self.assign_pseudonyms(self.medium, self.kgrid) self.scale_source_terms(opt.scale_source_terms) + + # move all this inside create_storage_variables? + # a copy of record is passed through, and use to update the + if is_elastic_code: + record_old = copy.deepcopy(self.record) + if not self.blank_sensor: + sensor_x = self.sensor.mask[0, :] + else: + sensor_x = None + + values = dotdict({"sensor_x": sensor_x, + "sensor_mask_index": self.sensor_mask_index, + "record": record_old, + "sensor_data_buffer_size": self.s_source_pos_index}) + + if self.record.u_split_field: + self.record_u_split_field = self.record.u_split_field + + flags = dotdict({"use_sensor": self.use_sensor, + "blank_sensor": self.blank_sensor, + "binary_sensor_mask": self.binary_sensor_mask, + "record_u_split_field": self.record.u_split_field, + "time_rev": self.time_rev, + "reorder_data": self.reorder_data, + "transducer_receive_elevation_focus": self.transducer_receive_elevation_focus, + "axisymmetric": opt.simulation_type.is_axisymmetric(), + "transducer_sensor": self.transducer_sensor, + "use_cuboid_corners": self.cuboid_corners}) + + # this creates the storage variables by determining the spatial locations of the data which is in self.record. + flags, self.record, self.sensor_data, self.num_recorded_time_points = create_storage_variables(self.kgrid, + self.sensor, + opt, + values, + flags, + self.record) + else: + record_old = copy.deepcopy(self.record) + if not self.blank_sensor: + if k_dim == 1: + # this has been declared in check_sensor + sensor_x = self.sensor_x + else: + sensor_x = self.sensor.mask[0, :] + else: + sensor_x = None + + values = dotdict({"sensor_x": sensor_x, + "sensor_mask_index": self.sensor_mask_index, + "record": record_old, + "sensor_data_buffer_size": self.s_source_pos_index}) + + if self.record.u_split_field: + self.record_u_split_field = self.record.u_split_field + + flags = dotdict({"use_sensor": self.use_sensor, + "blank_sensor": self.blank_sensor, + "binary_sensor_mask": self.binary_sensor_mask, + "record_u_split_field": self.record.u_split_field, + "time_rev": self.time_rev, + "reorder_data": self.reorder_data, + "transducer_receive_elevation_focus": self.transducer_receive_elevation_focus, + "axisymmetric": opt.simulation_type.is_axisymmetric(), + "transducer_sensor": self.transducer_sensor, + "use_cuboid_corners": self.cuboid_corners}) + + # this creates the storage variables by determining the spatial locations of the data which is in self.record. + flags, self.record, self.sensor_data, self.num_recorded_time_points = create_storage_variables(self.kgrid, + self.sensor, + opt, + values, + flags, + self.record) + self.create_pml_indices( kgrid_dim=self.kgrid.dim, kgrid_N=Vector(self.kgrid.N), @@ -762,12 +847,11 @@ def check_sensor(self, kgrid_dim) -> None: # align sensor data as a column vector to be the # same as kgrid.x_vec so that calls to interp1 # return data in the correct dimension - self.sensor_x = np.reshape((self.sensor.mask, (-1, 1))) + self.sensor_x = np.reshape(self.sensor.mask, (-1, 1)) # add sensor_x to the record structure for use with # the _extractSensorData subfunction self.record.sensor_x = self.sensor_x - "record.sensor_x = sensor_x;" elif kgrid_dim == 2: self.sensor_x = self.sensor.mask[0, :] diff --git a/kwave/kWaveSimulation_helper/__init__.py b/kwave/kWaveSimulation_helper/__init__.py index cdf3e71c..899c46e0 100644 --- a/kwave/kWaveSimulation_helper/__init__.py +++ b/kwave/kWaveSimulation_helper/__init__.py @@ -7,6 +7,8 @@ from kwave.kWaveSimulation_helper.scale_source_terms_func import scale_source_terms_func from kwave.kWaveSimulation_helper.set_sound_speed_ref import set_sound_speed_ref -__all__ = ["create_absorption_variables", "display_simulation_params", "expand_grid_matrices", +from kwave.kWaveSimulation_helper.create_storage_variables import create_storage_variables + +__all__ = ["create_absorption_variables", "create_storage_variables", "display_simulation_params", "expand_grid_matrices", "extract_sensor_data", "retract_transducer_grid_size", "save_to_disk_func", "scale_source_terms_func", "set_sound_speed_ref"] \ No newline at end of file diff --git a/kwave/kWaveSimulation_helper/create_absorption_variables.py b/kwave/kWaveSimulation_helper/create_absorption_variables.py index 56ae2d38..9434f827 100644 --- a/kwave/kWaveSimulation_helper/create_absorption_variables.py +++ b/kwave/kWaveSimulation_helper/create_absorption_variables.py @@ -14,6 +14,8 @@ def create_absorption_variables(kgrid: kWaveGrid, medium: kWaveMedium, equation_ return create_absorbing_medium_variables(kgrid.k, medium) elif equation_of_state == "stokes": return create_stokes_medium_variables(medium) + elif equation_of_state == "lossless": + return None, None, None, None else: raise NotImplementedError diff --git a/kwave/kWaveSimulation_helper/create_storage_variables.py b/kwave/kWaveSimulation_helper/create_storage_variables.py new file mode 100644 index 00000000..ee8afcd6 --- /dev/null +++ b/kwave/kWaveSimulation_helper/create_storage_variables.py @@ -0,0 +1,591 @@ +import numpy as np +from numpy.fft import ifftshift +from copy import deepcopy +from typing import Union, List + +from kwave.data import Vector +from kwave.kgrid import kWaveGrid +from kwave.recorder import Recorder +from kwave.options.simulation_options import SimulationOptions +from kwave.utils.dotdictionary import dotdict + + +from scipy.spatial import Delaunay + + +def gridDataFast2D(x, y, xi, yi): + """ + Delauney triangulation in 2D + """ + x = np.ravel(x) + y = np.ravel(y) + xi = np.ravel(xi) + yi = np.ravel(yi) + + points = np.squeeze(np.dstack((x, y))) + interpolation_points = np.squeeze(np.dstack((xi, yi))) + + tri = Delaunay(points) + + indices = tri.find_simplex(interpolation_points) + + bc = tri.transform[indices, :2].dot(np.transpose(tri.points[indices, :] - tri.transform[indices, 2])) + + return tri.points[indices, :], bc + + +def gridDataFast3D(x, y, z, xi, yi, zi): + """ + Delauney triangulation in 3D + """ + x = np.ravel(x) + y = np.ravel(y) + z = np.ravel(z) + xi = np.ravel(xi) + yi = np.ravel(yi) + zi = np.ravel(zi) + + grid_points = np.squeeze(np.dstack((x, y, z))) + interpolation_points = np.squeeze(np.dstack((xi, yi, zi))) + + tri = Delaunay(grid_points) + + simplex_indices = tri.find_simplex(interpolation_points) + + # barycentric coordinates + bc = tri.transform[simplex_indices, :2].dot(np.transpose(tri.points[simplex_indices, :] - tri.transform[simplex_indices, 2])) + + return tri.points[simplex_indices, :], bc + + +class OutputSensor(object): + """ + Class which holds information about which spatial locations are used to save data + """ + flags = None + x_shift_neg = None + p = None + + +def create_storage_variables(kgrid: kWaveGrid, sensor, opt: SimulationOptions, + values: dotdict, flags: dotdict, record: Recorder): + """ + Creates the storage variable sensor + """ + + # ========================================================================= + # PREPARE DATA MASKS AND STORAGE VARIABLES + # ========================================================================= + + sensor_data = OutputSensor() + + flags = set_flags(flags, values.sensor_x, sensor.mask, opt.cartesian_interp) + + # preallocate output variables + if flags.time_rev: + return flags + + num_sensor_points = get_num_of_sensor_points(flags.blank_sensor, + flags.binary_sensor_mask, + kgrid.k, + values.sensor_mask_index, + values.sensor_x) + + num_recorded_time_points, _ = \ + get_num_recorded_time_points(kgrid.dim, kgrid.Nt, opt.stream_to_disk, sensor.record_start_index) + + record = create_shift_operators(record, values.record, kgrid, opt.use_sg) + + create_normalized_wavenumber_vectors(record, kgrid, flags.record_u_split_field) + + pml_size = [opt.pml_x_size, opt.pml_y_size, opt.pml_z_size] + pml_size = Vector(pml_size[:kgrid.dim]) + all_vars_size = calculate_all_vars_size(kgrid, opt.pml_inside, pml_size) + + sensor_data = create_sensor_variables(values.record, kgrid, num_sensor_points, num_recorded_time_points, + all_vars_size, values.sensor_mask_index, flags.use_cuboid_corners) + + create_transducer_buffer(flags.transducer_sensor, values.transducer_receive_elevation_focus, sensor, + num_sensor_points, num_recorded_time_points, values.sensor_data_buffer_size, + flags, sensor_data) + + record = compute_triangulation_points(flags, kgrid, record, sensor.mask) + + return flags, record, sensor_data, num_recorded_time_points + + +def set_flags(flags: dotdict, sensor_x, sensor_mask, is_cartesian_interp): + """ + check sensor mask based on the Cartesian interpolation setting + """ + + if not flags.binary_sensor_mask and is_cartesian_interp == 'nearest': + + # extract the data using the binary sensor mask created in + # input_checking, but switch on Cartesian reorder flag so that the + # final data is returned in the correct order (not in time + # reversal mode). + flags.binary_sensor_mask = True + if not flags.time_rev: + flags.reorder_data = True + + # check if any duplicate points have been discarded in the + # conversion from a Cartesian to binary mask + num_discarded_points = len(sensor_x) - sensor_mask.sum() + if num_discarded_points != 0: + print(f' WARNING: {num_discarded_points} duplicated sensor points discarded (nearest neighbour interpolation)') + + return flags + + +def get_num_of_sensor_points(is_blank_sensor, is_binary_sensor_mask, kgrid_k, sensor_mask_index, sensor_x): + """ + Returns the number of sensor points for a given set of sensor parameters. + + Args: + is_blank_sensor (bool): Whether the sensor is blank or not. + is_binary_sensor_mask (bool): Whether the sensor mask is binary or not. + kgrid_k (ndarray): An array of k-values for the k-Wave grid. + sensor_mask_index (list): List of sensor mask indices. + sensor_x (list): List of sensor x-coordinates. + + Returns: + int: The number of sensor points. + """ + if is_blank_sensor: + num_sensor_points = kgrid_k.size + elif is_binary_sensor_mask: + num_sensor_points = len(sensor_mask_index) + else: + num_sensor_points = len(sensor_x) + return num_sensor_points + + +def get_num_recorded_time_points(kgrid_dim, Nt, stream_to_disk, record_start_index): + """ + calculate the number of time points that are stored + - if streaming data to disk, reduce to the size of the + sensor_data matrix based on the value of self.options.stream_to_disk + - if a user input for sensor.record_start_index is given, reduce + the size of the sensor_data matrix based on the value given + Args: + kgrid_dim: + Nt: + stream_to_disk: + record_start_index: + + Returns: + + """ + if kgrid_dim == 3 and stream_to_disk: + + # set the number of points + num_recorded_time_points = stream_to_disk + + # initialise the file index variable + stream_data_index = 1 + + else: + num_recorded_time_points = Nt - record_start_index + 1 + stream_data_index = None # ??? + + return num_recorded_time_points, stream_data_index + + +def create_shift_operators(record: Recorder, record_old: Recorder, kgrid: kWaveGrid, is_use_sg: bool): + """ + create shift operators used for calculating the components of the + particle velocity field on the non-staggered grids (these are used + for both binary and cartesian sensor masks) + """ + + if (record_old.u_non_staggered or record_old.u_split_field or record_old.I or record_old.I_avg): + if is_use_sg: + if kgrid.dim == 1: + record.x_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.x * kgrid.dx / 2)) + elif kgrid.dim == 2: + record.x_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.x * kgrid.dx / 2)) + record.y_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.y * kgrid.dy / 2)).T + elif kgrid.dim == 3: + record.x_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.x * kgrid.dx / 2)) + record.y_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.y * kgrid.dy / 2)) + record.z_shift_neg = ifftshift(np.exp(-1j * kgrid.k_vec.z * kgrid.dz / 2)) + + record.x_shift_neg = np.expand_dims(record.x_shift_neg, axis=-1) + + record.y_shift_neg = np.expand_dims(record.y_shift_neg, axis=0) + + record.z_shift_neg = np.squeeze(record.z_shift_neg) + record.z_shift_neg = np.expand_dims(record.z_shift_neg, axis=0) + record.z_shift_neg = np.expand_dims(record.z_shift_neg, axis=0) + + else: + if kgrid.dim == 1: + record.x_shift_neg = 1 + elif kgrid.dim == 2: + record.x_shift_neg = 1 + record.y_shift_neg = 1 + elif kgrid.dim == 3: + record.x_shift_neg = 1 + record.y_shift_neg = 1 + record.z_shift_neg = 1 + return record + + +def create_normalized_wavenumber_vectors(record: Recorder, kgrid: kWaveGrid, is_record_u_split_field): + """ + create normalised wavenumber vectors for k-space dyadics used to + split the particule velocity into compressional and shear components + """ + if not is_record_u_split_field: + return record + + # x-dimension + record.kx_norm = kgrid.kx / kgrid.k + record.kx_norm[kgrid.k == 0] = 0 + record.kx_norm = ifftshift(record.kx_norm) + + # y-dimension + record.ky_norm = kgrid.ky / kgrid.k + record.ky_norm[kgrid.k == 0] = 0 + record.ky_norm = ifftshift(record.ky_norm) + + # z-dimension + if kgrid.dim == 3: + record.kz_norm = kgrid.kz / kgrid.k + record.kz_norm[kgrid.k == 0] = 0 + record.kz_norm = ifftshift(record.kz_norm) + + return record + + +def create_sensor_variables(record_old: Recorder, kgrid, num_sensor_points, num_recorded_time_points, + all_vars_size, sensor_mask_index, use_cuboid_corners) -> Union[dotdict, List[dotdict]]: + """ + create storage and scaling variables - all variables are saved as fields of + a container called sensor_data. If cuboid corners are used this is a list, else a dictionary-like container + """ + + if use_cuboid_corners: + + # as a list + sensor_data = [] + + # get number of doctdicts in the list for each set of cuboid corners + n_cuboids: int = np.shape(record_old.cuboid_corners_list)[1] + + # for each set of cuboid corners + for cuboid_index in np.arange(n_cuboids, dtype=int): + + # add an entry to the list + sensor_data.append(dotdict()) + + # get size of cuboid for indexing regions of computational grid + if kgrid.dim == 1: + cuboid_size_x = [record_old.cuboid_corners_list[1, cuboid_index] - record_old.cuboid_corners_list[0, cuboid_index] + 1, 1] + elif kgrid.dim == 2: + cuboid_size_x = [record_old.cuboid_corners_list[2, cuboid_index] - record_old.cuboid_corners_list[0, cuboid_index] + 1, + record_old.cuboid_corners_list[3, cuboid_index] - record_old.cuboid_corners_list[1, cuboid_index] + 1] + elif kgrid.dim == 3: + cuboid_size_x = [record_old.cuboid_corners_list[3, cuboid_index] - record_old.cuboid_corners_list[0, cuboid_index] + 1, + record_old.cuboid_corners_list[4, cuboid_index] - record_old.cuboid_corners_list[1, cuboid_index] + 1, + record_old.cuboid_corners_list[5, cuboid_index] - record_old.cuboid_corners_list[2, cuboid_index] + 1] + + cuboid_size_xt = deepcopy(cuboid_size_x) + cuboid_size_xt.append(num_recorded_time_points) + + # time history of the acoustic pressure + if record_old.p or record_old.I or record_old.I_avg: + sensor_data[cuboid_index].p = np.zeros(cuboid_size_xt) + + # maximum pressure + if record_old.p_max: + sensor_data[cuboid_index].p_max = np.zeros(cuboid_size_x) + + # minimum pressure + if record_old.p_min: + sensor_data[cuboid_index].p_min = np.zeros(cuboid_size_x) + + # rms pressure + if record_old.p_rms: + sensor_data[cuboid_index].p_rms = np.zeros(cuboid_size_x) + + # time history of the acoustic particle velocity + if record_old.u: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data[cuboid_index].ux = np.zeros(cuboid_size_xt) + elif kgrid.dim == 2: + sensor_data[cuboid_index].ux = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uy = np.zeros(cuboid_size_xt) + elif kgrid.dim == 3: + sensor_data[cuboid_index].ux = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uy = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uz = np.zeros(cuboid_size_xt) + + # store the time history of the particle velocity on staggered grid + if record_old.u_non_staggered or record_old.I or record_old.I_avg: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data[cuboid_index].ux_non_staggered = np.zeros(cuboid_size_xt) + elif kgrid.dim == 2: + sensor_data[cuboid_index].ux_non_staggered = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uy_non_staggered = np.zeros(cuboid_size_xt) + elif kgrid.dim == 3: + sensor_data[cuboid_index].ux_non_staggered = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uy_non_staggered = np.zeros(cuboid_size_xt) + sensor_data[cuboid_index].uz_non_staggered = np.zeros(cuboid_size_xt) + + # time history of the acoustic particle velocity split into compressional and shear components + if record_old.u_split_field: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 2: + sensor_data[cuboid_index].ux_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].ux_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uy_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uy_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + if kgrid.dim == 3: + sensor_data[cuboid_index].ux_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].ux_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uy_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uy_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uz_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data[cuboid_index].uz_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + + else: + + # allocate empty sensor + sensor_data = dotdict() + + # if only p is being stored (i.e., if no user input is given for + # sensor.record), then sensor_data.p is copied to sensor_data at the + # end of the simulation + + # time history of the acoustic pressure + if record_old.p or record_old.I or record_old.I_avg: + sensor_data.p = np.zeros([num_sensor_points, num_recorded_time_points]) + + # maximum pressure + if record_old.p_max: + sensor_data.p_max = np.zeros([num_sensor_points,]) + + # minimum pressure + if record_old.p_min: + sensor_data.p_min = np.zeros([num_sensor_points,]) + + # rms pressure + if record_old.p_rms: + sensor_data.p_rms = np.zeros([num_sensor_points,]) + + # maximum pressure over all grid points + if record_old.p_max_all: + sensor_data.p_max_all = np.zeros(all_vars_size) + + # minimum pressure over all grid points + if record_old.p_min_all: + sensor_data.p_min_all = np.zeros(all_vars_size) + + # time history of the acoustic particle velocity + if record_old.u: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux = np.zeros([num_sensor_points, num_recorded_time_points]) + elif kgrid.dim == 2: + sensor_data.ux = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy = np.zeros([num_sensor_points, num_recorded_time_points]) + elif kgrid.dim == 3: + sensor_data.ux = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uz = np.zeros([num_sensor_points, num_recorded_time_points]) + + # maximum particle velocity + if record_old.u_max: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_max = np.zeros([num_sensor_points,]) + if kgrid.dim == 2: + sensor_data.ux_max = np.zeros([num_sensor_points,]) + sensor_data.uy_max = np.zeros([num_sensor_points,]) + if kgrid.dim == 3: + sensor_data.ux_max = np.zeros([num_sensor_points,]) + sensor_data.uy_max = np.zeros([num_sensor_points,]) + sensor_data.uz_max = np.zeros([num_sensor_points,]) + + # minimum particle velocity + if record_old.u_min: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_min = np.zeros([num_sensor_points,]) + if kgrid.dim == 2: + sensor_data.ux_min = np.zeros([num_sensor_points,]) + sensor_data.uy_min = np.zeros([num_sensor_points,]) + if kgrid.dim == 3: + sensor_data.ux_min = np.zeros([num_sensor_points,]) + sensor_data.uy_min = np.zeros([num_sensor_points,]) + sensor_data.uz_min = np.zeros([num_sensor_points,]) + + # rms particle velocity + if record_old.u_rms: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_rms = np.zeros([num_sensor_points,]) + if kgrid.dim == 2: + sensor_data.ux_rms = np.zeros([num_sensor_points,]) + sensor_data.uy_rms = np.zeros([num_sensor_points,]) + if kgrid.dim == 3: + sensor_data.ux_rms = np.zeros([num_sensor_points,]) + sensor_data.uy_rms = np.zeros([num_sensor_points,]) + sensor_data.uz_rms = np.zeros([num_sensor_points,]) + + # maximum particle velocity over all grid points + if record_old.u_max_all: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_max_all = np.zeros(all_vars_size) + if kgrid.dim == 2: + sensor_data.ux_max_all = np.zeros(all_vars_size) + sensor_data.uy_max_all = np.zeros(all_vars_size) + if kgrid.dim == 3: + sensor_data.ux_max_all = np.zeros(all_vars_size) + sensor_data.uy_max_all = np.zeros(all_vars_size) + sensor_data.uz_max_all = np.zeros(all_vars_size) + + # minimum particle velocity over all grid points + if record_old.u_min_all: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_min_all = np.zeros(all_vars_size) + if kgrid.dim == 2: + sensor_data.ux_min_all = np.zeros(all_vars_size) + sensor_data.uy_min_all = np.zeros(all_vars_size) + if kgrid.dim == 3: + sensor_data.ux_min_all = np.zeros(all_vars_size) + sensor_data.uy_min_all = np.zeros(all_vars_size) + sensor_data.uz_min_all = np.zeros(all_vars_size) + + # time history of the acoustic particle velocity on the non-staggered grid points + if record_old.u_non_staggered or record_old.I or record_old.I_avg: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 1: + sensor_data.ux_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + if kgrid.dim == 2: + sensor_data.ux_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + if kgrid.dim == 3: + sensor_data.ux_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uz_non_staggered = np.zeros([num_sensor_points, num_recorded_time_points]) + + # time history of the acoustic particle velocity split into compressional and shear components + if record_old.u_split_field: + # pre-allocate the velocity fields based on the number of dimensions in the simulation + if kgrid.dim == 2: + sensor_data.ux_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.ux_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + if kgrid.dim == 3: + sensor_data.ux_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.ux_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uy_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uz_split_p = np.zeros([num_sensor_points, num_recorded_time_points]) + sensor_data.uz_split_s = np.zeros([num_sensor_points, num_recorded_time_points]) + + # if use_cuboid_corners: + # info = "using cuboid_corners (create storage variables)," + str(len(sensor_data)) + ", " + str(np.shape(sensor_data[0].p)) + # else: + # info = "binary_mask (create storage variables), ", np.shape(sensor_data.p) + + return sensor_data + + +def create_transducer_buffer(is_transducer_sensor, is_transducer_receive_elevation_focus, sensor, + num_sensor_points, num_recorded_time_points, sensor_data_buffer_size, + flags, sensor_data): + # object of the kWaveTransducer class is being used as a sensor + + if is_transducer_sensor: + if is_transducer_receive_elevation_focus: + + # if there is elevation focusing, a buffer is + # needed to store a short time history at each + # sensor point before averaging + # ??? + sensor_data_buffer_size = sensor.elevation_beamforming_delays.max() + 1 + if sensor_data_buffer_size > 1: + sensor_data_buffer = np.zeros([num_sensor_points, sensor_data_buffer_size]) # noqa: F841 + else: + del sensor_data_buffer_size + flags.transducer_receive_elevation_focus = False + + # the grid points can be summed on the fly and so the + # sensor is the size of the number of active elements + sensor_data.transducer = np.zeros([int(sensor.number_active_elements), num_recorded_time_points]) + else: + pass + + +def compute_triangulation_points(flags, kgrid, record, mask): + """ + precomputate the triangulation points if a Cartesian sensor mask is used + with linear interpolation (tri and bc are the Delaunay TRIangulation and + Barycentric Coordinates) + """ + + if not flags.binary_sensor_mask: + + if kgrid.dim == 1: + + # assign pseudonym for Cartesain grid points in 1D (this is later used for data casting) + record.grid_x = kgrid.x_vec + + else: + + if kgrid.dim == 1: + # align sensor data as a column vector to be the same as kgrid.x_vec + # so that calls to interp return data in the correct dimension + sensor_x = np.reshape((mask, (-1, 1))) + elif kgrid.dim == 2: + sensor_x = mask[0, :] + sensor_y = mask[1, :] + elif kgrid.dim == 3: + sensor_x = mask[0, :] + sensor_y = mask[1, :] + sensor_z = mask[2, :] + + # update command line status + print(' calculating Delaunay triangulation...') + + # compute triangulation + if kgrid.dim == 2: + if flags.axisymmetric: + record.tri, record.bc = gridDataFast2D(kgrid.x, kgrid.y - kgrid.y_vec.min(), sensor_x, sensor_y) + else: + record.tri, record.bc = gridDataFast2D(kgrid.x, kgrid.y, sensor_x, sensor_y) + elif kgrid.dim == 3: + record.tri, record.bc = gridDataFast3D(kgrid.x, kgrid.y, kgrid.z, sensor_x, sensor_y, sensor_z) + + print("done") + + return record + + +def calculate_all_vars_size(kgrid, is_pml_inside, pml_size): + """ + calculate the size of the _all and _final output variables - if the + PML is set to be outside the grid, these will be the same size as the + user input, rather than the expanded grid + """ + if is_pml_inside: + all_vars_size = kgrid.k.shape + else: + if kgrid.dim == 1: + all_vars_size = [kgrid.Nx - 2 * pml_size.x, 1] + elif kgrid.dim == 2: + all_vars_size = [kgrid.Nx - 2 * pml_size.x, kgrid.Ny - 2 * pml_size.y] + elif kgrid.dim == 3: + all_vars_size = [kgrid.Nx - 2 * pml_size.x, kgrid.Ny - 2 * pml_size.y, kgrid.Nz - 2 * pml_size.z] + else: + raise NotImplementedError + return all_vars_size \ No newline at end of file diff --git a/kwave/kWaveSimulation_helper/extract_sensor_data.py b/kwave/kWaveSimulation_helper/extract_sensor_data.py index 4442141b..fc727d75 100644 --- a/kwave/kWaveSimulation_helper/extract_sensor_data.py +++ b/kwave/kWaveSimulation_helper/extract_sensor_data.py @@ -421,11 +421,6 @@ def extract_sensor_data(dim: int, sensor_data, file_index, sensor_mask_index, # store the time history of the acoustic pressure if flags.record_p or flags.record_I or flags.record_I_avg: if dim == 1: - # i = np.argmin(np.abs(record.grid_x - record.sensor_x[0])).astype(int) - # j = np.argmin(np.abs(record.grid_x - record.sensor_x[1])).astype(int) - # print("THIS:", p.shape, file_index, i, j, record.sensor_x, record.sensor_x[0], record.sensor_x[1], record.grid_x[i], record.grid_x[j], - # p[i], p[j], - # record.grid_x[0], record.grid_x[-1], p.max()) sensor_data.p[:, file_index] = np.interp(np.squeeze(record.sensor_x), np.squeeze(record.grid_x), p) else: sensor_data.p[:, file_index] = np.sum(p[record.tri] * record.bc, axis=1) diff --git a/kwave/ksource.py b/kwave/ksource.py index 95fd958e..bd6014bf 100644 --- a/kwave/ksource.py +++ b/kwave/ksource.py @@ -76,7 +76,8 @@ def validate(self, kgrid: kWaveGrid) -> None: None """ if self.p0 is not None: - if self.p0.shape != kgrid.k.shape: + if self.p0.shape != np.squeeze(kgrid.k).shape: + # throw an error if p0 is not the correct size raise ValueError("source.p0 must be the same size as the computational grid.") diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index cc61985e..020e8756 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -178,7 +178,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # this will create the sensor_data dotdict k_sim.input_checking("kspaceFirstOrder1D") - # aliases from simulation + # aliases from kWaveSimulation class sensor_data = k_sim.sensor_data options = k_sim.options record = k_sim.record @@ -189,8 +189,8 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # interpolate the values of the density at the staggered grid locations # where sgx = (x + dx/2) - rho0 = k_sim.rho0 - m_rho0: int = np.squeeze(rho0).ndim + rho0 = np.squeeze(k_sim.rho0) + m_rho0: int = rho0.ndim if (m_rho0 > 0 and options.use_sg): @@ -209,10 +209,10 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # invert rho0 so it doesn't have to be done each time step rho0_sgx_inv = 1.0 / rho0_sgx - rho0_sgx_inv = rho0_sgx_inv[:, np.newaxis] + # rho0_sgx_inv = rho0_sgx_inv[:, np.newaxis] # clear unused variables - # del rho0_sgx + del rho0_sgx # ========================================================================= # PREPARE DERIVATIVE AND PML OPERATORS @@ -233,19 +233,23 @@ def kspace_first_order_1D(kgrid: kWaveGrid, c0 = medium.sound_speed # get the PML operators based on the reference sound speed and PML settings - pml_x = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, False, 0).T - pml_x_sgx = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, True, 0).T + pml_x = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, False, 0) + pml_x_sgx = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, True, 0) + + pml_x = np.squeeze(pml_x) + pml_x_sgx = np.squeeze(pml_x_sgx) # define the k-space derivative operator ddx_k = scipy.fft.ifftshift(1j * kx_vec) - ddx_k = ddx_k[:, np.newaxis] + + # ddx_k = ddx_k[:, np.newaxis] # define the staggered grid shift operators (the option options.use_sg exists for debugging) if options.use_sg: ddx_k_shift_pos = scipy.fft.ifftshift( np.exp( 1j * kx_vec * dx / 2.0)) ddx_k_shift_neg = scipy.fft.ifftshift( np.exp(-1j * kx_vec * dx / 2.0)) - ddx_k_shift_pos = ddx_k_shift_pos[:, np.newaxis] - ddx_k_shift_neg = ddx_k_shift_neg[:, np.newaxis] + #ddx_k_shift_pos = ddx_k_shift_pos[:, np.newaxis] + #ddx_k_shift_neg = ddx_k_shift_neg[:, np.newaxis] else: ddx_k_shift_pos = 1.0 ddx_k_shift_neg = 1.0 @@ -254,11 +258,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # create k-space operator (the option options.use_kspace exists for debugging) if options.use_kspace: kappa = scipy.fft.ifftshift(sinc(c_ref * kgrid.k * kgrid.dt / 2.0)) - kappa = kappa[:, np.newaxis] + kappa = np.squeeze(kappa) + # kappa = kappa[:, np.newaxis] if (hasattr(options, 'source_p') and hasattr(k_sim.source, 'p_mode')) and (k_sim.source.p_mode == 'additive') or \ (hasattr(options, 'source_ux') and hasattr(k_sim.source, 'u_mode')) and (k_sim.source.u_mode == 'additive'): source_kappa = scipy.fft.ifftshift(np.cos (c_ref * kgrid.k * kgrid.dt / 2.0)) - source_kappa = source_kappa[:, np.newaxis] + #source_kappa = source_kappa[:, np.newaxis] else: kappa = 1.0 source_kappa = 1.0 @@ -275,7 +280,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, else: myType = np.double - grid_shape = (Nx, 1) + grid_shape = (Nx, ) # preallocate the loop variables p = np.zeros(grid_shape, dtype=myType) @@ -290,22 +295,9 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # ========================================================================= # setup the time index variable - if (not options.time_rev): - index_start: int = 0 - index_step: int = 1 - index_end: int = Nt - else: - # throw error for unsupported feature - raise TypeError('Time reversal using sensor.time_reversal_boundary_data is not currently supported.') - - # reverse the order of the input data - sensor.time_reversal_boundary_data = np.fliplr(sensor.time_reversal_boundary_data) - index_start = 0 - index_step = 0 - - # stop one time point before the end so the last points are not - # propagated - index_end = kgrid.Nt - 1 + index_start: int = 0 + index_step: int = 1 + index_end: int = Nt # These should be zero indexed if hasattr(k_sim, 's_source_sig_index') and k_sim.s_source_pos_index is not None: @@ -326,9 +318,9 @@ def kspace_first_order_1D(kgrid: kWaveGrid, if hasattr(k_sim, 'p_source_sig_index') and k_sim.p_source_sig_index is not None: k_sim.p_source_sig_index = np.squeeze(k_sim.p_source_sig_index) - int(1) - # # ========================================================================= - # # PREPARE VISUALISATIONS - # # ========================================================================= + # ========================================================================= + # PREPARE VISUALISATIONS + # ========================================================================= # # pre-compute suitable axes scaling factor # if options.plot_layout or options.plot_sim @@ -364,7 +356,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, for t_index in tqdm(np.arange(index_start, index_end, index_step, dtype=int)): # calculate ux at the next time step using dp/dx at the current time step - if not options.nonuniform_grid and not options.use_finite_difference: + if not k_sim.nonuniform_grid and not options.use_finite_difference: # calculate gradient using the k-space method on a regular grid ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - @@ -373,18 +365,14 @@ def kspace_first_order_1D(kgrid: kWaveGrid, elif options.use_finite_difference: match options.use_finite_difference: case 2: - # calculate gradient using second-order accurate finite # difference scheme (including half step forward) - dpdx = (np.append(p[1:], 0.0) - p) / kgrid.dx - # dpdx = ([p(2:end); 0] - p) / kgrid.dx; + dpdx = (np.append(p[1:], 0.0) - p) / kgrid.dx ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * dpdx ) case 4: - # calculate gradient using fourth-order accurate finite # difference scheme (including half step forward) - # dpdx = ([0; p(1:(end-1))] - 27*p + 27*[p(2:end); 0] - [p(3:end); 0; 0])/(24*kgrid.dx); dpdx = (np.insert(p[:-1], 0, 0) - 27.0 * p + 27 * np.append(p[1:], 0.0) - np.append(p[2:], [0, 0])) / (24.0 * kgrid.dx) ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * dpdx ) @@ -394,8 +382,6 @@ def kspace_first_order_1D(kgrid: kWaveGrid, ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * k_sim.kgrid.dxudxn_sgx * np.real(scipy.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) - - # print("2.", np.shape(p)) # # add in the velocity source term # if (k_sim.source_ux is not False and t_index < np.shape(source.ux)[1]): @@ -418,48 +404,37 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # calculate du/dx at the next time step - if not options.nonuniform_grid and not options.use_finite_difference: - + if not k_sim.nonuniform_grid and not options.use_finite_difference: # calculate gradient using the k-space method on a regular grid duxdx = np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * scipy.fft.fftn(ux_sgx, axes=(0,)), axes=(0,) ) ) - elif options.use_finite_difference: match options.use_finite_difference: case 2: - # calculate gradient using second-order accurate finite difference scheme (including half step backward) - # duxdx = (ux_sgx - [0; ux_sgx(1:end - 1)]) / kgrid.dx; duxdx = (ux_sgx - np.append(ux_sgx[:-1], 0)) / kgrid.dx case 4: - # calculate gradient using fourth-order accurate finite difference scheme (including half step backward) duxdx = (np.append([0, 0], ux_sgx[:-2]) - 27.0 * np.append(0, ux_sgx[:-1]) + 27.0 * ux_sgx - np.append(ux_sgx[1:], 0)) / (24.0 * kgrid.dx) - # duxdx = ([0; 0; ux_sgx(1:(end - 2))] - 27 * [0; ux_sgx(1:(end - 1))] + 27 * ux_sgx - [ux_sgx(2:end); 0]) / (24 * kgrid.dx); - + else: # calculate gradients using a non-uniform grid via the mapped # pseudospectral method duxdx = kgrid.dxudxn * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * scipy.fft.fftn(ux_sgx, axes=(0,)), axes=(0,))) - # calculate rhox at the next time step if not k_sim.is_nonlinear: # use linearised mass conservation equation rhox = pml_x * (pml_x * rhox - dt * rho0 * duxdx) - else: # use nonlinear mass conservation equation (explicit calculation) rhox = pml_x * (pml_x * rhox - dt * (2.0 * rhox + rho0) * duxdx) - # print("5.", np.shape(p)) - # add in the pre-scaled pressure source term as a mass source # if options.source_p >= t_index: # if (k_sim.source_p is not False and t_index < np.shape(source.p)[1]): - # print("??????????") # match source.p_mode: # case 'dirichlet': # # enforce source values as a dirichlet boundary condition @@ -480,39 +455,29 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # equation of state if not k_sim.is_nonlinear: - # print("is linear", k_sim.equation_of_state, type(k_sim.equation_of_state)) match k_sim.equation_of_state: case 'lossless': - # calculate p using a linear adiabatic equation of state p = np.squeeze(c0**2) * np.squeeze(rhox) - - # print("3.", np.shape(p), np.squeeze(c0**2).shape, np.squeeze(rhox).shape) case 'absorbing': - # calculate p using a linear absorbing equation of state p = np.squeeze(c0**2 * (rhox + medium.absorb_tau * np.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,) )) - medium.absorb_eta * np.real(scipy.fft.ifftn(medium.absorb_nabla2 * scipy.fft.fftn(rhox, axes=(0,)), axes=(0,))) ) ) - - + case 'stokes': - # calculate p using a linear absorbing equation of state # assuming alpha_power = 2 p = c0**2 * (rhox + medium.absorb_tau * rho0 * duxdx) - else: match k_sim.equation_of_state: case 'lossless': - # calculate p using a nonlinear adiabatic equation of state p = c0**2 * (rhox + medium.BonA * rhox**2 / (2.0 * rho0)) case 'absorbing': - # calculate p using a nonlinear absorbing equation of state p = c0**2 * ( rhox + medium.absorb_tau * np.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,))) @@ -520,29 +485,21 @@ def kspace_first_order_1D(kgrid: kWaveGrid, + medium.BonA * rhox**2 / (2.0 * rho0) ) case 'stokes': - # calculate p using a nonlinear absorbing equation of state # assuming alpha_power = 2 p = c0**2 * (rhox + medium.absorb_tau * rho0 * duxdx + medium.BonA * rhox**2 / (2.0 * rho0) ) - - - # print("7.", np.shape(p), k_sim.source.p0.shape) # enforce initial conditions if source.p0 is defined instead of time varying sources - if t_index == 0 and k_sim.source_p0: + if t_index == 0 and k_sim.source.p0 is not None: - # print(np.shape(rhox)) + p0 = np.squeeze(k_sim.source.p0) - if k_sim.source.p0.ndim == 1: - p0 = k_sim.source.p0[:, np.newaxis] - else: - p0 = k_sim.source.p0 # add the initial pressure to rho as a mass source p = p0 - rhox = p0 / c0**2 + rhox = p0 / np.squeeze(c0)**2 # compute u(t = t1 - dt/2) based on u(dt/2) = -u(-dt/2) which forces u(t = t1) = 0 if not options.use_finite_difference: @@ -551,22 +508,17 @@ def kspace_first_order_1D(kgrid: kWaveGrid, ux_sgx = dt * rho0_sgx_inv * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_pos * kappa * scipy.fft.fftn(p, axes=(0,)), axes=(0,) )) / 2.0 p_k = scipy.fft.fftn(p, axes=(0,)) - + p_k = np.squeeze(p_k) else: match options.use_finite_difference: case 2: - - # calculate gradient using second-order accurate finite difference scheme (including half step forward) - # dpdx = ([p(2:end); 0] - p) / kgrid.dx; - + # calculate gradient using second-order accurate finite difference scheme (including half step forward) dpdx = (np.append(p[1:], 0.0) - p) / kgrid.dx ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0 - case 4: - + case 4: # calculate gradient using fourth-order accurate finite difference scheme (including half step backward) - # dpdx = ([p(3:end); 0; 0] - 27 * [p(2:end); 0] + 27 * p - [0; p(1:(end-1))]) / (24 * kgrid.dx) dpdx = (np.append(p[2:], [0, 0]) - 27.0 * np.append(p[1:], 0) + 27.0 * p - np.append(0, p[:-1])) / (24.0 * kgrid.dx) ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0 @@ -574,12 +526,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, else: # precompute fft of p here so p can be modified for visualisation p_k = scipy.fft.fftn(p, axes=(0,)) - p_k = p_k[:, np.newaxis] + #p_k = p_k[:, np.newaxis] # extract required sensor data from the pressure and particle velocity # fields if the number of time steps elapsed is greater than # sensor.record_start_index (defaults to 1) - if options.use_sensor and not options.time_rev and (t_index >= sensor.record_start_index): + if k_sim.use_sensor and (t_index >= sensor.record_start_index): # update index for data storage file_index: int = t_index - sensor.record_start_index diff --git a/kwave/utils/filters.py b/kwave/utils/filters.py index 2cdb1ba3..16dba7a4 100644 --- a/kwave/utils/filters.py +++ b/kwave/utils/filters.py @@ -588,6 +588,10 @@ def smooth(a: np.ndarray, restore_max: Optional[bool] = False, window_type: Opti win, _ = get_win(grid_size, type_=window_type, rotation=DEF_USE_ROTATION, symmetric=window_symmetry) win = np.abs(win) + if win.ndim == 2 and a.ndim == 1: + # if mismatch between 1d grid and window, make sure win is same shape as the grid + win = np.squeeze(win) + # rotate window if input mat is (1, N) if a.shape[0] == 1: # is row? win = win.T diff --git a/kwave/utils/signals.py b/kwave/utils/signals.py index 3ad280f1..5839881b 100644 --- a/kwave/utils/signals.py +++ b/kwave/utils/signals.py @@ -56,7 +56,7 @@ def add_noise(signal: np.ndarray, snr: float, mode="rms"): @typechecker def get_win( - N: Union[int, np.ndarray, Tuple[int, int], Tuple[int, int, int], List[Int[kt.ScalarLike, ""]]], + N: Union[int, np.ndarray, Tuple[int], Tuple[int, int], Tuple[int, int, int], List[Int[kt.ScalarLike, ""]]], # TODO: replace and refactor for scipy.signal.get_window # https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.get_window.html#scipy.signal.get_window type_: str, # TODO change this to enum in the future @@ -149,6 +149,10 @@ def cosine_series(n: int, N: int, coeffs: List[float]) -> np.ndarray: # create the window if N.size == 1: + + # cast if is Tuple with a single value + N = int(N) + # TODO: what should this behaviour be if N is a list of ints? make windows of multiple lengths? n = np.arange(0, N) diff --git a/pyproject.toml b/pyproject.toml index 72ffb94e..7ef53f83 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,8 @@ dependencies = [ "matplotlib==3.10.3", "beartype==0.22.4", "jaxtyping==0.3.2", - "deprecated>=1.2.14" + "deprecated>=1.2.14", + "tqdm==4.67.1" ] [project.urls] From f78fb0095aa001cb4157e72386d9e57d22ce9a25 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 8 Jan 2026 10:15:49 +0100 Subject: [PATCH 16/29] To pass failing tests, repo path --- kwave/kWaveSimulation.py | 45 ++++++++++--------- .../create_storage_variables.py | 8 ++-- kwave/utils/signals.py | 6 +-- pyproject.toml | 2 +- 4 files changed, 32 insertions(+), 29 deletions(-) diff --git a/kwave/kWaveSimulation.py b/kwave/kWaveSimulation.py index e7ab3b0a..a5da766b 100644 --- a/kwave/kWaveSimulation.py +++ b/kwave/kWaveSimulation.py @@ -75,7 +75,7 @@ def __init__( self.sensor = kSensor(mask=np.ones((Nx, Ny, max(1, Nz))), record=["p_final"]) # set time reversal flag - self.time_rev = True + #self.time_rev = None self.record = Recorder() self.record.p = False @@ -548,7 +548,7 @@ def input_checking(self, calling_func_name) -> None: "blank_sensor": self.blank_sensor, "binary_sensor_mask": self.binary_sensor_mask, "record_u_split_field": self.record.u_split_field, - "time_rev": self.time_rev, + "time_rev": None, "reorder_data": self.reorder_data, "transducer_receive_elevation_focus": self.transducer_receive_elevation_focus, "axisymmetric": opt.simulation_type.is_axisymmetric(), @@ -585,7 +585,7 @@ def input_checking(self, calling_func_name) -> None: "blank_sensor": self.blank_sensor, "binary_sensor_mask": self.binary_sensor_mask, "record_u_split_field": self.record.u_split_field, - "time_rev": self.time_rev, + "time_rev": None, "reorder_data": self.reorder_data, "transducer_receive_elevation_focus": self.transducer_receive_elevation_focus, "axisymmetric": opt.simulation_type.is_axisymmetric(), @@ -742,9 +742,10 @@ def check_sensor(self, kgrid_dim) -> None: # given, the time history of the acoustic pressure is recorded by # default if self.sensor.record is not None: + # check for time reversal data - if self.time_rev: - logging.log(logging.WARN, "sensor.record is not used for time reversal reconstructions") + # if self.time_rev: + # logging.log(logging.WARN, "sensor.record is not used for time reversal reconstructions") # check the input is a cell array assert isinstance(self.sensor.record, list), 'sensor.record must be given as a list, e.g. ["p", "u"]' @@ -872,19 +873,19 @@ def check_sensor(self, kgrid_dim) -> None: # if in time reversal mode, reorder the p0 input data in # the order of the binary sensor_mask - if self.time_rev: - raise NotImplementedError - """ - # append the reordering data - new_col_pos = length(sensor.time_reversal_boundary_data(1, :)) + 1; - sensor.time_reversal_boundary_data(:, new_col_pos) = order_index; - - # reorder p0 based on the order_index - sensor.time_reversal_boundary_data = sort_rows(sensor.time_reversal_boundary_data, new_col_pos); - - # remove the reordering data - sensor.time_reversal_boundary_data = sensor.time_reversal_boundary_data(:, 1:new_col_pos - 1); - """ + # if self.time_rev: + # raise NotImplementedError + # """ + # # append the reordering data + # new_col_pos = length(sensor.time_reversal_boundary_data(1, :)) + 1; + # sensor.time_reversal_boundary_data(:, new_col_pos) = order_index; + + # # reorder p0 based on the order_index + # sensor.time_reversal_boundary_data = sort_rows(sensor.time_reversal_boundary_data, new_col_pos); + + # # remove the reordering data + # sensor.time_reversal_boundary_data = sensor.time_reversal_boundary_data(:, 1:new_col_pos - 1); + # """ else: # set transducer sensor flag self.transducer_sensor = True @@ -1097,8 +1098,8 @@ def check_kgrid_time(self) -> None: # check kgrid for t_array existence, and create if not defined if isinstance(self.kgrid.t_array, str) and self.kgrid.t_array == "auto": # check for time reversal mode - if self.time_rev: - raise ValueError("kgrid.t_array (Nt and dt) must be defined explicitly in time reversal mode.") + # if self.time_rev: + # raise ValueError("kgrid.t_array (Nt and dt) must be defined explicitly in time reversal mode.") # check for time varying sources if (not self.source_p0_elastic) and ( @@ -1262,8 +1263,8 @@ def check_input_combinations(self, opt: SimulationOptions, user_medium_density_i raise NotImplementedError(f"diary({self.LOG_NAME}.txt');") # update command line status - if self.time_rev: - logging.log(logging.INFO, " time reversal mode") + # if self.time_rev: + # logging.log(logging.INFO, " time reversal mode") # cleanup unused variables for k in list(self.__dict__.keys()): diff --git a/kwave/kWaveSimulation_helper/create_storage_variables.py b/kwave/kWaveSimulation_helper/create_storage_variables.py index ee8afcd6..dcf9d6d0 100644 --- a/kwave/kWaveSimulation_helper/create_storage_variables.py +++ b/kwave/kWaveSimulation_helper/create_storage_variables.py @@ -81,8 +81,9 @@ def create_storage_variables(kgrid: kWaveGrid, sensor, opt: SimulationOptions, flags = set_flags(flags, values.sensor_x, sensor.mask, opt.cartesian_interp) - # preallocate output variables - if flags.time_rev: + # preallocate output variables - this keeps the time reversal flag, which is, at present, is always None, so that is allocating data + # for intermediate steps in time reversal simulations debugging may be possible. + if flags.time_rev is not None: return flags num_sensor_points = get_num_of_sensor_points(flags.blank_sensor, @@ -125,8 +126,9 @@ def set_flags(flags: dotdict, sensor_x, sensor_mask, is_cartesian_interp): # input_checking, but switch on Cartesian reorder flag so that the # final data is returned in the correct order (not in time # reversal mode). + # Again, as time reversal is not yet implemented, time_rev is always None here, so this is just setting reorder_data to True flags.binary_sensor_mask = True - if not flags.time_rev: + if flags.time_rev is None: flags.reorder_data = True # check if any duplicate points have been discarded in the diff --git a/kwave/utils/signals.py b/kwave/utils/signals.py index 5839881b..a7aaffdd 100644 --- a/kwave/utils/signals.py +++ b/kwave/utils/signals.py @@ -142,13 +142,13 @@ def cosine_series(n: int, N: int, coeffs: List[float]) -> np.ndarray: # if a square window is required, replace grid sizes with smallest size and # store a copy of the original size - if square and (N.size != 1): + if square and (np.asarray(N).size != 1): N_orig = np.copy(N) L = min(N) N[:] = L # create the window - if N.size == 1: + if np.asarray(N).size == 1: # cast if is Tuple with a single value N = int(N) @@ -307,7 +307,7 @@ def cosine_series(n: int, N: int, coeffs: List[float]) -> np.ndarray: raise ValueError("Invalid input for N, only 1-, 2-, and 3-D windows are supported.") # enlarge the window if required - if square and (N.size != 1): + if square and (np.asarray(N).size != 1): L = N[0] win_sq = win win = np.zeros(N_orig) diff --git a/pyproject.toml b/pyproject.toml index 7ef53f83..217e5cef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,7 +39,7 @@ dependencies = [ [project.urls] Homepage = "http://www.k-wave.org/" Documentation = "https://waltersimson.com/k-wave-python/" -Repository = "https://github.com/djps/k-wave-python" +Repository = "https://github.com/waltsims/k-wave-python" Bug-tracker = "https://github.com/waltsims/k-wave-python/issues" [project.optional-dependencies] From 57515332ddc0f0bdd4e6943313113bd0aef35a83 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 8 Jan 2026 11:27:25 +0100 Subject: [PATCH 17/29] update to ignore windowing with single length --- .../python_testers/getWin_test.py | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tests/matlab_test_data_collectors/python_testers/getWin_test.py b/tests/matlab_test_data_collectors/python_testers/getWin_test.py index 21072f60..2a0be0cf 100644 --- a/tests/matlab_test_data_collectors/python_testers/getWin_test.py +++ b/tests/matlab_test_data_collectors/python_testers/getWin_test.py @@ -1,3 +1,4 @@ +import logging import os from pathlib import Path @@ -12,7 +13,7 @@ def test_get_win(): test_data_file = os.path.join(Path(__file__).parent, "collectedValues/getWin.mat") reader = TestRecordReader(test_data_file) for i in range(len(reader)): - # logging.log(logging.INFO, "i: => %d", i) + logging.log(logging.INFO, "i: => %d", i) N = reader.expected_value_of("N") input_args = reader.expected_value_of("input_args") @@ -30,16 +31,17 @@ def test_get_win(): N = np.squeeze(N) - # logging.log(logging.INFO, N, type_, param, rotation, symmetric, square, win) + logging.log(logging.INFO, N, type_, param, rotation, symmetric, square) - win_py, cg_py = get_win(N, type_, param=param, rotation=rotation, symmetric=symmetric, square=square) + if (np.isscalar(N) and N > 1) or (isinstance(N, np.ndarray) and N.all() > 1): + win_py, cg_py = get_win(N, type_, param=param, rotation=rotation, symmetric=symmetric, square=square) - cg = reader.expected_value_of("cg") - win = reader.expected_value_of("win") - win_py = np.squeeze(win_py) - assert np.shape(win_py) == np.shape(win) - assert np.allclose(win_py, win, equal_nan=True) - assert np.allclose(cg_py, cg, equal_nan=True) + cg = reader.expected_value_of("cg") + win = reader.expected_value_of("win") + win_py = np.squeeze(win_py) + assert np.shape(win_py) == np.shape(win) + assert np.allclose(win_py, win, equal_nan=True) + assert np.allclose(cg_py, cg, equal_nan=True) reader.increment() From 78405e3ad960b258ce27d00b56a1aefc34938e41 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 8 Jan 2026 12:07:24 +0100 Subject: [PATCH 18/29] ruff fixes and import path for notebook --- .../ivp_1D_simulation/ivp_1D_simulation.ipynb | 422 +++++++++--------- .../ivp_1D_simulation/ivp_1D_simulation.py | 6 +- kwave/kWaveSimulation.py | 3 +- kwave/kWaveSimulation_helper/__init__.py | 3 +- .../create_storage_variables.py | 11 +- .../extract_sensor_data.py | 1 + kwave/kspaceFirstOrder1D.py | 10 +- 7 files changed, 224 insertions(+), 232 deletions(-) diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb index 3ba83862..8121ea73 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb @@ -1,214 +1,212 @@ { - "cells": [ - { - "cell_type": "markdown", - "source": [ - "First install the pacakage using the latests version." - ], - "metadata": { - "id": "RQYRmOfG27FV" - } - }, - { - "cell_type": "code", - "source": [ - "!pip install git+https://github.com/djps/k-wave-python@1d-demo" - ], - "metadata": { - "id": "5bq8_I2z29l6" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "7lktiAv5XtsS" - }, - "source": [ - "Now import dependencies, and party of library which are required." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "VCcwZsQ2ASoj" - }, - "outputs": [], - "source": [ - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "\n", - "from kwave.data import Vector\n", - "\n", - "from kwave.kgrid import kWaveGrid\n", - "from kwave.kmedium import kWaveMedium\n", - "from kwave.ksensor import kSensor\n", - "from kwave.ksource import kSource\n", - "\n", - "from kwave.kspaceFirstOrder1D import kspace_first_order_1D\n", - "\n", - "from kwave.options.simulation_options import SimulationOptions" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "xUqsDgayASoj" - }, - "source": [ - "Set parameters to build the grid" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "FwbGsg_gASok" - }, - "outputs": [], - "source": [ - "# create the computational grid\n", - "Nx: int = 512 # number of grid points in the x (row) direction\n", - "dx: float = 0.05e-3 # grid point spacing in the x direction [m]\n", - "\n", - "grid_size_points = Vector([Nx, ])\n", - "grid_spacing_meters = Vector([dx, ])\n", - "\n", - "# create the k-space grid\n", - "kgrid = kWaveGrid(grid_size_points, grid_spacing_meters)\n", - "\n", - "# define the properties of the propagation medium\n", - "sound_speed = 1500.0 * np.ones((Nx, 1)) # [m/s]\n", - "sound_speed[:np.round(Nx / 3).astype(int) - 1] = 2000.0\t # [m/s]\n", - "density = 1000.0 * np.ones((Nx, 1)) # [kg/m^3]\n", - "density[np.round(4 * Nx / 5).astype(int) - 1:] = 1500.0 # [kg/m^3]\n", - "medium = kWaveMedium(sound_speed=sound_speed, density=density)\n", - "\n", - "# set the simulation time to capture the reflections\n", - "c_max = np.max(medium.sound_speed.flatten()) # [m/s]\n", - "t_end = 2.5 * kgrid.x_size / c_max # [s]\n", - "\n", - "# define the time array\n", - "kgrid.makeTime(c_max, t_end=t_end)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "misFfiLaASok" - }, - "source": [ - "Build source and sensor" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "maEowzD6ASok" - }, - "outputs": [], - "source": [ - "# Create the source object\n", - "source = kSource()\n", - "\n", - "# create initial pressure distribution using a smoothly shaped sinusoid\n", - "x_pos: int = 280 # [grid points]\n", - "width: int = 100 # [grid points]\n", - "height: int = 1 # [au]\n", - "p0 = np.linspace(0.0, 2.0 * np.pi, width + 1)\n", - "\n", - "part1 = np.zeros(x_pos).astype(float)\n", - "part2 = (height / 2.0) * np.sin(p0 - np.pi / 2.0) + (height / 2.0)\n", - "part3 = np.zeros(Nx - x_pos - width - 1).astype(float)\n", - "source.p0 = np.concatenate([part1, part2, part3])\n", - "\n", - "# create a Cartesian sensor mask recording the pressure\n", - "sensor = kSensor()\n", - "sensor.record = [\"p\"]\n", - "\n", - "# this hack is needed to ensure that the sensor is in [1,2] dimensions\n", - "mask = np.array([-10e-3, 10e-3]) # [mm]\n", - "mask = mask[:, np.newaxis].T\n", - "sensor.mask = mask" - ] - }, - { - "cell_type": "markdown", - "source": [ - "Set options and run simulation" - ], - "metadata": { - "id": "ZSmPpm6X3fm5" - } - }, - { - "cell_type": "code", - "source": [ - "# define the simulation options\n", - "simulation_options = SimulationOptions(data_cast=\"off\", save_to_disk=False)\n", - "\n", - "# run the simulation\n", - "sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options)" - ], - "metadata": { - "id": "BiFT0YEE3fyE" - }, - "execution_count": null, - "outputs": [] - }, - { - "cell_type": "markdown", - "metadata": { - "id": "a4WRzAZ_ASol" - }, - "source": [ - "Visualisations" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "id": "s4ZO890kASol" - }, - "outputs": [], - "source": [ - "# plot the recorded time signals\n", - "_, ax1 = plt.subplots()\n", - "ax1.plot(sensor_data['p'][0, :], 'b-')\n", - "ax1.plot(sensor_data['p'][1, :], 'r-')\n", - "ax1.grid(True)\n", - "ax1.set_ylim(-0.1, 0.7)\n", - "ax1.set_ylabel('Pressure')\n", - "ax1.set_xlabel('Time Step')\n", - "ax1.legend(['Sensor Position 1', 'Sensor Position 2'])\n", - "plt.show()" - ] - } - ], - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "display_name": "Python 3", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3" - } + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "RQYRmOfG27FV" + }, + "source": [ + "First install the pacakage using the latests version." + ] }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "5bq8_I2z29l6" + }, + "outputs": [], + "source": [ + "%%capture\n", + "!pip install k-wave-python" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7lktiAv5XtsS" + }, + "source": [ + "Now import dependencies, and party of library which are required." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VCcwZsQ2ASoj" + }, + "outputs": [], + "source": [ + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "from kwave.data import Vector\n", + "from kwave.kgrid import kWaveGrid\n", + "from kwave.kmedium import kWaveMedium\n", + "from kwave.ksensor import kSensor\n", + "from kwave.ksource import kSource\n", + "from kwave.kspaceFirstOrder1D import kspace_first_order_1D\n", + "from kwave.options.simulation_options import SimulationOptions" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "xUqsDgayASoj" + }, + "source": [ + "Set parameters to build the grid" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "FwbGsg_gASok" + }, + "outputs": [], + "source": [ + "# create the computational grid\n", + "Nx: int = 512 # number of grid points in the x (row) direction\n", + "dx: float = 0.05e-3 # grid point spacing in the x direction [m]\n", + "\n", + "grid_size_points = Vector([Nx, ])\n", + "grid_spacing_meters = Vector([dx, ])\n", + "\n", + "# create the k-space grid\n", + "kgrid = kWaveGrid(grid_size_points, grid_spacing_meters)\n", + "\n", + "# define the properties of the propagation medium\n", + "sound_speed = 1500.0 * np.ones((Nx, 1)) # [m/s]\n", + "sound_speed[:np.round(Nx / 3).astype(int) - 1] = 2000.0\t # [m/s]\n", + "density = 1000.0 * np.ones((Nx, 1)) # [kg/m^3]\n", + "density[np.round(4 * Nx / 5).astype(int) - 1:] = 1500.0 # [kg/m^3]\n", + "medium = kWaveMedium(sound_speed=sound_speed, density=density)\n", + "\n", + "# set the simulation time to capture the reflections\n", + "c_max = np.max(medium.sound_speed.flatten()) # [m/s]\n", + "t_end = 2.5 * kgrid.x_size / c_max # [s]\n", + "\n", + "# define the time array\n", + "kgrid.makeTime(c_max, t_end=t_end)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "misFfiLaASok" + }, + "source": [ + "Build source and sensor" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "maEowzD6ASok" + }, + "outputs": [], + "source": [ + "# Create the source object\n", + "source = kSource()\n", + "\n", + "# create initial pressure distribution using a smoothly shaped sinusoid\n", + "x_pos: int = 280 # [grid points]\n", + "width: int = 100 # [grid points]\n", + "height: int = 1 # [au]\n", + "p0 = np.linspace(0.0, 2.0 * np.pi, width + 1)\n", + "\n", + "part1 = np.zeros(x_pos).astype(float)\n", + "part2 = (height / 2.0) * np.sin(p0 - np.pi / 2.0) + (height / 2.0)\n", + "part3 = np.zeros(Nx - x_pos - width - 1).astype(float)\n", + "source.p0 = np.concatenate([part1, part2, part3])\n", + "\n", + "# create a Cartesian sensor mask recording the pressure\n", + "sensor = kSensor()\n", + "sensor.record = [\"p\"]\n", + "\n", + "# this hack is needed to ensure that the sensor is in [1,2] dimensions\n", + "mask = np.array([-10e-3, 10e-3]) # [mm]\n", + "mask = mask[:, np.newaxis].T\n", + "sensor.mask = mask" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZSmPpm6X3fm5" + }, + "source": [ + "Set options and run simulation" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "BiFT0YEE3fyE" + }, + "outputs": [], + "source": [ + "# define the simulation options\n", + "simulation_options = SimulationOptions(data_cast=\"off\", save_to_disk=False)\n", + "\n", + "# run the simulation\n", + "sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "a4WRzAZ_ASol" + }, + "source": [ + "Visualisations" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "s4ZO890kASol" + }, + "outputs": [], + "source": [ + "# plot the recorded time signals\n", + "_, ax1 = plt.subplots()\n", + "ax1.plot(sensor_data['p'][0, :], 'b-')\n", + "ax1.plot(sensor_data['p'][1, :], 'r-')\n", + "ax1.grid(True)\n", + "ax1.set_ylim(-0.1, 0.7)\n", + "ax1.set_ylabel('Pressure')\n", + "ax1.set_xlabel('Time Step')\n", + "ax1.legend(['Sensor Position 1', 'Sensor Position 2'])\n", + "plt.show()" + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.py b/examples/ivp_1D_simulation/ivp_1D_simulation.py index 6bfb9e16..885bc3d8 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.py +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.py @@ -8,21 +8,17 @@ Propagation Medium examples. """ -import numpy as np import matplotlib.pyplot as plt +import numpy as np from kwave.data import Vector - from kwave.kgrid import kWaveGrid from kwave.kmedium import kWaveMedium from kwave.ksensor import kSensor from kwave.ksource import kSource - from kwave.kspaceFirstOrder1D import kspace_first_order_1D - from kwave.options.simulation_options import SimulationOptions - # ========================================================================= # SIMULATION # ========================================================================= diff --git a/kwave/kWaveSimulation.py b/kwave/kWaveSimulation.py index a5da766b..d3f5a808 100644 --- a/kwave/kWaveSimulation.py +++ b/kwave/kWaveSimulation.py @@ -1,10 +1,9 @@ +import copy import logging import warnings from dataclasses import dataclass -import copy import numpy as np - from deprecated import deprecated from kwave.data import Vector diff --git a/kwave/kWaveSimulation_helper/__init__.py b/kwave/kWaveSimulation_helper/__init__.py index 899c46e0..34577614 100644 --- a/kwave/kWaveSimulation_helper/__init__.py +++ b/kwave/kWaveSimulation_helper/__init__.py @@ -1,4 +1,5 @@ from kwave.kWaveSimulation_helper.create_absorption_variables import create_absorption_variables +from kwave.kWaveSimulation_helper.create_storage_variables import create_storage_variables from kwave.kWaveSimulation_helper.display_simulation_params import display_simulation_params from kwave.kWaveSimulation_helper.expand_grid_matrices import expand_grid_matrices from kwave.kWaveSimulation_helper.extract_sensor_data import extract_sensor_data @@ -7,8 +8,6 @@ from kwave.kWaveSimulation_helper.scale_source_terms_func import scale_source_terms_func from kwave.kWaveSimulation_helper.set_sound_speed_ref import set_sound_speed_ref -from kwave.kWaveSimulation_helper.create_storage_variables import create_storage_variables - __all__ = ["create_absorption_variables", "create_storage_variables", "display_simulation_params", "expand_grid_matrices", "extract_sensor_data", "retract_transducer_grid_size", "save_to_disk_func", "scale_source_terms_func", "set_sound_speed_ref"] \ No newline at end of file diff --git a/kwave/kWaveSimulation_helper/create_storage_variables.py b/kwave/kWaveSimulation_helper/create_storage_variables.py index dcf9d6d0..275ff961 100644 --- a/kwave/kWaveSimulation_helper/create_storage_variables.py +++ b/kwave/kWaveSimulation_helper/create_storage_variables.py @@ -1,18 +1,17 @@ +from copy import deepcopy +from typing import List, Union + import numpy as np from numpy.fft import ifftshift -from copy import deepcopy -from typing import Union, List +from scipy.spatial import Delaunay from kwave.data import Vector from kwave.kgrid import kWaveGrid -from kwave.recorder import Recorder from kwave.options.simulation_options import SimulationOptions +from kwave.recorder import Recorder from kwave.utils.dotdictionary import dotdict -from scipy.spatial import Delaunay - - def gridDataFast2D(x, y, xi, yi): """ Delauney triangulation in 2D diff --git a/kwave/kWaveSimulation_helper/extract_sensor_data.py b/kwave/kWaveSimulation_helper/extract_sensor_data.py index fc727d75..75087c6b 100644 --- a/kwave/kWaveSimulation_helper/extract_sensor_data.py +++ b/kwave/kWaveSimulation_helper/extract_sensor_data.py @@ -1,5 +1,6 @@ import numpy as np + def extract_sensor_data(dim: int, sensor_data, file_index, sensor_mask_index, flags, record, p, ux_sgx, uy_sgy=None, uz_sgz=None): """ diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index 020e8756..e74903fc 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -1,23 +1,23 @@ +from typing import Union + import numpy as np import scipy.fft from tqdm import tqdm -from typing import Union from kwave.kgrid import kWaveGrid from kwave.kmedium import kWaveMedium from kwave.ksensor import kSensor from kwave.ksource import kSource +from kwave.ktransducer import NotATransducer from kwave.kWaveSimulation import kWaveSimulation from kwave.kWaveSimulation_helper import extract_sensor_data -from kwave.ktransducer import NotATransducer - +from kwave.options.simulation_options import SimulationOptions from kwave.utils.data import scale_time +from kwave.utils.dotdictionary import dotdict from kwave.utils.math import sinc from kwave.utils.pml import get_pml from kwave.utils.tictoc import TicToc -from kwave.utils.dotdictionary import dotdict -from kwave.options.simulation_options import SimulationOptions def kspace_first_order_1D(kgrid: kWaveGrid, source: kSource, From a85355fa59dbe9be2c21cd152ce20b4ad31e12a2 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 8 Jan 2026 13:50:38 +0100 Subject: [PATCH 19/29] revert version back to current one --- kwave/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kwave/__init__.py b/kwave/__init__.py index 1885928c..93f8aee3 100644 --- a/kwave/__init__.py +++ b/kwave/__init__.py @@ -9,7 +9,7 @@ # Test installation with: # python3 -m pip install -i https://test.pypi.org/simple/ --extra-index-url=https://pypi.org/simple/ k-Wave-python==0.3.0 -__version__ = "0.4.2" +__version__ = "0.4.1" # Constants and Configurations URL_BASE = "https://github.com/waltsims/" From e6f7569509896af7edc25e28e2e2528c665e17c6 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Thu, 8 Jan 2026 15:07:06 +0100 Subject: [PATCH 20/29] clean up and add cupy import --- kwave/kspaceFirstOrder1D.py | 95 ++++++++++++++++++------------------- 1 file changed, 45 insertions(+), 50 deletions(-) diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index e74903fc..2bc0f0c9 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -1,5 +1,10 @@ from typing import Union +try: + import cupy as cp +except ImportError: + cp = None + import numpy as np import scipy.fft from tqdm import tqdm @@ -171,6 +176,8 @@ def kspace_first_order_1D(kgrid: kWaveGrid, timer = TicToc() timer.tic() + xp = cp if cp and backend in ("auto", "gpu") else np + # run script to check inputs and create the required arrays k_sim = kWaveSimulation(kgrid=kgrid, source=source, sensor=sensor, medium=medium, simulation_options=simulation_options) @@ -209,8 +216,6 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # invert rho0 so it doesn't have to be done each time step rho0_sgx_inv = 1.0 / rho0_sgx - # rho0_sgx_inv = rho0_sgx_inv[:, np.newaxis] - # clear unused variables del rho0_sgx @@ -241,29 +246,22 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # define the k-space derivative operator ddx_k = scipy.fft.ifftshift(1j * kx_vec) - - # ddx_k = ddx_k[:, np.newaxis] # define the staggered grid shift operators (the option options.use_sg exists for debugging) if options.use_sg: ddx_k_shift_pos = scipy.fft.ifftshift( np.exp( 1j * kx_vec * dx / 2.0)) ddx_k_shift_neg = scipy.fft.ifftshift( np.exp(-1j * kx_vec * dx / 2.0)) - #ddx_k_shift_pos = ddx_k_shift_pos[:, np.newaxis] - #ddx_k_shift_neg = ddx_k_shift_neg[:, np.newaxis] else: ddx_k_shift_pos = 1.0 ddx_k_shift_neg = 1.0 - # create k-space operator (the option options.use_kspace exists for debugging) if options.use_kspace: kappa = scipy.fft.ifftshift(sinc(c_ref * kgrid.k * kgrid.dt / 2.0)) - kappa = np.squeeze(kappa) - # kappa = kappa[:, np.newaxis] + kappa = np.squeeze(kappa) if (hasattr(options, 'source_p') and hasattr(k_sim.source, 'p_mode')) and (k_sim.source.p_mode == 'additive') or \ (hasattr(options, 'source_ux') and hasattr(k_sim.source, 'u_mode')) and (k_sim.source.u_mode == 'additive'): source_kappa = scipy.fft.ifftshift(np.cos (c_ref * kgrid.k * kgrid.dt / 2.0)) - #source_kappa = source_kappa[:, np.newaxis] else: kappa = 1.0 source_kappa = 1.0 @@ -383,24 +381,24 @@ def kspace_first_order_1D(kgrid: kWaveGrid, dt * rho0_sgx_inv * k_sim.kgrid.dxudxn_sgx * np.real(scipy.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) - # # add in the velocity source term - # if (k_sim.source_ux is not False and t_index < np.shape(source.ux)[1]): - # #if options.source_ux >= t_index: - # match source.u_mode: - # case 'dirichlet': - # # enforce the source values as a dirichlet boundary condition - # ux_sgx[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] - # case 'additive': - # # extract the source values into a matrix - # source_mat = np.zeros([kgrid.Nx, 1]) - # source_mat[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] - # # apply the k-space correction - # source_mat = np.real(scipy.fft.ifft(source_kappa * scipy.fft.fft(source_mat))) - # # add the source values to the existing field values including the k-space correction - # ux_sgx = ux_sgx + source_mat - # case 'additive-no-correction': - # # add the source values to the existing field values - # ux_sgx[k_sim.u_source_pos_index] = ux_sgx[k_sim.u_source_pos_index] + source.ux[k_sim.u_source_sig_index, t_index] + # add in the velocity source term + if (k_sim.source_ux is not False and t_index < np.shape(source.ux)[1]): + if options.source_ux >= t_index: + match source.u_mode: + case 'dirichlet': + # enforce the source values as a dirichlet boundary condition + ux_sgx[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] + case 'additive': + # extract the source values into a matrix + source_mat = np.zeros([kgrid.Nx, ]) + source_mat[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] + # apply the k-space correction + source_mat = np.real(scipy.fft.ifft(source_kappa * scipy.fft.fft(source_mat))) + # add the source values to the existing field values including the k-space correction + ux_sgx = ux_sgx + source_mat + case 'additive-no-correction': + # add the source values to the existing field values + ux_sgx[k_sim.u_source_pos_index] = ux_sgx[k_sim.u_source_pos_index] + source.ux[k_sim.u_source_sig_index, t_index] # calculate du/dx at the next time step @@ -432,25 +430,26 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # use nonlinear mass conservation equation (explicit calculation) rhox = pml_x * (pml_x * rhox - dt * (2.0 * rhox + rho0) * duxdx) + # add in the pre-scaled pressure source term as a mass source - # if options.source_p >= t_index: - # if (k_sim.source_p is not False and t_index < np.shape(source.p)[1]): - # match source.p_mode: - # case 'dirichlet': - # # enforce source values as a dirichlet boundary condition - # rhox[k_sim.p_source_pos_index] = source.p[k_sim.p_source_sig_index, t_index] - # case 'additive': - # # extract the source values into a matrix - # source_mat = np.zeros((kgrid.Nx, 1), dtype=myType) - # source_mat[k_sim.p_source_pos_index] = source.p[k_sim.p_source_sig_index, t_index] - # # apply the k-space correction - # source_mat = np.real(scipy.fft.ifft(source_kappa * scipy.fft.fft(source_mat))) - # # add the source values to the existing field values - # # including the k-space correction - # rhox = rhox + source_mat - # case 'additive-no-correction': - # # add the source values to the existing field values - # rhox[k_sim.p_source_pos_index] = rhox[k_sim.p_source_pos_index] + source.p[k_sim.p_source_sig_index, t_index] + if options.source_p >= t_index: + if (k_sim.source_p is not False and t_index < np.shape(source.p)[1]): + match source.p_mode: + case 'dirichlet': + # enforce source values as a dirichlet boundary condition + rhox[k_sim.p_source_pos_index] = source.p[k_sim.p_source_sig_index, t_index] + case 'additive': + # extract the source values into a matrix + source_mat = np.zeros((kgrid.Nx,), dtype=myType) + source_mat[k_sim.p_source_pos_index] = source.p[k_sim.p_source_sig_index, t_index] + # apply the k-space correction + source_mat = np.real(scipy.fft.ifft(source_kappa * scipy.fft.fft(source_mat))) + # add the source values to the existing field values + # including the k-space correction + rhox = rhox + source_mat + case 'additive-no-correction': + # add the source values to the existing field values + rhox[k_sim.p_source_pos_index] = rhox[k_sim.p_source_pos_index] + source.p[k_sim.p_source_sig_index, t_index] # equation of state @@ -496,7 +495,6 @@ def kspace_first_order_1D(kgrid: kWaveGrid, p0 = np.squeeze(k_sim.source.p0) - # add the initial pressure to rho as a mass source p = p0 rhox = p0 / np.squeeze(c0)**2 @@ -506,7 +504,6 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # calculate gradient using the k-space method on a regular grid ux_sgx = dt * rho0_sgx_inv * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_pos * kappa * scipy.fft.fftn(p, axes=(0,)), axes=(0,) )) / 2.0 - p_k = scipy.fft.fftn(p, axes=(0,)) p_k = np.squeeze(p_k) @@ -522,11 +519,9 @@ def kspace_first_order_1D(kgrid: kWaveGrid, dpdx = (np.append(p[2:], [0, 0]) - 27.0 * np.append(p[1:], 0) + 27.0 * p - np.append(0, p[:-1])) / (24.0 * kgrid.dx) ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0 - else: # precompute fft of p here so p can be modified for visualisation p_k = scipy.fft.fftn(p, axes=(0,)) - #p_k = p_k[:, np.newaxis] # extract required sensor data from the pressure and particle velocity # fields if the number of time steps elapsed is greater than From 1ede056484c103ace8d7176d607591cd5a235500 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Fri, 9 Jan 2026 12:30:10 +0100 Subject: [PATCH 21/29] first pass at gpu implementation --- .../ivp_1D_simulation/ivp_1D_simulation.py | 5 +- kwave/kWaveSimulation.py | 2 +- .../extract_sensor_data.py | 9 +- kwave/kspaceFirstOrder1D.py | 290 ++++++++++++------ 4 files changed, 207 insertions(+), 99 deletions(-) diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.py b/examples/ivp_1D_simulation/ivp_1D_simulation.py index 885bc3d8..0a8b73d8 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.py +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.py @@ -17,8 +17,10 @@ from kwave.ksensor import kSensor from kwave.ksource import kSource from kwave.kspaceFirstOrder1D import kspace_first_order_1D +from kwave.options.simulation_execution_options import SimulationExecutionOptions from kwave.options.simulation_options import SimulationOptions + # ========================================================================= # SIMULATION # ========================================================================= @@ -72,9 +74,10 @@ # define the simulation options simulation_options = SimulationOptions(data_cast="off", save_to_disk=False) +execution_options = SimulationExecutionOptions(is_gpu_simulation=True) # run the simulation -sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options) +sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options, execution_options=execution_options) # ========================================================================= # VISUALISATION diff --git a/kwave/kWaveSimulation.py b/kwave/kWaveSimulation.py index d3f5a808..013cb7ad 100644 --- a/kwave/kWaveSimulation.py +++ b/kwave/kWaveSimulation.py @@ -292,7 +292,7 @@ def source_p(self): def source_p_labelled(self): # time-varying pressure with labelled source mask """ Returns: - True/False if labelled/binary source mask, respectively. + True/False if labelled/binary source mask, respectively (default=False) """ flag = False diff --git a/kwave/kWaveSimulation_helper/extract_sensor_data.py b/kwave/kWaveSimulation_helper/extract_sensor_data.py index 75087c6b..15e5cb7e 100644 --- a/kwave/kWaveSimulation_helper/extract_sensor_data.py +++ b/kwave/kWaveSimulation_helper/extract_sensor_data.py @@ -1,7 +1,10 @@ import numpy as np +try: + import cupy as cp +except ImportError: + cp = None - -def extract_sensor_data(dim: int, sensor_data, file_index, sensor_mask_index, +def extract_sensor_data(dim: int, xp, sensor_data, file_index, sensor_mask_index, flags, record, p, ux_sgx, uy_sgy=None, uz_sgz=None): """ extract_sensor_data Sample field variables at the sensor locations. @@ -422,7 +425,7 @@ def extract_sensor_data(dim: int, sensor_data, file_index, sensor_mask_index, # store the time history of the acoustic pressure if flags.record_p or flags.record_I or flags.record_I_avg: if dim == 1: - sensor_data.p[:, file_index] = np.interp(np.squeeze(record.sensor_x), np.squeeze(record.grid_x), p) + sensor_data.p[:, file_index] = xp.interp(xp.squeeze(record.sensor_x), xp.squeeze(record.grid_x), xp.real(p)) else: sensor_data.p[:, file_index] = np.sum(p[record.tri] * record.bc, axis=1) diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index 2bc0f0c9..b0d251af 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -1,10 +1,14 @@ from typing import Union +from kwave.options.simulation_execution_options import SimulationExecutionOptions + try: import cupy as cp except ImportError: cp = None +import importlib + import numpy as np import scipy.fft from tqdm import tqdm @@ -17,6 +21,7 @@ from kwave.kWaveSimulation import kWaveSimulation from kwave.kWaveSimulation_helper import extract_sensor_data from kwave.options.simulation_options import SimulationOptions +from kwave.options.simulation_execution_options import SimulationExecutionOptions from kwave.utils.data import scale_time from kwave.utils.dotdictionary import dotdict from kwave.utils.math import sinc @@ -24,11 +29,67 @@ from kwave.utils.tictoc import TicToc +def get_array_module(verbose: bool = False): + """Get the appropriate array module (numpy or cupy) based on GPU availability - if cupy is available and a GPU is present, cupy is used.""" + # check if cupy is installed + cupy_spec = importlib.util.find_spec("cupy") + if verbose: + print(cupy_spec) + + if cupy_spec is None: + # if not, use numpy + return np, False + + # Try to import cupy and check for a GPU + try: + import cupy as cp + # this returns the number of visible GPUs + ngpus = cp.cuda.runtime.getDeviceCount() + if ngpus > 0: + return cp, True + except Exception: + pass + + # Fallback to numpy + return np, False + + +def to_gpu(obj, verbose: bool = False): + """Convert all numpy arrays in the given object to cupy arrays.""" + for name, val in vars(obj).items(): + if isinstance(val, np.ndarray): + if verbose: + print(f"Converting {name} to cupy array") + setattr(obj, name, cp.asarray(val)) + return obj + + +def dotdict_to_gpu(obj, verbose: bool = False): + """Convert all numpy arrays in the given dotdict to cupy arrays.""" + for name, val in obj.items(): + if isinstance(val, np.ndarray): + if verbose: + print(f"Converting {name} to cupy array") + setattr(obj, name, cp.asarray(val)) + return obj + + +def dotdict_to_cpu(obj, verbose: bool = False): + """Convert all numpy arrays in the given dotdict to numpy arrays.""" + for name, val in obj.items(): + if isinstance(val, cp.ndarray): + if verbose: + print(f"Converting {name} to numpy array") + setattr(obj, name, val.get()) + return obj + + def kspace_first_order_1D(kgrid: kWaveGrid, source: kSource, sensor: Union[NotATransducer, kSensor], medium: kWaveMedium, - simulation_options: SimulationOptions): + simulation_options: SimulationOptions, + execution_options: SimulationExecutionOptions): """ 1D time-domain simulation of wave propagation. @@ -176,7 +237,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, timer = TicToc() timer.tic() - xp = cp if cp and backend in ("auto", "gpu") else np + if execution_options.is_gpu_simulation: + xp, using_gpu = get_array_module() + print(f"Using {'cupy (GPU)' if using_gpu else 'numpy (CPU)'}") + else: + xp = np + using_gpu = False # run script to check inputs and create the required arrays k_sim = kWaveSimulation(kgrid=kgrid, source=source, sensor=sensor, medium=medium, @@ -185,11 +251,38 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # this will create the sensor_data dotdict k_sim.input_checking("kspaceFirstOrder1D") + + # ========================================================================= + # DEFINE CAST TYPE + # ========================================================================= + # aliases from kWaveSimulation class sensor_data = k_sim.sensor_data options = k_sim.options record = k_sim.record + # define the extract_options dictionary + if k_sim.use_sensor: + # run sub-function to extract the required data + extract_options = dotdict({'record_u_non_staggered': k_sim.record.u_non_staggered, + 'record_u_split_field': k_sim.record.u_split_field, + 'record_I': k_sim.record.I, + 'record_I_avg': k_sim.record.I_avg, + 'binary_sensor_mask': k_sim.binary_sensor_mask, + 'record_p': k_sim.record.p, + 'record_p_max': k_sim.record.p_max, + 'record_p_min': k_sim.record.p_min, + 'record_p_rms': k_sim.record.p_rms, + 'record_p_max_all': k_sim.record.p_max_all, + 'record_p_min_all': k_sim.record.p_min_all, + 'record_u': k_sim.record.u, + 'record_u_max': k_sim.record.u_max, + 'record_u_min': k_sim.record.u_min, + 'record_u_rms': k_sim.record.u_rms, + 'record_u_max_all': k_sim.record.u_max_all, + 'record_u_min_all': k_sim.record.u_min_all, + 'compute_directivity': False}) + # ========================================================================= # CALCULATE MEDIUM PROPERTIES ON STAGGERED GRID # ========================================================================= @@ -235,7 +328,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, kx_vec = np.squeeze(k_sim.kgrid.k_vec[0]) - c0 = medium.sound_speed + c0 = np.asarray(medium.sound_speed) # get the PML operators based on the reference sound speed and PML settings pml_x = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, False, 0) @@ -261,33 +354,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, kappa = np.squeeze(kappa) if (hasattr(options, 'source_p') and hasattr(k_sim.source, 'p_mode')) and (k_sim.source.p_mode == 'additive') or \ (hasattr(options, 'source_ux') and hasattr(k_sim.source, 'u_mode')) and (k_sim.source.u_mode == 'additive'): - source_kappa = scipy.fft.ifftshift(np.cos (c_ref * kgrid.k * kgrid.dt / 2.0)) + source_kappa = scipy.fft.ifftshift(xp.cos (c_ref * kgrid.k * kgrid.dt / 2.0)) else: kappa = 1.0 source_kappa = 1.0 - # ========================================================================= - # DATA CASTING - # ========================================================================= - - # preallocate the loop variables using the castZeros anonymous function - # (this creates a matrix of zeros in the data type specified by data_cast) - if not (options.data_cast == 'off'): - myType = np.single - else: - myType = np.double - - grid_shape = (Nx, ) - - # preallocate the loop variables - p = np.zeros(grid_shape, dtype=myType) - rhox = np.zeros(grid_shape, dtype=myType) - ux_sgx = np.zeros(grid_shape, dtype=myType) - p_k = np.zeros(grid_shape, dtype=myType) - - c0 = c0.astype(myType) - # ========================================================================= # CREATE INDEX VARIABLES # ========================================================================= @@ -340,6 +412,54 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # kspaceFirstOrder_initialiseMovieParameters; # end + # ========================================================================= + # DATA CASTING + # ========================================================================= + + # preallocate the loop variables using the castZeros anonymous function + # (this creates a matrix of zeros in the data type specified by data_cast) + if not (options.data_cast == 'off'): + my_type = xp.single + my_complex_type = xp.complex64 + else: + my_type = xp.double + my_complex_type = xp.complex128 + + grid_shape = (Nx, ) + kdim = xp.asarray(kgrid.dim) + + # preallocate the loop variables + p = xp.zeros(grid_shape, dtype=my_type) + rhox = xp.zeros(grid_shape, dtype=my_type) + ux_sgx = xp.zeros(grid_shape, dtype=my_type) + p_k = xp.zeros(grid_shape, dtype=my_type) + + c0 = xp.asarray(xp.squeeze(medium.sound_speed), dtype=my_type) + rho0 = xp.asarray(rho0, dtype=my_type) + if k_sim.medium.BonA is not None: + BonA = xp.asarray(xp.squeeze(k_sim.medium.BonA), dtype=my_type) + rho0_sgx_inv = xp.asarray(rho0_sgx_inv, dtype=my_type) + + pml_x = xp.asarray(pml_x, dtype=my_complex_type) + pml_x_sgx = xp.asarray(pml_x_sgx, dtype=my_type) + + ddx_k_shift_pos = xp.asarray(ddx_k_shift_pos, dtype=my_complex_type) + ddx_k_shift_neg = xp.asarray(ddx_k_shift_neg, dtype=my_complex_type) + kappa = xp.asarray(kappa, dtype=my_complex_type) + ddx_k = xp.asarray(ddx_k, dtype=my_complex_type) + + dt = xp.asarray(dt, dtype=my_type) + dx = xp.asarray(dx, dtype=my_type) + + sensor_mask_index = xp.asarray(k_sim.sensor_mask_index, dtype=int) + + if k_sim.source.p0 is not None: + p0 = xp.asarray(k_sim.source.p0) + + if using_gpu: + record = to_gpu(record) + sensor_data = dotdict_to_gpu(sensor_data) + # ========================================================================= # LOOP THROUGH TIME STEPS # ========================================================================= @@ -351,38 +471,38 @@ def kspace_first_order_1D(kgrid: kWaveGrid, print('\tstarting time loop...') # start time loop - for t_index in tqdm(np.arange(index_start, index_end, index_step, dtype=int)): + for t_index in tqdm(xp.arange(index_start, index_end, index_step, dtype=int)): # calculate ux at the next time step using dp/dx at the current time step if not k_sim.nonuniform_grid and not options.use_finite_difference: # calculate gradient using the k-space method on a regular grid ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - - dt * rho0_sgx_inv * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_pos * kappa * p_k, axes=(0,)))) + dt * rho0_sgx_inv * xp.real(xp.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k))) elif options.use_finite_difference: match options.use_finite_difference: case 2: # calculate gradient using second-order accurate finite # difference scheme (including half step forward) - dpdx = (np.append(p[1:], 0.0) - p) / kgrid.dx + dpdx = (xp.append(p[1:], 0.0) - p) / dx ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * dpdx ) case 4: # calculate gradient using fourth-order accurate finite # difference scheme (including half step forward) - dpdx = (np.insert(p[:-1], 0, 0) - 27.0 * p + 27 * np.append(p[1:], 0.0) - np.append(p[2:], [0, 0])) / (24.0 * kgrid.dx) + dpdx = (xp.insert(p[:-1], 0, 0) - 27.0 * p + 27 * xp.append(p[1:], 0.0) - xp.append(p[2:], [0, 0])) / (24.0 * dx) ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - dt * rho0_sgx_inv * dpdx ) else: # calculate gradient using the k-space method on a non-uniform grid # via the mapped pseudospectral method ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - - dt * rho0_sgx_inv * k_sim.kgrid.dxudxn_sgx * np.real(scipy.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) + dt * rho0_sgx_inv * k_sim.kgrid.dxudxn_sgx * xp.real(scipy.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) # add in the velocity source term - if (k_sim.source_ux is not False and t_index < np.shape(source.ux)[1]): + if (k_sim.source_ux is not False and t_index < xp.shape(source.ux)[1]): if options.source_ux >= t_index: match source.u_mode: case 'dirichlet': @@ -390,10 +510,10 @@ def kspace_first_order_1D(kgrid: kWaveGrid, ux_sgx[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] case 'additive': # extract the source values into a matrix - source_mat = np.zeros([kgrid.Nx, ]) + source_mat = xp.zeros([kgrid.Nx, ]) source_mat[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] # apply the k-space correction - source_mat = np.real(scipy.fft.ifft(source_kappa * scipy.fft.fft(source_mat))) + source_mat = xp.real(xp.fft.ifft(source_kappa * xp.fft.fft(source_mat))) # add the source values to the existing field values including the k-space correction ux_sgx = ux_sgx + source_mat case 'additive-no-correction': @@ -404,22 +524,22 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # calculate du/dx at the next time step if not k_sim.nonuniform_grid and not options.use_finite_difference: # calculate gradient using the k-space method on a regular grid - duxdx = np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * scipy.fft.fftn(ux_sgx, axes=(0,)), axes=(0,) ) ) + duxdx = xp.real(xp.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * xp.fft.fftn(ux_sgx, axes=(0,)), axes=(0,) ) ) elif options.use_finite_difference: match options.use_finite_difference: case 2: # calculate gradient using second-order accurate finite difference scheme (including half step backward) - duxdx = (ux_sgx - np.append(ux_sgx[:-1], 0)) / kgrid.dx + duxdx = (ux_sgx - xp.append(ux_sgx[:-1], 0)) / dx case 4: # calculate gradient using fourth-order accurate finite difference scheme (including half step backward) - duxdx = (np.append([0, 0], ux_sgx[:-2]) - 27.0 * np.append(0, ux_sgx[:-1]) + 27.0 * ux_sgx - np.append(ux_sgx[1:], 0)) / (24.0 * kgrid.dx) + duxdx = (xp.append([0, 0], ux_sgx[:-2]) - 27.0 * xp.append(0, ux_sgx[:-1]) + 27.0 * ux_sgx - xp.append(ux_sgx[1:], 0)) / (24.0 * dx) else: # calculate gradients using a non-uniform grid via the mapped # pseudospectral method - duxdx = kgrid.dxudxn * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * scipy.fft.fftn(ux_sgx, axes=(0,)), axes=(0,))) + duxdx = k_sim.kgrid.dxudxn * xp.real(xp.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * xp.fft.fftn(ux_sgx, axes=(0,)), axes=(0,))) # calculate rhox at the next time step @@ -432,24 +552,23 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # add in the pre-scaled pressure source term as a mass source - if options.source_p >= t_index: - if (k_sim.source_p is not False and t_index < np.shape(source.p)[1]): - match source.p_mode: - case 'dirichlet': - # enforce source values as a dirichlet boundary condition - rhox[k_sim.p_source_pos_index] = source.p[k_sim.p_source_sig_index, t_index] - case 'additive': - # extract the source values into a matrix - source_mat = np.zeros((kgrid.Nx,), dtype=myType) - source_mat[k_sim.p_source_pos_index] = source.p[k_sim.p_source_sig_index, t_index] - # apply the k-space correction - source_mat = np.real(scipy.fft.ifft(source_kappa * scipy.fft.fft(source_mat))) - # add the source values to the existing field values - # including the k-space correction - rhox = rhox + source_mat - case 'additive-no-correction': - # add the source values to the existing field values - rhox[k_sim.p_source_pos_index] = rhox[k_sim.p_source_pos_index] + source.p[k_sim.p_source_sig_index, t_index] + if (k_sim.source_p is not False and t_index < xp.shape(k_sim.source.p)[1]): + match k_sim.source.p_mode: + case 'dirichlet': + # enforce source values as a dirichlet boundary condition + rhox[k_sim.p_source_pos_index] = k_sim.source.p[k_sim.p_source_sig_index, t_index] + case 'additive': + # extract the source values into a matrix + source_mat = xp.zeros((k_sim.kgrid.Nx,), dtype=my_type) + source_mat[k_sim.p_source_pos_index] = k_sim.source.p[k_sim.p_source_sig_index, t_index] + # apply the k-space correction + source_mat = xp.real(xp.fft.ifft(source_kappa * xp.fft.fft(source_mat))) + # add the source values to the existing field values + # including the k-space correction + rhox = rhox + source_mat + case 'additive-no-correction': + # add the source values to the existing field values + rhox[k_sim.p_source_pos_index] = rhox[k_sim.p_source_pos_index] + source.p[k_sim.p_source_sig_index, t_index] # equation of state @@ -457,13 +576,13 @@ def kspace_first_order_1D(kgrid: kWaveGrid, match k_sim.equation_of_state: case 'lossless': # calculate p using a linear adiabatic equation of state - p = np.squeeze(c0**2) * np.squeeze(rhox) + p = xp.squeeze(c0**2) * xp.squeeze(rhox) case 'absorbing': # calculate p using a linear absorbing equation of state - p = np.squeeze(c0**2 * (rhox - + medium.absorb_tau * np.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,) )) - - medium.absorb_eta * np.real(scipy.fft.ifftn(medium.absorb_nabla2 * scipy.fft.fftn(rhox, axes=(0,)), axes=(0,))) ) ) + p = xp.squeeze(c0**2 * (rhox + + medium.absorb_tau * xp.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,) )) + - medium.absorb_eta * xp.real(scipy.fft.ifftn(medium.absorb_nabla2 * scipy.fft.fftn(rhox, axes=(0,)), axes=(0,))) ) ) case 'stokes': # calculate p using a linear absorbing equation of state @@ -474,55 +593,56 @@ def kspace_first_order_1D(kgrid: kWaveGrid, match k_sim.equation_of_state: case 'lossless': # calculate p using a nonlinear adiabatic equation of state - p = c0**2 * (rhox + medium.BonA * rhox**2 / (2.0 * rho0)) + p = c0**2 * (rhox + BonA * rhox**2 / (2.0 * rho0)) case 'absorbing': # calculate p using a nonlinear absorbing equation of state - p = c0**2 * ( rhox - + medium.absorb_tau * np.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,))) - - medium.absorb_eta * np.real(scipy.fft.ifftn(medium.absorb_nabla2 * scipy.fft.fftn(rhox, axes=(0,)), axes=(0,))) - + medium.BonA * rhox**2 / (2.0 * rho0) ) + p = c0**2 * (rhox + + medium.absorb_tau * xp.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,))) + - medium.absorb_eta * xp.real(scipy.fft.ifftn(medium.absorb_nabla2 * scipy.fft.fftn(rhox, axes=(0,)), axes=(0,))) + + medium.BonA * rhox**2 / (2.0 * rho0)) case 'stokes': # calculate p using a nonlinear absorbing equation of state # assuming alpha_power = 2 p = c0**2 * (rhox + medium.absorb_tau * rho0 * duxdx - + medium.BonA * rhox**2 / (2.0 * rho0) ) + + BonA * rhox**2 / (2.0 * rho0) ) # enforce initial conditions if source.p0 is defined instead of time varying sources if t_index == 0 and k_sim.source.p0 is not None: - p0 = np.squeeze(k_sim.source.p0) + p0 = xp.squeeze(p0) # add the initial pressure to rho as a mass source p = p0 - rhox = p0 / np.squeeze(c0)**2 + rhox = p0 / xp.squeeze(c0)**2 # compute u(t = t1 - dt/2) based on u(dt/2) = -u(-dt/2) which forces u(t = t1) = 0 if not options.use_finite_difference: # calculate gradient using the k-space method on a regular grid - ux_sgx = dt * rho0_sgx_inv * np.real(scipy.fft.ifftn(ddx_k * ddx_k_shift_pos * kappa * scipy.fft.fftn(p, axes=(0,)), axes=(0,) )) / 2.0 - p_k = scipy.fft.fftn(p, axes=(0,)) - p_k = np.squeeze(p_k) + ux_sgx = dt * rho0_sgx_inv * xp.real(xp.fft.ifftn(ddx_k * ddx_k_shift_pos * kappa * xp.fft.fftn(p, axes=(0,)), axes=(0,) )) / 2.0 + p_k = xp.fft.fftn(p, axes=(0,)) + p_k = xp.squeeze(p_k) else: match options.use_finite_difference: case 2: # calculate gradient using second-order accurate finite difference scheme (including half step forward) - dpdx = (np.append(p[1:], 0.0) - p) / kgrid.dx + dpdx = (xp.append(p[1:], 0.0) - p) / kgrid.dx ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0 case 4: # calculate gradient using fourth-order accurate finite difference scheme (including half step backward) - dpdx = (np.append(p[2:], [0, 0]) - 27.0 * np.append(p[1:], 0) + 27.0 * p - np.append(0, p[:-1])) / (24.0 * kgrid.dx) + dpdx = (xp.append(p[2:], [0, 0]) - 27.0 * xp.append(p[1:], 0) + 27.0 * p - xp.append(0, p[:-1])) / (24.0 * kgrid.dx) ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0 else: # precompute fft of p here so p can be modified for visualisation - p_k = scipy.fft.fftn(p, axes=(0,)) + p_k = xp.fft.fftn(p, axes=(0,)) + # extract required sensor data from the pressure and particle velocity # fields if the number of time steps elapsed is greater than # sensor.record_start_index (defaults to 1) @@ -530,32 +650,14 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # update index for data storage file_index: int = t_index - sensor.record_start_index - - # run sub-function to extract the required data - extract_options = dotdict({'record_u_non_staggered': k_sim.record.u_non_staggered, - 'record_u_split_field': k_sim.record.u_split_field, - 'record_I': k_sim.record.I, - 'record_I_avg': k_sim.record.I_avg, - 'binary_sensor_mask': k_sim.binary_sensor_mask, - 'record_p': k_sim.record.p, - 'record_p_max': k_sim.record.p_max, - 'record_p_min': k_sim.record.p_min, - 'record_p_rms': k_sim.record.p_rms, - 'record_p_max_all': k_sim.record.p_max_all, - 'record_p_min_all': k_sim.record.p_min_all, - 'record_u': k_sim.record.u, - 'record_u_max': k_sim.record.u_max, - 'record_u_min': k_sim.record.u_min, - 'record_u_rms': k_sim.record.u_rms, - 'record_u_max_all': k_sim.record.u_max_all, - 'record_u_min_all': k_sim.record.u_min_all, - 'compute_directivity': False}) # run sub-function to extract the required data from the acoustic variables - sensor_data = extract_sensor_data(kgrid.dim, sensor_data, file_index, k_sim.sensor_mask_index, + sensor_data = extract_sensor_data(kdim, xp, sensor_data, file_index, sensor_mask_index, extract_options, record, p, ux_sgx) + if using_gpu: + sensor_data = dotdict_to_cpu(sensor_data) return sensor_data From cdff8f986f25cb151e6fc156e37b924bc0c9a3ef Mon Sep 17 00:00:00 2001 From: David Sinden Date: Fri, 9 Jan 2026 12:52:16 +0100 Subject: [PATCH 22/29] ruff fixes --- examples/ivp_1D_simulation/ivp_1D_simulation.ipynb | 4 +++- examples/ivp_1D_simulation/ivp_1D_simulation.py | 1 - kwave/kWaveSimulation_helper/extract_sensor_data.py | 1 + kwave/kspaceFirstOrder1D.py | 2 +- 4 files changed, 5 insertions(+), 3 deletions(-) diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb index 8121ea73..df8ed299 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb @@ -47,6 +47,7 @@ "from kwave.ksensor import kSensor\n", "from kwave.ksource import kSource\n", "from kwave.kspaceFirstOrder1D import kspace_first_order_1D\n", + "from kwave.options.simulation_execution_options import SimulationExecutionOptions\n", "from kwave.options.simulation_options import SimulationOptions" ] }, @@ -152,9 +153,10 @@ "source": [ "# define the simulation options\n", "simulation_options = SimulationOptions(data_cast=\"off\", save_to_disk=False)\n", + "execution_options = SimulationExecutionOptions(is_gpu_simulation=True)\n", "\n", "# run the simulation\n", - "sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options)" + "sensor_data = kspace_first_order_1D(kgrid, source, sensor, medium, simulation_options=simulation_options, execution_options=execution_options)" ] }, { diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.py b/examples/ivp_1D_simulation/ivp_1D_simulation.py index 0a8b73d8..6101979f 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.py +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.py @@ -20,7 +20,6 @@ from kwave.options.simulation_execution_options import SimulationExecutionOptions from kwave.options.simulation_options import SimulationOptions - # ========================================================================= # SIMULATION # ========================================================================= diff --git a/kwave/kWaveSimulation_helper/extract_sensor_data.py b/kwave/kWaveSimulation_helper/extract_sensor_data.py index 15e5cb7e..476d2958 100644 --- a/kwave/kWaveSimulation_helper/extract_sensor_data.py +++ b/kwave/kWaveSimulation_helper/extract_sensor_data.py @@ -1,4 +1,5 @@ import numpy as np + try: import cupy as cp except ImportError: diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index b0d251af..80be1bd6 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -20,8 +20,8 @@ from kwave.ktransducer import NotATransducer from kwave.kWaveSimulation import kWaveSimulation from kwave.kWaveSimulation_helper import extract_sensor_data -from kwave.options.simulation_options import SimulationOptions from kwave.options.simulation_execution_options import SimulationExecutionOptions +from kwave.options.simulation_options import SimulationOptions from kwave.utils.data import scale_time from kwave.utils.dotdictionary import dotdict from kwave.utils.math import sinc From 924457a100cf3d8b673bc3e48bd0a05fb6e2c054 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Fri, 9 Jan 2026 13:50:00 +0100 Subject: [PATCH 23/29] minor optimizations and add readme to example. --- examples/ivp_1D_simulation/README.md | 7 +++++++ kwave/kspaceFirstOrder1D.py | 13 ++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) create mode 100644 examples/ivp_1D_simulation/README.md diff --git a/examples/ivp_1D_simulation/README.md b/examples/ivp_1D_simulation/README.md new file mode 100644 index 00000000..cf6be017 --- /dev/null +++ b/examples/ivp_1D_simulation/README.md @@ -0,0 +1,7 @@ +Simulations In One Dimension Example + + [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/waltsims/k-wave-python/blob/HEAD/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb) + +This example provides a simple demonstration for the simulation and detection of the pressure field generated by an initial pressure distribution within a one-dimensional heterogeneous propagation medium. + +To read more, visit the [original example page](http://www.k-wave.org/documentation/example_ivp_1D_simulation.php). \ No newline at end of file diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index 80be1bd6..b76959e8 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -4,8 +4,10 @@ try: import cupy as cp + from cupy import fft as cp_fft except ImportError: cp = None + cp_fft = None import importlib @@ -460,6 +462,11 @@ def kspace_first_order_1D(kgrid: kWaveGrid, record = to_gpu(record) sensor_data = dotdict_to_gpu(sensor_data) + if k_sim.source_ux is not False: + source_mat = xp.zeros((Nx,), dtype=my_type) + if k_sim.source_p is not False: + source_mat = xp.zeros((Nx,), dtype=my_type) + # ========================================================================= # LOOP THROUGH TIME STEPS # ========================================================================= @@ -471,7 +478,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, print('\tstarting time loop...') # start time loop - for t_index in tqdm(xp.arange(index_start, index_end, index_step, dtype=int)): + for t_index in tqdm(range(index_start, index_end, index_step)): # calculate ux at the next time step using dp/dx at the current time step if not k_sim.nonuniform_grid and not options.use_finite_difference: @@ -510,7 +517,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, ux_sgx[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] case 'additive': # extract the source values into a matrix - source_mat = xp.zeros([kgrid.Nx, ]) + source_mat.fill(0) source_mat[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] # apply the k-space correction source_mat = xp.real(xp.fft.ifft(source_kappa * xp.fft.fft(source_mat))) @@ -559,7 +566,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, rhox[k_sim.p_source_pos_index] = k_sim.source.p[k_sim.p_source_sig_index, t_index] case 'additive': # extract the source values into a matrix - source_mat = xp.zeros((k_sim.kgrid.Nx,), dtype=my_type) + source_mat = source_mat.fill(0) source_mat[k_sim.p_source_pos_index] = k_sim.source.p[k_sim.p_source_sig_index, t_index] # apply the k-space correction source_mat = xp.real(xp.fft.ifft(source_kappa * xp.fft.fft(source_mat))) From c8804359cdf4c096b9e935221ca039776dd855af Mon Sep 17 00:00:00 2001 From: David Sinden Date: Sun, 11 Jan 2026 23:29:59 +0100 Subject: [PATCH 24/29] Fix sensor data extraction and k-space method issues Corrects the use of np.minimum for p_min in extract_sensor_data, adds missing uz_min assignment for 3D, and fixes several issues in kspaceFirstOrder1D: uses xp.fft instead of scipy.fft for compatibility, corrects source_mat zeroing, and fixes a conditional for source_ux. Also improves logging in getWin_test.py and clarifies units in ivp_1D_simulation.py. Adds dimension check for ky_norm calculation in create_storage_variables. --- examples/ivp_1D_simulation/ivp_1D_simulation.py | 2 +- .../create_storage_variables.py | 7 ++++--- .../kWaveSimulation_helper/extract_sensor_data.py | 3 ++- kwave/kspaceFirstOrder1D.py | 15 ++++++++------- .../python_testers/getWin_test.py | 3 ++- 5 files changed, 17 insertions(+), 13 deletions(-) diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.py b/examples/ivp_1D_simulation/ivp_1D_simulation.py index 6101979f..33f0401b 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.py +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.py @@ -60,7 +60,7 @@ sensor.record = ["p"] # this hack is needed to ensure that the sensor is in [1,2] dimensions -mask = np.array([-10e-3, 10e-3]) # [mm] +mask = np.array([-10e-3, 10e-3]) # [m] mask = mask[:, np.newaxis].T sensor.mask = mask diff --git a/kwave/kWaveSimulation_helper/create_storage_variables.py b/kwave/kWaveSimulation_helper/create_storage_variables.py index 275ff961..85511019 100644 --- a/kwave/kWaveSimulation_helper/create_storage_variables.py +++ b/kwave/kWaveSimulation_helper/create_storage_variables.py @@ -247,9 +247,10 @@ def create_normalized_wavenumber_vectors(record: Recorder, kgrid: kWaveGrid, is_ record.kx_norm = ifftshift(record.kx_norm) # y-dimension - record.ky_norm = kgrid.ky / kgrid.k - record.ky_norm[kgrid.k == 0] = 0 - record.ky_norm = ifftshift(record.ky_norm) + if kgrid.dim >= 2: + record.ky_norm = kgrid.ky / kgrid.k + record.ky_norm[kgrid.k == 0] = 0 + record.ky_norm = ifftshift(record.ky_norm) # z-dimension if kgrid.dim == 3: diff --git a/kwave/kWaveSimulation_helper/extract_sensor_data.py b/kwave/kWaveSimulation_helper/extract_sensor_data.py index 476d2958..37733b62 100644 --- a/kwave/kWaveSimulation_helper/extract_sensor_data.py +++ b/kwave/kWaveSimulation_helper/extract_sensor_data.py @@ -301,7 +301,7 @@ def extract_sensor_data(dim: int, xp, sensor_data, file_index, sensor_mask_index if file_index == 0: sensor_data[cuboid_index].p_min = result else: - sensor_data[cuboid_index].p_min = np.maximum(sensor_data[cuboid_index].p_min, result) + sensor_data[cuboid_index].p_min = np.minimum(sensor_data[cuboid_index].p_min, result) # store the rms acoustic pressure if flags.record_p_rms: @@ -543,6 +543,7 @@ def extract_sensor_data(dim: int, xp, sensor_data, file_index, sensor_mask_index elif (dim == 3): sensor_data.ux_min = np.minimum(sensor_data.ux_min, np.sum(ux_sgx[record.tri] * record.bc, axis=1)) sensor_data.uy_min = np.minimum(sensor_data.uy_min, np.sum(uy_sgy[record.tri] * record.bc, axis=1)) + sensor_data.uz_min = np.minimum(sensor_data.uz_min, np.sum(uz_sgz[record.tri] * record.bc, axis=1)) else: raise RuntimeError("Wrong dimensions") diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index b76959e8..b2c77038 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -354,6 +354,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, if options.use_kspace: kappa = scipy.fft.ifftshift(sinc(c_ref * kgrid.k * kgrid.dt / 2.0)) kappa = np.squeeze(kappa) + source_kappa = 1.0 if (hasattr(options, 'source_p') and hasattr(k_sim.source, 'p_mode')) and (k_sim.source.p_mode == 'additive') or \ (hasattr(options, 'source_ux') and hasattr(k_sim.source, 'u_mode')) and (k_sim.source.u_mode == 'additive'): source_kappa = scipy.fft.ifftshift(xp.cos (c_ref * kgrid.k * kgrid.dt / 2.0)) @@ -505,12 +506,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # calculate gradient using the k-space method on a non-uniform grid # via the mapped pseudospectral method ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - - dt * rho0_sgx_inv * k_sim.kgrid.dxudxn_sgx * xp.real(scipy.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) + dt * rho0_sgx_inv * k_sim.kgrid.dxudxn_sgx * xp.real(xp.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) # add in the velocity source term if (k_sim.source_ux is not False and t_index < xp.shape(source.ux)[1]): - if options.source_ux >= t_index: + if k_sim.source_ux >= t_index: match source.u_mode: case 'dirichlet': # enforce the source values as a dirichlet boundary condition @@ -566,7 +567,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, rhox[k_sim.p_source_pos_index] = k_sim.source.p[k_sim.p_source_sig_index, t_index] case 'additive': # extract the source values into a matrix - source_mat = source_mat.fill(0) + source_mat.fill(0) source_mat[k_sim.p_source_pos_index] = k_sim.source.p[k_sim.p_source_sig_index, t_index] # apply the k-space correction source_mat = xp.real(xp.fft.ifft(source_kappa * xp.fft.fft(source_mat))) @@ -588,8 +589,8 @@ def kspace_first_order_1D(kgrid: kWaveGrid, case 'absorbing': # calculate p using a linear absorbing equation of state p = xp.squeeze(c0**2 * (rhox - + medium.absorb_tau * xp.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,) )) - - medium.absorb_eta * xp.real(scipy.fft.ifftn(medium.absorb_nabla2 * scipy.fft.fftn(rhox, axes=(0,)), axes=(0,))) ) ) + + medium.absorb_tau * xp.real(xp.fft.ifftn(medium.absorb_nabla1 * xp.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,) )) + - medium.absorb_eta * xp.real(xp.fft.ifftn(medium.absorb_nabla2 * xp.fft.fftn(rhox, axes=(0,)), axes=(0,))) ) ) case 'stokes': # calculate p using a linear absorbing equation of state @@ -605,8 +606,8 @@ def kspace_first_order_1D(kgrid: kWaveGrid, case 'absorbing': # calculate p using a nonlinear absorbing equation of state p = c0**2 * (rhox - + medium.absorb_tau * xp.real(scipy.fft.ifftn(medium.absorb_nabla1 * scipy.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,))) - - medium.absorb_eta * xp.real(scipy.fft.ifftn(medium.absorb_nabla2 * scipy.fft.fftn(rhox, axes=(0,)), axes=(0,))) + + medium.absorb_tau * xp.real(xp.fft.ifftn(medium.absorb_nabla1 * xp.fft.fftn(rho0 * duxdx, axes=(0,)), axes=(0,))) + - medium.absorb_eta * xp.real(xp.fft.ifftn(medium.absorb_nabla2 * xp.fft.fftn(rhox, axes=(0,)), axes=(0,))) + medium.BonA * rhox**2 / (2.0 * rho0)) case 'stokes': diff --git a/tests/matlab_test_data_collectors/python_testers/getWin_test.py b/tests/matlab_test_data_collectors/python_testers/getWin_test.py index 2a0be0cf..3ab74f42 100644 --- a/tests/matlab_test_data_collectors/python_testers/getWin_test.py +++ b/tests/matlab_test_data_collectors/python_testers/getWin_test.py @@ -31,7 +31,8 @@ def test_get_win(): N = np.squeeze(N) - logging.log(logging.INFO, N, type_, param, rotation, symmetric, square) + logging.log(logging.INFO, "N=%s, type_=%s, param=%s, rotation=%s, symmetric=%s, square=%s", + N, type_, param, rotation, symmetric, square) if (np.isscalar(N) and N > 1) or (isinstance(N, np.ndarray) and N.all() > 1): win_py, cg_py = get_win(N, type_, param=param, rotation=rotation, symmetric=symmetric, square=square) From ef39cb8a2f27db7aeed298b250c9af4b3e0f8f4b Mon Sep 17 00:00:00 2001 From: David Sinden Date: Mon, 12 Jan 2026 12:11:02 +0100 Subject: [PATCH 25/29] some fixes --- .../ivp_1D_simulation/ivp_1D_simulation.ipynb | 6 +- kwave/kWaveSimulation.py | 59 +++++++++++++------ kwave/kspaceFirstOrder1D.py | 16 +++-- .../python_testers/getWin_test.py | 2 +- 4 files changed, 54 insertions(+), 29 deletions(-) diff --git a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb index df8ed299..37569b11 100644 --- a/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb +++ b/examples/ivp_1D_simulation/ivp_1D_simulation.ipynb @@ -6,7 +6,7 @@ "id": "RQYRmOfG27FV" }, "source": [ - "First install the pacakage using the latests version." + "First install the package using the latest version." ] }, { @@ -27,7 +27,7 @@ "id": "7lktiAv5XtsS" }, "source": [ - "Now import dependencies, and party of library which are required." + "Now import dependencies, and parts of library which are required." ] }, { @@ -129,7 +129,7 @@ "sensor.record = [\"p\"]\n", "\n", "# this hack is needed to ensure that the sensor is in [1,2] dimensions\n", - "mask = np.array([-10e-3, 10e-3]) # [mm]\n", + "mask = np.array([-10e-3, 10e-3]) # [m]\n", "mask = mask[:, np.newaxis].T\n", "sensor.mask = mask" ] diff --git a/kwave/kWaveSimulation.py b/kwave/kWaveSimulation.py index 013cb7ad..1f0a201b 100644 --- a/kwave/kWaveSimulation.py +++ b/kwave/kWaveSimulation.py @@ -256,7 +256,6 @@ def source_p0(self): # initial pressure """ Returns: Whether initial pressure source is present (default=False) - """ flag = False # default if not isinstance(self.source, NotATransducer) and self.source.p0 is not None: @@ -264,22 +263,22 @@ def source_p0(self): # initial pressure flag = True return flag + @property def source_p0_elastic(self): # initial pressure in the elastic code """ Returns: Whether initial pressure source is present in the elastic code (default=False) - """ # Not clear where this flag is set return False + @property def source_p(self): """ Returns: Whether time-varying pressure source is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.p is not None: @@ -288,12 +287,12 @@ def source_p(self): flag = len(self.source.p[0]) return flag + @property - def source_p_labelled(self): # time-varying pressure with labelled source mask + def source_p_labelled(self) -> bool: # time-varying pressure with labelled source mask """ Returns: True/False if labelled/binary source mask, respectively (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.p is not None: @@ -302,12 +301,12 @@ def source_p_labelled(self): # time-varying pressure with labelled source mask flag = not (p_unique.size <= 2 and p_unique.sum() == 1) return flag + @property - def source_ux(self) -> bool: + def source_ux(self): """ Returns: Whether time-varying particle velocity source is used in X-direction - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.ux is not None: @@ -316,12 +315,12 @@ def source_ux(self) -> bool: flag = len(self.source.ux[0]) return flag + @property - def source_uy(self) -> bool: + def source_uy(self): """ Returns: Whether time-varying particle velocity source is used in Y-direction - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.uy is not None: @@ -330,12 +329,12 @@ def source_uy(self) -> bool: flag = len(self.source.uy[0]) return flag + @property - def source_uz(self) -> bool: + def source_uz(self): """ Returns: Whether time-varying particle velocity source is used in Z-direction - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.uz is not None: @@ -344,12 +343,12 @@ def source_uz(self) -> bool: flag = len(self.source.uz[0]) return flag + @property def source_u_labelled(self): """ Returns: Whether time-varying velocity source with labelled source mask is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.u_mask is not None: @@ -363,6 +362,7 @@ def source_u_labelled(self): flag = True return flag + @property def source_sxx(self): """ @@ -374,72 +374,72 @@ def source_sxx(self): flag = len(self.source.sxx[0]) return flag + @property def source_syy(self): """ Returns: Whether time-varying stress source in Y->Y direction is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.syy is not None: flag = len(self.source.syy[0]) return flag + @property def source_szz(self): """ Returns: Whether time-varying stress source in Z->Z direction is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.szz is not None: flag = len(self.source.szz[0]) return flag + @property def source_sxy(self): """ Returns: Whether time-varying stress source in X->Y direction is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.sxy is not None: flag = len(self.source.sxy[0]) return flag + @property def source_sxz(self): """ Returns: Whether time-varying stress source in X->Z direction is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.sxz is not None: flag = len(self.source.sxz[0]) return flag + @property def source_syz(self): """ Returns: Whether time-varying stress source in Y->Z direction is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.syz is not None: flag = len(self.source.syz[0]) return flag + @property def source_s_labelled(self): """ Returns: Whether time-varying stress source with labelled source mask is present (default=False) - """ flag = False if not isinstance(self.source, NotATransducer) and self.source.s_mask is not None: @@ -453,6 +453,7 @@ def source_s_labelled(self): flag = True return flag + @property def use_w_source_correction_p(self): """ @@ -465,6 +466,7 @@ def use_w_source_correction_p(self): flag = True return flag + @property def use_w_source_correction_u(self): """ @@ -478,6 +480,7 @@ def use_w_source_correction_u(self): flag = True return flag + def input_checking(self, calling_func_name) -> None: """ Check the input fields for correctness and validness @@ -598,7 +601,7 @@ def input_checking(self, calling_func_name) -> None: values, flags, self.record) - + # create perfectly matched layer indices indices self.create_pml_indices( kgrid_dim=self.kgrid.dim, kgrid_N=Vector(self.kgrid.N), @@ -607,6 +610,7 @@ def input_checking(self, calling_func_name) -> None: is_axisymmetric=opt.simulation_type.is_axisymmetric(), ) + @staticmethod def check_calling_func_name_and_dim(calling_func_name, kgrid_dim) -> None: """ @@ -628,6 +632,7 @@ def check_calling_func_name_and_dim(calling_func_name, kgrid_dim) -> None: elif calling_func_name in ["kspaceFirstOrder3D", "pstdElastic3D", "kspaceElastic3D"]: assert kgrid_dim == 3, f"kgrid has the wrong dimensionality for {calling_func_name}." + @staticmethod def print_start_status(is_elastic_code: bool) -> None: """ @@ -645,6 +650,7 @@ def print_start_status(is_elastic_code: bool) -> None: logging.log(logging.INFO, "Running k-Wave simulation...") logging.log(logging.INFO, f" start time: {get_date_string()}") + def set_index_data_type(self) -> None: """ Pre-calculate the data type needed to store the matrix indices given the @@ -656,6 +662,7 @@ def set_index_data_type(self) -> None: total_grid_points = self.kgrid.total_grid_points self.index_data_type = get_smallest_possible_type(total_grid_points, "uint", default="double") + @staticmethod def check_medium(medium, kgrid_k, simulation_type: SimulationType) -> bool: """ @@ -688,6 +695,7 @@ def check_medium(medium, kgrid_k, simulation_type: SimulationType) -> bool: medium.check_fields(kgrid_k.shape) return user_medium_density_input + def check_sensor(self, kgrid_dim) -> None: """ Check the Sensor properties for correctness and validity @@ -903,6 +911,7 @@ def check_sensor(self, kgrid_dim) -> None: if kgrid_dim == 2 and self.use_sensor and self.compute_directivity and self.time_rev: logging.log(logging.WARN, "sensor directivity fields are not used for time reversal.") + def check_source(self, k_dim, k_Nt) -> None: """ Check the source properties for correctness and validity @@ -1086,6 +1095,7 @@ def check_source(self, k_dim, k_Nt) -> None: # clean up unused variables del active_elements_mask + def check_kgrid_time(self) -> None: """ Check time-related kWaveGrid inputs @@ -1128,6 +1138,7 @@ def check_kgrid_time(self) -> None: if self.kgrid.dt > dt_stability_limit: logging.log(logging.WARN, " time step may be too large for a stable simulation.") + @staticmethod def select_precision(opt: SimulationOptions): """ @@ -1161,6 +1172,7 @@ def select_precision(opt: SimulationOptions): raise ValueError("'Unknown ''DataCast'' option'") return precision + def check_input_combinations(self, opt: SimulationOptions, user_medium_density_input: bool, k_dim, pml_size, kgrid_N) -> None: """ Check the input combinations for correctness and validity @@ -1270,6 +1282,7 @@ def check_input_combinations(self, opt: SimulationOptions, user_medium_density_i if k.endswith("_DEF"): delattr(self, k) + def smooth_and_enlarge(self, source, k_dim, kgrid_N, opt: SimulationOptions) -> None: """ Smooth and enlarge grids @@ -1392,6 +1405,7 @@ def smooth_and_enlarge(self, source, k_dim, kgrid_N, opt: SimulationOptions) -> logging.log(logging.INFO, "smoothing density distribution...") self.medium.density = smooth(self.medium.density) + def create_sensor_variables(self) -> None: """ Create the sensor related variables @@ -1449,6 +1463,7 @@ def create_sensor_variables(self) -> None: # set the sensor mask index variable to be empty self.sensor_mask_index = [] + def create_absorption_vars(self) -> None: """ Create absorption variables for the fluid code based on @@ -1462,6 +1477,7 @@ def create_absorption_vars(self) -> None: self.kgrid, self.medium, self.equation_of_state ) + def assign_pseudonyms(self, medium: kWaveMedium, kgrid: kWaveGrid) -> None: """ Shorten commonly used field names (these act only as pointers provided that the values aren't modified) @@ -1478,6 +1494,7 @@ def assign_pseudonyms(self, medium: kWaveMedium, kgrid: kWaveGrid) -> None: self.rho0 = medium.density self.c0 = medium.sound_speed + def scale_source_terms(self, is_scale_source_terms) -> None: """ Scale the source terms based on the expanded and smoothed values of the medium parameters @@ -1535,6 +1552,7 @@ def scale_source_terms(self, is_scale_source_terms) -> None: ), ) + def create_pml_indices(self, kgrid_dim, kgrid_N: Vector, pml_size, pml_inside, is_axisymmetric): """ Define index variables to remove the PML from the display if the optional @@ -1575,10 +1593,12 @@ def create_pml_indices(self, kgrid_dim, kgrid_N: Vector, pml_size, pml_inside, i # self.record = Recorder() self.record.set_index_variables(self.kgrid, pml_size, pml_inside, is_axisymmetric) + @deprecated(version="0.4.1", reason="Use TimeReversal class instead") def check_time_reversal(self) -> bool: return self.time_rev + def _is_binary_sensor_mask(self, kgrid_dim: int) -> bool: """ Check if the sensor mask is a binary grid matching the kgrid dimensions. @@ -1617,6 +1637,7 @@ def _is_binary_sensor_mask(self, kgrid_dim: int) -> bool: return mask_shape == expected_shape + def _is_cuboid_corners_mask(self, kgrid_dim: int) -> bool: """ Check if the sensor mask is defined as cuboid corners. diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index b2c77038..1bb59ffd 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -32,14 +32,15 @@ def get_array_module(verbose: bool = False): - """Get the appropriate array module (numpy or cupy) based on GPU availability - if cupy is available and a GPU is present, cupy is used.""" + """Get the appropriate array module (numpy or cupy) based on GPU + availability - if cupy is available and a GPU is present, cupy is used.""" # check if cupy is installed cupy_spec = importlib.util.find_spec("cupy") if verbose: print(cupy_spec) if cupy_spec is None: - # if not, use numpy + # if cupy not installed, use numpy return np, False # Try to import cupy and check for a GPU @@ -52,7 +53,7 @@ def get_array_module(verbose: bool = False): except Exception: pass - # Fallback to numpy + # Fallback (cupy but no gpu) to numpy return np, False @@ -78,6 +79,9 @@ def dotdict_to_gpu(obj, verbose: bool = False): def dotdict_to_cpu(obj, verbose: bool = False): """Convert all numpy arrays in the given dotdict to numpy arrays.""" + # case in which gpu is present, but cupy is not used + if cp is None: + return obj for name, val in obj.items(): if isinstance(val, cp.ndarray): if verbose: @@ -355,9 +359,9 @@ def kspace_first_order_1D(kgrid: kWaveGrid, kappa = scipy.fft.ifftshift(sinc(c_ref * kgrid.k * kgrid.dt / 2.0)) kappa = np.squeeze(kappa) source_kappa = 1.0 - if (hasattr(options, 'source_p') and hasattr(k_sim.source, 'p_mode')) and (k_sim.source.p_mode == 'additive') or \ - (hasattr(options, 'source_ux') and hasattr(k_sim.source, 'u_mode')) and (k_sim.source.u_mode == 'additive'): - source_kappa = scipy.fft.ifftshift(xp.cos (c_ref * kgrid.k * kgrid.dt / 2.0)) + if (hasattr(k_sim, 'source_p') and hasattr(k_sim.source, 'p_mode')) and (k_sim.source.p_mode == 'additive') or \ + (hasattr(k_sim, 'source_ux') and hasattr(k_sim.source, 'u_mode')) and (k_sim.source.u_mode == 'additive'): + source_kappa = scipy.fft.ifftshift(np.cos (c_ref * kgrid.k * kgrid.dt / 2.0)) else: kappa = 1.0 source_kappa = 1.0 diff --git a/tests/matlab_test_data_collectors/python_testers/getWin_test.py b/tests/matlab_test_data_collectors/python_testers/getWin_test.py index 3ab74f42..4396727d 100644 --- a/tests/matlab_test_data_collectors/python_testers/getWin_test.py +++ b/tests/matlab_test_data_collectors/python_testers/getWin_test.py @@ -34,7 +34,7 @@ def test_get_win(): logging.log(logging.INFO, "N=%s, type_=%s, param=%s, rotation=%s, symmetric=%s, square=%s", N, type_, param, rotation, symmetric, square) - if (np.isscalar(N) and N > 1) or (isinstance(N, np.ndarray) and N.all() > 1): + if (np.isscalar(N) and N > 1) or (isinstance(N, np.ndarray) and (N > 1).all()): win_py, cg_py = get_win(N, type_, param=param, rotation=rotation, symmetric=symmetric, square=square) cg = reader.expected_value_of("cg") From c4fe6c2884541879bf47e93a93e15108c4b98fee Mon Sep 17 00:00:00 2001 From: David Sinden Date: Mon, 12 Jan 2026 13:01:40 +0100 Subject: [PATCH 26/29] Update kspaceFirstOrder1D.py --- kwave/kspaceFirstOrder1D.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index 1bb59ffd..a821ca86 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -1,7 +1,5 @@ from typing import Union -from kwave.options.simulation_execution_options import SimulationExecutionOptions - try: import cupy as cp from cupy import fft as cp_fft @@ -50,7 +48,7 @@ def get_array_module(verbose: bool = False): ngpus = cp.cuda.runtime.getDeviceCount() if ngpus > 0: return cp, True - except Exception: + except Exception(ImportError, RuntimeError, cp.cuda.runtime.CUDARuntimeError): pass # Fallback (cupy but no gpu) to numpy @@ -73,7 +71,7 @@ def dotdict_to_gpu(obj, verbose: bool = False): if isinstance(val, np.ndarray): if verbose: print(f"Converting {name} to cupy array") - setattr(obj, name, cp.asarray(val)) + obj[name] = cp.asarray(val) return obj @@ -86,7 +84,7 @@ def dotdict_to_cpu(obj, verbose: bool = False): if isinstance(val, cp.ndarray): if verbose: print(f"Converting {name} to numpy array") - setattr(obj, name, val.get()) + obj[name] = val.get() return obj @@ -514,7 +512,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # add in the velocity source term - if (k_sim.source_ux is not False and t_index < xp.shape(source.ux)[1]): + if (k_sim.source_ux is not False and t_index < source.ux.shape[1]): if k_sim.source_ux >= t_index: match source.u_mode: case 'dirichlet': @@ -564,7 +562,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # add in the pre-scaled pressure source term as a mass source - if (k_sim.source_p is not False and t_index < xp.shape(k_sim.source.p)[1]): + if (k_sim.source_p is not False and t_index < k_sim.source.p.shape[1]): match k_sim.source.p_mode: case 'dirichlet': # enforce source values as a dirichlet boundary condition From 56173552db0f30004448582fbfdaeac1186b1e97 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Mon, 12 Jan 2026 13:55:53 +0100 Subject: [PATCH 27/29] more fixes --- kwave/kspaceFirstOrder1D.py | 39 ++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index a821ca86..0831fb77 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -48,7 +48,7 @@ def get_array_module(verbose: bool = False): ngpus = cp.cuda.runtime.getDeviceCount() if ngpus > 0: return cp, True - except Exception(ImportError, RuntimeError, cp.cuda.runtime.CUDARuntimeError): + except (ImportError, RuntimeError, AttributeError): pass # Fallback (cupy but no gpu) to numpy @@ -335,8 +335,8 @@ def kspace_first_order_1D(kgrid: kWaveGrid, c0 = np.asarray(medium.sound_speed) # get the PML operators based on the reference sound speed and PML settings - pml_x = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, False, 0) - pml_x_sgx = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, True, 0) + pml_x = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, False, 1) + pml_x_sgx = get_pml(Nx, dx, dt, c_ref, pml_x_size, pml_x_alpha, True, 1) pml_x = np.squeeze(pml_x) pml_x_sgx = np.squeeze(pml_x_sgx) @@ -375,7 +375,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, index_end: int = Nt # These should be zero indexed - if hasattr(k_sim, 's_source_sig_index') and k_sim.s_source_pos_index is not None: + if hasattr(k_sim, 's_source_pos_index') and k_sim.s_source_pos_index is not None: k_sim.s_source_pos_index = np.squeeze(k_sim.s_source_pos_index) - int(1) if hasattr(k_sim, 'u_source_pos_index') and k_sim.u_source_pos_index is not None: @@ -513,22 +513,21 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # add in the velocity source term if (k_sim.source_ux is not False and t_index < source.ux.shape[1]): - if k_sim.source_ux >= t_index: - match source.u_mode: - case 'dirichlet': - # enforce the source values as a dirichlet boundary condition - ux_sgx[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] - case 'additive': - # extract the source values into a matrix - source_mat.fill(0) - source_mat[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] - # apply the k-space correction - source_mat = xp.real(xp.fft.ifft(source_kappa * xp.fft.fft(source_mat))) - # add the source values to the existing field values including the k-space correction - ux_sgx = ux_sgx + source_mat - case 'additive-no-correction': - # add the source values to the existing field values - ux_sgx[k_sim.u_source_pos_index] = ux_sgx[k_sim.u_source_pos_index] + source.ux[k_sim.u_source_sig_index, t_index] + match source.u_mode: + case 'dirichlet': + # enforce the source values as a dirichlet boundary condition + ux_sgx[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] + case 'additive': + # extract the source values into a matrix + source_mat.fill(0) + source_mat[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] + # apply the k-space correction + source_mat = xp.real(xp.fft.ifft(source_kappa * xp.fft.fft(source_mat))) + # add the source values to the existing field values including the k-space correction + ux_sgx = ux_sgx + source_mat + case 'additive-no-correction': + # add the source values to the existing field values + ux_sgx[k_sim.u_source_pos_index] = ux_sgx[k_sim.u_source_pos_index] + source.ux[k_sim.u_source_sig_index, t_index] # calculate du/dx at the next time step From a6e9bd7cbbb79230100470e700e0306448f630a4 Mon Sep 17 00:00:00 2001 From: David Sinden Date: Mon, 12 Jan 2026 15:09:44 +0100 Subject: [PATCH 28/29] suggested fixes --- kwave/kspaceFirstOrder1D.py | 52 +++++++++++++++++++++++++------------ 1 file changed, 35 insertions(+), 17 deletions(-) diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index 0831fb77..d150e0e3 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -1,4 +1,5 @@ from typing import Union +import warnings try: import cupy as cp @@ -11,6 +12,7 @@ import numpy as np import scipy.fft +from scipy.interpolate import interp1d from tqdm import tqdm from kwave.kgrid import kWaveGrid @@ -243,7 +245,10 @@ def kspace_first_order_1D(kgrid: kWaveGrid, if execution_options.is_gpu_simulation: xp, using_gpu = get_array_module() - print(f"Using {'cupy (GPU)' if using_gpu else 'numpy (CPU)'}") + if not using_gpu: + warnings.warn("GPU simulation requested but no GPU available. Falling back to CPU (NumPy).") + else: + print("Using cupy (GPU)") else: xp = np using_gpu = False @@ -301,11 +306,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, points = np.squeeze(k_sim.kgrid.x_vec) # rho0 is heterogeneous and staggered grids are used - rho0_sgx = np.interp(points + k_sim.kgrid.dx / 2.0, points, np.squeeze(k_sim.rho0)) + f = interp1d(points, rho0, bounds_error=False, fill_value=np.nan) + rho0_sgx = f(points + k_sim.kgrid.dx / 2.0) # set values outside of the interpolation range to original values - rho0_sgx[np.isnan(rho0_sgx)] = np.squeeze(k_sim.rho0)[np.isnan(rho0_sgx)] - + rho0_sgx[np.isnan(rho0_sgx)] = rho0[np.isnan(rho0_sgx)] + else: # rho0 is homogeneous or staggered grids are not used rho0_sgx = k_sim.rho0 @@ -354,12 +360,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # create k-space operator (the option options.use_kspace exists for debugging) if options.use_kspace: - kappa = scipy.fft.ifftshift(sinc(c_ref * kgrid.k * kgrid.dt / 2.0)) + kappa = scipy.fft.ifftshift(sinc(c_ref * kgrid.k * dt / 2.0)) kappa = np.squeeze(kappa) source_kappa = 1.0 - if (hasattr(k_sim, 'source_p') and hasattr(k_sim.source, 'p_mode')) and (k_sim.source.p_mode == 'additive') or \ - (hasattr(k_sim, 'source_ux') and hasattr(k_sim.source, 'u_mode')) and (k_sim.source.u_mode == 'additive'): - source_kappa = scipy.fft.ifftshift(np.cos (c_ref * kgrid.k * kgrid.dt / 2.0)) + if (hasattr(k_sim, 'source_p') and getattr(k_sim.source, 'p_mode', None) == 'additive') or \ + (hasattr(k_sim, 'source_ux') and getattr(k_sim.source, 'u_mode', None) == 'additive'): + source_kappa = scipy.fft.ifftshift(np.cos (c_ref * kgrid.k * dt / 2.0)) else: kappa = 1.0 source_kappa = 1.0 @@ -445,6 +451,18 @@ def kspace_first_order_1D(kgrid: kWaveGrid, BonA = xp.asarray(xp.squeeze(k_sim.medium.BonA), dtype=my_type) rho0_sgx_inv = xp.asarray(rho0_sgx_inv, dtype=my_type) + if k_sim.equation_of_state == 'lossless': + pass + elif k_sim.equation_of_state == 'absorbing': + medium.medium.absorb_tau = xp.asarray(xp.squeeze(k_sim.medium.medium.absorb_tau), dtype=my_type) + medium.medium.absorb_eta = xp.asarray(xp.squeeze(k_sim.medium.medium.absorb_eta), dtype=my_type) + medium.medium.absorb_nabla1 = xp.asarray(xp.squeeze(k_sim.medium.medium.absorb_nabla1), dtype=my_type) + medium.medium.absorb_nabla2 = xp.asarray(xp.squeeze(k_sim.medium.medium.absorb_nabla2), dtype=my_type) + elif k_sim.equation_of_state == 'stokes': + medium.medium.absorb_tau = xp.asarray(xp.squeeze(k_sim.medium.medium.absorb_tau), dtype=my_type) + else: + raise ValueError("Unknown equation_of_state: " + str(k_sim.equation_of_state)) + pml_x = xp.asarray(pml_x, dtype=my_complex_type) pml_x_sgx = xp.asarray(pml_x_sgx, dtype=my_type) @@ -512,22 +530,22 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # add in the velocity source term - if (k_sim.source_ux is not False and t_index < source.ux.shape[1]): - match source.u_mode: + if (k_sim.source_ux is not False and t_index < k_sim.source.ux.shape[1]): + match k_sim.source.u_mode: case 'dirichlet': # enforce the source values as a dirichlet boundary condition - ux_sgx[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] + ux_sgx[k_sim.u_source_pos_index] = k_sim.source.ux[k_sim.u_source_sig_index, t_index] case 'additive': # extract the source values into a matrix source_mat.fill(0) - source_mat[k_sim.u_source_pos_index] = source.ux[k_sim.u_source_sig_index, t_index] + source_mat[k_sim.u_source_pos_index] = k_sim.source.ux[k_sim.u_source_sig_index, t_index] # apply the k-space correction source_mat = xp.real(xp.fft.ifft(source_kappa * xp.fft.fft(source_mat))) # add the source values to the existing field values including the k-space correction ux_sgx = ux_sgx + source_mat case 'additive-no-correction': # add the source values to the existing field values - ux_sgx[k_sim.u_source_pos_index] = ux_sgx[k_sim.u_source_pos_index] + source.ux[k_sim.u_source_sig_index, t_index] + ux_sgx[k_sim.u_source_pos_index] = ux_sgx[k_sim.u_source_pos_index] + k_sim.source.ux[k_sim.u_source_sig_index, t_index] # calculate du/dx at the next time step @@ -577,7 +595,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, rhox = rhox + source_mat case 'additive-no-correction': # add the source values to the existing field values - rhox[k_sim.p_source_pos_index] = rhox[k_sim.p_source_pos_index] + source.p[k_sim.p_source_sig_index, t_index] + rhox[k_sim.p_source_pos_index] = rhox[k_sim.p_source_pos_index] + k_sim.source.p[k_sim.p_source_sig_index, t_index] # equation of state @@ -618,7 +636,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, + medium.absorb_tau * rho0 * duxdx + BonA * rhox**2 / (2.0 * rho0) ) - # enforce initial conditions if source.p0 is defined instead of time varying sources + # enforce initial conditions if k_sim.source.p0 is defined instead of time varying sources if t_index == 0 and k_sim.source.p0 is not None: p0 = xp.squeeze(p0) @@ -639,12 +657,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, match options.use_finite_difference: case 2: # calculate gradient using second-order accurate finite difference scheme (including half step forward) - dpdx = (xp.append(p[1:], 0.0) - p) / kgrid.dx + dpdx = (xp.append(p[1:], 0.0) - p) / dx ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0 case 4: # calculate gradient using fourth-order accurate finite difference scheme (including half step backward) - dpdx = (xp.append(p[2:], [0, 0]) - 27.0 * xp.append(p[1:], 0) + 27.0 * p - xp.append(0, p[:-1])) / (24.0 * kgrid.dx) + dpdx = (xp.append(p[2:], [0, 0]) - 27.0 * xp.append(p[1:], 0) + 27.0 * p - xp.append(0, p[:-1])) / (24.0 * dx) ux_sgx = dt * rho0_sgx_inv * dpdx / 2.0 else: From 4c34d4923d9ecdff96f114e28ce5aa680304287c Mon Sep 17 00:00:00 2001 From: David Sinden Date: Mon, 12 Jan 2026 16:09:51 +0100 Subject: [PATCH 29/29] suggested fixes --- kwave/kspaceFirstOrder1D.py | 52 ++++++++++++++++++++++++++----------- 1 file changed, 37 insertions(+), 15 deletions(-) diff --git a/kwave/kspaceFirstOrder1D.py b/kwave/kspaceFirstOrder1D.py index d150e0e3..b5a0106d 100644 --- a/kwave/kspaceFirstOrder1D.py +++ b/kwave/kspaceFirstOrder1D.py @@ -1,12 +1,10 @@ -from typing import Union import warnings +from typing import Union try: import cupy as cp - from cupy import fft as cp_fft except ImportError: cp = None - cp_fft = None import importlib @@ -59,6 +57,8 @@ def get_array_module(verbose: bool = False): def to_gpu(obj, verbose: bool = False): """Convert all numpy arrays in the given object to cupy arrays.""" + if cp is None: + raise RuntimeError("CuPy is not available; cannot convert to GPU arrays.") for name, val in vars(obj).items(): if isinstance(val, np.ndarray): if verbose: @@ -69,6 +69,8 @@ def to_gpu(obj, verbose: bool = False): def dotdict_to_gpu(obj, verbose: bool = False): """Convert all numpy arrays in the given dotdict to cupy arrays.""" + if cp is None: + raise RuntimeError("CuPy is not available; cannot convert to GPU arrays.") for name, val in obj.items(): if isinstance(val, np.ndarray): if verbose: @@ -78,7 +80,7 @@ def dotdict_to_gpu(obj, verbose: bool = False): def dotdict_to_cpu(obj, verbose: bool = False): - """Convert all numpy arrays in the given dotdict to numpy arrays.""" + """Convert all cupy arrays in the given dotdict to numpy arrays.""" # case in which gpu is present, but cupy is not used if cp is None: return obj @@ -260,6 +262,20 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # this will create the sensor_data dotdict k_sim.input_checking("kspaceFirstOrder1D") + # TODO: Move to input checking + # If the user passed a 1D time‐series, promote it to shape (1, Nt) + src = k_sim.source + if getattr(k_sim.source, "ux", False) is not False: + # only promote if it has fewer than 2 dims + if k_sim.source.ux.ndim < 2: + warnings.warn("A spatially and temporally varying velocity source was passed, but is the wrong shape.") + k_sim.source.ux = np.atleast_2d(k_sim.source.ux) + + if getattr(k_sim.source, "p", False) is not False: + if k_sim.source.p.ndim < 2: + warnings.warn("A spatially and temporally varying pressure source was passed, but is the wrong shape.") + k_sim.source.p = np.atleast_2d(k_sim.source.p) + # ========================================================================= # DEFINE CAST TYPE @@ -399,6 +415,9 @@ def kspace_first_order_1D(kgrid: kWaveGrid, if hasattr(k_sim, 'p_source_sig_index') and k_sim.p_source_sig_index is not None: k_sim.p_source_sig_index = np.squeeze(k_sim.p_source_sig_index) - int(1) + # make the sensor record index zero indexed + record_start_index = k_sim.sensor.record_start_index - int(1) + # ========================================================================= # PREPARE VISUALISATIONS # ========================================================================= @@ -454,12 +473,12 @@ def kspace_first_order_1D(kgrid: kWaveGrid, if k_sim.equation_of_state == 'lossless': pass elif k_sim.equation_of_state == 'absorbing': - medium.medium.absorb_tau = xp.asarray(xp.squeeze(k_sim.medium.medium.absorb_tau), dtype=my_type) - medium.medium.absorb_eta = xp.asarray(xp.squeeze(k_sim.medium.medium.absorb_eta), dtype=my_type) - medium.medium.absorb_nabla1 = xp.asarray(xp.squeeze(k_sim.medium.medium.absorb_nabla1), dtype=my_type) - medium.medium.absorb_nabla2 = xp.asarray(xp.squeeze(k_sim.medium.medium.absorb_nabla2), dtype=my_type) + medium.absorb_tau = xp.asarray(xp.squeeze(k_sim.medium.absorb_tau), dtype=my_type) + medium.absorb_eta = xp.asarray(xp.squeeze(k_sim.medium.absorb_eta), dtype=my_type) + medium.absorb_nabla1 = xp.asarray(xp.squeeze(k_sim.medium.absorb_nabla1), dtype=my_type) + medium.absorb_nabla2 = xp.asarray(xp.squeeze(k_sim.medium.absorb_nabla2), dtype=my_type) elif k_sim.equation_of_state == 'stokes': - medium.medium.absorb_tau = xp.asarray(xp.squeeze(k_sim.medium.medium.absorb_tau), dtype=my_type) + medium.absorb_tau = xp.asarray(xp.squeeze(k_sim.medium.absorb_tau), dtype=my_type) else: raise ValueError("Unknown equation_of_state: " + str(k_sim.equation_of_state)) @@ -488,6 +507,10 @@ def kspace_first_order_1D(kgrid: kWaveGrid, if k_sim.source_p is not False: source_mat = xp.zeros((Nx,), dtype=my_type) + if k_sim.nonuniform_grid and not options.use_finite_difference: + dxudxn = xp.asarray(k_sim.kgrid.dxudxn, dtype=my_type) + dxudxn_sgx = xp.asarray(k_sim.kgrid.dxudxn_sgx, dtype=my_type) + # ========================================================================= # LOOP THROUGH TIME STEPS # ========================================================================= @@ -526,7 +549,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # calculate gradient using the k-space method on a non-uniform grid # via the mapped pseudospectral method ux_sgx = pml_x_sgx * (pml_x_sgx * ux_sgx - - dt * rho0_sgx_inv * k_sim.kgrid.dxudxn_sgx * xp.real(xp.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) + dt * rho0_sgx_inv * dxudxn_sgx * xp.real(xp.fft.ifft(ddx_k * ddx_k_shift_pos * kappa * p_k)) ) # add in the velocity source term @@ -566,7 +589,7 @@ def kspace_first_order_1D(kgrid: kWaveGrid, else: # calculate gradients using a non-uniform grid via the mapped # pseudospectral method - duxdx = k_sim.kgrid.dxudxn * xp.real(xp.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * xp.fft.fftn(ux_sgx, axes=(0,)), axes=(0,))) + duxdx = dxudxn * xp.real(xp.fft.ifftn(ddx_k * ddx_k_shift_neg * kappa * xp.fft.fftn(ux_sgx, axes=(0,)), axes=(0,))) # calculate rhox at the next time step @@ -671,12 +694,11 @@ def kspace_first_order_1D(kgrid: kWaveGrid, # extract required sensor data from the pressure and particle velocity - # fields if the number of time steps elapsed is greater than - # sensor.record_start_index (defaults to 1) - if k_sim.use_sensor and (t_index >= sensor.record_start_index): + # fields if the number of time steps elapsed is greater than record_start_index + if k_sim.use_sensor and (t_index >= record_start_index): # update index for data storage - file_index: int = t_index - sensor.record_start_index + file_index: int = t_index - record_start_index # run sub-function to extract the required data from the acoustic variables sensor_data = extract_sensor_data(kdim, xp, sensor_data, file_index, sensor_mask_index,