diff --git a/sasdata/data_util/averaging.py b/sasdata/data_util/averaging.py
new file mode 100644
index 00000000..f3c4d78b
--- /dev/null
+++ b/sasdata/data_util/averaging.py
@@ -0,0 +1,669 @@
+"""
+This module contains various data processors used by SasView's slicers.
+"""
+
+
+import numpy as np
+
+from sasdata.data_util.binning import DirectionalAverage
+from sasdata.data_util.interval import IntervalType
+from sasdata.data_util.roi import CartesianROI, PolarROI
+from sasdata.dataloader.data_info import Data1D, Data2D
+
+
+class Boxsum(CartesianROI):
+    """
+    Compute the sum of the intensity within a rectangular Region Of Interest.
+    """
+
+    def __init__(self, qx_range: tuple[float, float] = (0.0, 0.0), qy_range: tuple[float, float] = (0.0, 0.0)) -> None:
+        """
+        Set up the Region of Interest and its boundaries.
+
+        The units of these parameters are A^-1.
+        :param qx_range: Bounds of the ROI along the Q_x direction.
+        :param qy_range: Bounds of the ROI along the Q_y direction.
+        """
+        super().__init__(qx_range=qx_range,
+                         qy_range=qy_range)
+
+    def __call__(self, data2d: Data2D = None) -> tuple[float, float, float]:
+        """
+        Coordinate data processing operations and return the results.
+
+        :param data2d: The Data2D object for which the sum is calculated.
+        :return: The total sum, the error on the sum, and the number of
+                 datapoints counted.
+        """
+        self.validate_and_assign_data(data2d)
+        total_sum, error, count = self._sum()
+
+        return total_sum, error, count
+
+    def _sum(self) -> tuple[float, float, float]:
+        """
+        Determine which data are inside the ROI and compute their sum.
+        Also calculate the error on this calculation and the total number of
+        datapoints in the region.
+        """
+
+        # Currently the weights are binary, but could be fractional in future
+        interval = IntervalType.CLOSED
+        x_weights = interval.weights_for_interval(array=self.qx_data,
+                                                  l_bound=self.qx_min,
+                                                  u_bound=self.qx_max)
+        y_weights = interval.weights_for_interval(array=self.qy_data,
+                                                  l_bound=self.qy_min,
+                                                  u_bound=self.qy_max)
+        weights = x_weights * y_weights
+
+        data = weights * self.data
+        # Not certain that the weights should be squared here, I'm just
+        # copying how it was done in the old manipulations.py
+        err_squared = weights * weights * self.err_data * self.err_data
+
+        total_sum = np.sum(data)
+        total_errors_squared = np.sum(err_squared)
+        total_count = np.sum(weights)
+
+        return total_sum, np.sqrt(total_errors_squared), total_count
+
+class Boxavg(Boxsum):
+    """
+    Compute the average intensity within a rectangular Region Of Interest.
+    """
+
+    def __init__(self, qx_range: tuple[float, float] = (0.0, 0.0), qy_range: tuple[float, float] = (0.0, 0.0)) -> None:
+        """
+        Set up the Region of Interest and its boundaries.
+
+        The units of these parameters are A^-1.
+        :param qx_range: Bounds of the ROI along the Q_x direction.
+        :param qy_range: Bounds of the ROI along the Q_y direction.
+        """
+        super().__init__(qx_range=qx_range,
+                         qy_range=qy_range)
+
+    def __call__(self, data2d: Data2D) -> tuple[float, float]:
+        """
+        Coordinate data processing operations and return the results.
+
+        :param data2d: The Data2D object for which the average is calculated.
+        :return: The average intensity and the error on that average.
+        """
+        self.validate_and_assign_data(data2d)
+        total_sum, error, count = super()._sum()
+
+        return (total_sum / count), (error / count)
+
+class SlabX(CartesianROI):
+    """
+    Average I(Q_x, Q_y) along the y direction (within a ROI), giving I(Q_x).
+
+    This class is initialised by specifying the boundaries of the ROI and is
+    called by supplying a Data2D object. It returns a Data1D object.
+    The averaging process can also be thought of as projecting 2D -> 1D.
+
+    There also exists the option to "fold" the ROI, where Q data on opposite
+    sides of the origin but with equal magnitudes are averaged together,
+    resulting in a 1D plot with only positive Q values shown.
+    """
+
+    def __init__(self, qx_range: tuple[float, float] = (0.0, 0.0), qy_range: tuple[float, float] = (0.0, 0.0), nbins: int = 100, fold: bool = False):
+        """
+        Set up the ROI boundaries, the binning of the output 1D data, and fold.
+
+        The units of these parameters are A^-1.
+        :param qx_range: Bounds of the ROI along the Q_x direction.
+        :param qy_range: Bounds of the ROI along the Q_y direction.
+        :param nbins: The number of bins data is sorted into along Q_x.
+        :param fold: Whether the two halves of the ROI along Q_x should be
+                     folded together during averaging.
+        """
+        super().__init__(qx_range=qx_range,
+                         qy_range=qy_range)
+        self.nbins = nbins
+        self.fold = fold
+
+    def __call__(self, data2d: Data2D = None) -> Data1D:
+        """
+        Compute the 1D average of 2D data, projecting along the Q_x axis.
+
+        :param data2d: The Data2D object for which the average is computed.
+        :return: Data1D object for plotting.
+        """
+        self.validate_and_assign_data(data2d)
+
+        # SlabX is used by SasView's BoxInteractorX, which is designed so that
+        # the ROI is always centred on the origin. If this ever changes, then
+        # the behaviour of fold here will also need to change. Perhaps we could
+        # apply a transformation to the data like the one used in WedgePhi.
+
+        if self.fold:
+            major_lims = (0, self.qx_max)
+            self.qx_data = np.abs(self.qx_data)
+        else:
+            major_lims = (self.qx_min, self.qx_max)
+        minor_lims = (self.qy_min, self.qy_max)
+
+        directional_average = DirectionalAverage(major_axis=self.qx_data,
+                                                 minor_axis=self.qy_data,
+                                                 lims=(major_lims, minor_lims),
+                                                 nbins=self.nbins)
+        qx_data, intensity, error = \
+            directional_average(data=self.data, err_data=self.err_data)
+
+        return Data1D(x=qx_data, y=intensity, dy=error)
+
+class SlabY(CartesianROI):
+    """
+    Average I(Q_x, Q_y) along the x direction (within a ROI), giving I(Q_y).
+
+    This class is initialised by specifying the boundaries of the ROI and is
+    called by supplying a Data2D object. It returns a Data1D object.
+    The averaging process can also be thought of as projecting 2D -> 1D.
+
+    There also exists the option to "fold" the ROI, where Q data on opposite
+    sides of the origin but with equal magnitudes are averaged together,
+    resulting in a 1D plot with only positive Q values shown.
+    """
+
+    def __init__(self, qx_range: tuple[float, float] = (0.0, 0.0), qy_range: tuple[float, float] = (0.0, 0.0), nbins: int = 100, fold: bool = False):
+        """
+        Set up the ROI boundaries, the binning of the output 1D data, and fold.
+
+        The units of these parameters are A^-1.
+        :param qx_range: Bounds of the ROI along the Q_x direction.
+        :param qy_range: Bounds of the ROI along the Q_y direction.
+        :param nbins: The number of bins data is sorted into along Q_y.
+        :param fold: Whether the two halves of the ROI along Q_y should be
+                     folded together during averaging.
+        """
+        super().__init__(qx_range=qx_range,
+                         qy_range=qy_range)
+        self.nbins = nbins
+        self.fold = fold
+
+    def __call__(self, data2d: Data2D = None) -> Data1D:
+        """
+        Compute the 1D average of 2D data, projecting along the Q_y axis.
+
+        :param data2d: The Data2D object for which the average is computed.
+        :return: Data1D object for plotting.
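+
+        Example (an illustrative sketch; ``data`` is assumed to be a
+        populated Data2D object)::
+
+            slab = SlabY(qx_range=(-0.01, 0.01), qy_range=(-0.1, 0.1),
+                         nbins=50, fold=True)
+            profile = slab(data)  # Data1D of I(Q_y) over 50 positive-Q_y bins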
+        """
+        self.validate_and_assign_data(data2d)
+
+        # SlabY is used by SasView's BoxInteractorY, which is designed so that
+        # the ROI is always centred on the origin. If this ever changes, then
+        # the behaviour of fold here will also need to change. Perhaps we could
+        # apply a transformation to the data like the one used in WedgePhi.
+
+        if self.fold:
+            major_lims = (0, self.qy_max)
+            self.qy_data = np.abs(self.qy_data)
+        else:
+            major_lims = (self.qy_min, self.qy_max)
+        minor_lims = (self.qx_min, self.qx_max)
+
+        directional_average = DirectionalAverage(major_axis=self.qy_data,
+                                                 minor_axis=self.qx_data,
+                                                 lims=(major_lims, minor_lims),
+                                                 nbins=self.nbins)
+        qy_data, intensity, error = \
+            directional_average(data=self.data, err_data=self.err_data)
+
+        return Data1D(x=qy_data, y=intensity, dy=error)
+
+class CircularAverage(PolarROI):
+    """
+    Calculate I(|Q|) by circularly averaging 2D data between 2 radial limits.
+
+    This class is initialised by specifying lower and upper limits on the
+    magnitude of Q values to consider during the averaging, though currently
+    SasView always calls this class using the full range of data. When called,
+    this class is supplied with a Data2D object. It returns a Data1D object
+    where intensity is given as a function of Q only.
+    """
+
+    def __init__(self, r_range: tuple[float, float], center: tuple[float, float] = (0.0, 0.0), nbins: int = 100) -> None:
+        """
+        Set up the lower and upper radial limits as well as the number of bins.
+
+        The units are A^-1 for the radial parameters.
+        :param r_range: Tuple (r_min, r_max) of limits on the |Q| values to
+                        use during averaging.
+        :param center: Coordinates (x, y) of the point about which |Q| and φ
+                       are measured.
+        :param nbins: The number of bins data is sorted into along the |Q|
+                      axis.
+        """
+        super().__init__(r_range=r_range, center=center)
+        self.nbins = nbins
+
+    def __call__(self, data2d: Data2D = None) -> Data1D:
+        """
+        Compute the 1D average of 2D data, projecting along the Q axis.
+
+        :param data2d: The Data2D object for which the average is computed.
+        :return: Data1D object for plotting.
+        """
+        self.validate_and_assign_data(data2d)
+
+        # Averaging takes place between radial limits
+        major_lims = (self.r_min, self.r_max)
+        # minor_lims is None because a full-circle angular range is used
+        directional_average = DirectionalAverage(major_axis=self.q_data,
+                                                 minor_axis=self.phi_data,
+                                                 lims=(major_lims, None),
+                                                 nbins=self.nbins)
+        q_data, intensity, error = \
+            directional_average(data=self.data, err_data=self.err_data)
+
+        return Data1D(x=q_data, y=intensity, dy=error)
+
+class Ring(PolarROI):
+    """
+    Calculate I(φ) by radially averaging 2D data between 2 radial limits.
+
+    This class is initialised by specifying lower and upper limits on the
+    magnitude of Q values to consider during the averaging. When called,
+    this class is supplied with a Data2D object. It returns a Data1D object.
+    This Data1D object gives intensity as a function of the angle from the
+    positive x-axis, φ, only.
+    """
+
+    def __init__(self, r_range: tuple[float, float], center: tuple[float, float] = (0.0, 0.0), nbins: int = 100) -> None:
+        """
+        Set up the lower and upper radial limits as well as the number of bins.
+
+        The units are A^-1 for the radial parameters.
+        :param r_range: Tuple (r_min, r_max) of limits on the |Q| values to
+                        use during averaging.
+        :param center: Coordinates (x, y) of the centre of the ring.
+        :param nbins: The number of bins data is sorted into along the φ
+                      axis.
+        """
+        super().__init__(r_range=r_range, center=center)
+        self.nbins = nbins
+
+    def __call__(self, data2d: Data2D = None) -> Data1D:
+        """
+        Compute the 1D average of 2D data, projecting along the φ axis.
+
+        :param data2d: The Data2D object for which the average is computed.
+        :return: Data1D object for plotting.
+        """
+        self.validate_and_assign_data(data2d)
+
+        # Averaging takes place between radial limits
+        minor_lims = (self.r_min, self.r_max)
+        # major_lims is None because a full-circle angular range is used
+        directional_average = DirectionalAverage(major_axis=self.phi_data,
+                                                 minor_axis=self.q_data,
+                                                 lims=(None, minor_lims),
+                                                 nbins=self.nbins)
+        phi_data, intensity, error = \
+            directional_average(data=self.data, err_data=self.err_data)
+
+        return Data1D(x=phi_data, y=intensity, dy=error)
+
+class SectorQ(PolarROI):
+    """
+    Project I(Q, φ) data onto I(Q) within a region defined by polar limits.
+
+    The projection is computed by averaging together datapoints with the same
+    |Q| (so long as they are within the ROI). The angle φ is measured
+    anticlockwise from the positive x-axis.
+
+    This class is initialised by specifying lower and upper limits on both the
+    magnitude of Q and the angle φ. These four parameters specify the primary
+    Region Of Interest, however there is a secondary ROI with the same |Q|
+    values on the opposite side of the origin (φ + π). How this secondary ROI
+    is treated depends on the value of the `fold` parameter. If fold is set to
+    True, data on opposite sides of the origin are averaged together and the
+    results are plotted against positive values of Q. If fold is set to False,
+    the data from the two regions are graphed separately, with the secondary
+    ROI data labelled using negative Q values.
+
+    When called, this class is supplied with a Data2D object. It returns a
+    Data1D object where intensity is given as a function of Q only.
+    """
+
+    def __init__(self, r_range: tuple[float, float], phi_range: tuple[float, float] = (0.0, 2*np.pi), center: tuple[float, float] = (0.0, 0.0), nbins: int = 100, fold: bool = True) -> None:
+        """
+        Set up the ROI boundaries, the binning of the output 1D data, and fold.
+
+        The units are A^-1 for radial parameters, and radians for angular
+        ones.
+        :param r_range: Tuple (r_min, r_max) defining limits for |Q| values
+                        to use during averaging.
+        :param phi_range: Tuple (phi_min, phi_max) defining limits for φ in
+                          radians (in the primary ROI). Defaults to the full
+                          circle (0, 2π).
+        :param center: Coordinates (x, y) of the point about which |Q| and φ
+                       are measured.
+        :param nbins: The number of bins data is sorted into along the |Q|
+                      axis.
+        :param fold: Whether the primary and secondary ROIs should be folded
+                     together during averaging.
+        """
+        super().__init__(r_range=r_range, phi_range=phi_range, center=center)
+
+        self.nbins = nbins
+        self.fold = fold
+
+    def __call__(self, data2d: Data2D = None) -> Data1D:
+        """
+        Compute the 1D average of 2D data, projecting along the |Q| axis.
+
+        :param data2d: The Data2D object for which the average is computed.
+        :return: Data1D object for plotting.
+        """
+        self.validate_and_assign_data(data2d)
+
+        # Transform all angles to the range [0,2π) where phi_min is at zero,
+        # eliminating errors when the ROI straddles the 2π -> 0 discontinuity.
+        # We won't need to convert back later because we're plotting against Q.
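+        # For example (illustrative): with phi_range=(7π/4, π/4), subtracting
+        # the 7π/4 offset maps the primary ROI onto [0, π/2), which no longer
+        # straddles the wrap-around point.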
+        phi_offset = self.phi_min
+        self.phi_min = 0.0
+        self.phi_max = (self.phi_max - phi_offset) % (2 * np.pi)
+        self.phi_data = (self.phi_data - phi_offset) % (2 * np.pi)
+
+        major_lims = (self.r_min, self.r_max)
+        minor_lims = (self.phi_min, self.phi_max)
+        # Secondary region of interest covers angles on opposite side of origin
+        minor_lims_alt = (self.phi_min + np.pi, self.phi_max + np.pi)
+
+        primary_region = DirectionalAverage(major_axis=self.q_data,
+                                            minor_axis=self.phi_data,
+                                            lims=(major_lims, minor_lims),
+                                            nbins=self.nbins)
+        secondary_region = DirectionalAverage(major_axis=self.q_data,
+                                              minor_axis=self.phi_data,
+                                              lims=(major_lims, minor_lims_alt),
+                                              nbins=self.nbins)
+
+        primary_q, primary_I, primary_err = \
+            primary_region(data=self.data, err_data=self.err_data)
+        secondary_q, secondary_I, secondary_err = \
+            secondary_region(data=self.data, err_data=self.err_data)
+
+        if self.fold:
+            # Combining the two regions requires re-binning; the q value
+            # arrays may be unequal lengths, or the indices may correspond to
+            # different q values. To average the results from >2 ROIs you would
+            # need to generalise this process.
+            combined_q = np.zeros(self.nbins)
+            average_intensity = np.zeros(self.nbins)
+            combined_err = np.zeros(self.nbins)
+            bin_counts = np.zeros(self.nbins)
+            for old_index, q_val in enumerate(primary_q):
+                new_index = primary_region.get_bin_index(q_val)
+                combined_q[new_index] += q_val
+                average_intensity[new_index] += primary_I[old_index]
+                combined_err[new_index] += primary_err[old_index] ** 2
+                bin_counts[new_index] += 1
+            for old_index, q_val in enumerate(secondary_q):
+                new_index = secondary_region.get_bin_index(q_val)
+                combined_q[new_index] += q_val
+                average_intensity[new_index] += secondary_I[old_index]
+                combined_err[new_index] += secondary_err[old_index] ** 2
+                bin_counts[new_index] += 1
+
+            combined_q /= bin_counts
+            average_intensity /= bin_counts
+            combined_err = np.sqrt(combined_err) / bin_counts
+
+            finite = np.isfinite(average_intensity)
+
+            data1d = Data1D(x=combined_q[finite], y=average_intensity[finite],
+                            dy=combined_err[finite])
+        else:
+            # The secondary ROI is labelled with negative Q values.
+            combined_q = np.append(np.flip(-1 * secondary_q), primary_q)
+            combined_intensity = np.append(np.flip(secondary_I), primary_I)
+            combined_error = np.append(np.flip(secondary_err), primary_err)
+            data1d = Data1D(x=combined_q, y=combined_intensity,
+                            dy=combined_error)
+
+        return data1d
+
+class WedgeQ(PolarROI):
+    """
+    Project I(Q, φ) data onto I(Q) within a region defined by polar limits.
+
+    The projection is computed by averaging together datapoints with the same
+    |Q| (so long as they are within the ROI). The angle φ is measured
+    anticlockwise from the positive x-axis.
+
+    This class is initialised by specifying lower and upper limits on both the
+    magnitude of Q and the angle φ.
+    When called, this class is supplied with a Data2D object. It returns a
+    Data1D object where intensity is given as a function of Q only.
+    """
+
+    def __init__(self, r_range: tuple[float, float], phi_range: tuple[float, float] = (0.0, 2*np.pi), center: tuple[float, float] = (0.0, 0.0), nbins: int = 100) -> None:
+        """
+        Set up the ROI boundaries, and the binning of the output 1D data.
+
+        The units are A^-1 for radial parameters, and radians for angular
+        ones.
+        :param r_range: Tuple (r_min, r_max) defining limits for |Q| values
+                        to use during averaging.
+        :param phi_range: Tuple (phi_min, phi_max) defining limits for φ in
+                          radians. Defaults to the full circle (0, 2π).
+        :param nbins: The number of bins data is sorted into along the |Q|
+                      axis.
+        """
+        super().__init__(r_range=r_range, phi_range=phi_range, center=center)
+        self.nbins = nbins
+
+    def __call__(self, data2d: Data2D = None) -> Data1D:
+        """
+        Compute the 1D average of 2D data, projecting along the |Q| axis.
+
+        :param data2d: The Data2D object for which the average is computed.
+        :return: Data1D object for plotting.
+        """
+        self.validate_and_assign_data(data2d)
+
+        # Transform all angles to the range [0,2π) where phi_min is at zero,
+        # eliminating errors when the ROI straddles the 2π -> 0 discontinuity.
+        # We won't need to convert back later because we're plotting against Q.
+        phi_offset = self.phi_min
+        self.phi_min = 0.0
+        self.phi_max = (self.phi_max - phi_offset) % (2 * np.pi)
+        self.phi_data = (self.phi_data - phi_offset) % (2 * np.pi)
+
+        # Averaging takes place between radial and angular limits
+        major_lims = (self.r_min, self.r_max)
+        # When phi_max and phi_min have the same angle, the ROI is a full
+        # circle.
+        if self.phi_max == 0:
+            minor_lims = None
+        else:
+            minor_lims = (self.phi_min, self.phi_max)
+
+        directional_average = DirectionalAverage(major_axis=self.q_data,
+                                                 minor_axis=self.phi_data,
+                                                 lims=(major_lims, minor_lims),
+                                                 nbins=self.nbins)
+        q_data, intensity, error = \
+            directional_average(data=self.data, err_data=self.err_data)
+
+        return Data1D(x=q_data, y=intensity, dy=error)
+
+class WedgePhi(PolarROI):
+    """
+    Project I(Q, φ) data onto I(φ) within a region defined by polar limits.
+
+    The projection is computed by averaging together datapoints with the same
+    angle φ (so long as they are within the ROI).
+
+    This class is initialised by specifying lower and upper limits on both the
+    magnitude of Q and the angle φ, measured anticlockwise from the positive
+    x-axis.
+    When called, this class is supplied with a Data2D object. It returns a
+    Data1D object where intensity is given as a function of φ only.
+    """
+
+    def __init__(self, r_range: tuple[float, float], phi_range: tuple[float, float] = (0.0, 2*np.pi), center: tuple[float, float] = (0.0, 0.0), nbins: int = 100) -> None:
+        """
+        Set up the ROI boundaries, and the binning of the output 1D data.
+
+        The units are A^-1 for radial parameters, and radians for angular
+        ones.
+        :param r_range: Tuple (r_min, r_max) defining limits for |Q| values
+                        to use during averaging.
+        :param phi_range: Tuple (phi_min, phi_max) defining angular bounds in
+                          radians. Defaults to the full circle (0, 2π).
+        :param nbins: The number of bins data is sorted into along the φ axis.
+        """
+        super().__init__(r_range=r_range, phi_range=phi_range, center=center)
+        self.nbins = nbins
+
+    def __call__(self, data2d: Data2D = None) -> Data1D:
+        """
+        Compute the 1D average of 2D data, projecting along the φ axis.
+
+        :param data2d: The Data2D object for which the average is computed.
+        :return: Data1D object for plotting.
+        """
+        self.validate_and_assign_data(data2d)
+
+        # Transform all angles to the range [0,2π) where phi_min is at zero,
+        # eliminating errors when the ROI straddles the 2π -> 0 discontinuity.
+        # Remember to transform back afterward as we're plotting against phi.
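+        # For example (illustrative): with phi_range=(3π/2, π/2) the wedge is
+        # shifted to span [0, π) for binning, and the 3π/2 offset is added
+        # back to the bin centres before the Data1D is returned.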
+        phi_offset = self.phi_min
+        self.phi_min = 0.0
+        self.phi_max = (self.phi_max - phi_offset) % (2 * np.pi)
+        self.phi_data = (self.phi_data - phi_offset) % (2 * np.pi)
+
+        # Averaging takes place between angular and radial limits
+        # When phi_max and phi_min have the same angle, the ROI is a full
+        # circle.
+        if self.phi_max == 0:
+            major_lims = None
+        else:
+            major_lims = (self.phi_min, self.phi_max)
+        minor_lims = (self.r_min, self.r_max)
+
+        directional_average = DirectionalAverage(major_axis=self.phi_data,
+                                                 minor_axis=self.q_data,
+                                                 lims=(major_lims, minor_lims),
+                                                 nbins=self.nbins)
+        phi_data, intensity, error = \
+            directional_average(data=self.data, err_data=self.err_data)
+
+        # Convert angular data back to the original phi range
+        phi_data += phi_offset
+        # In the old manipulations.py, we also had this shift to plot the data
+        # at the centre of the bins. I'm not sure why it's only angular binning
+        # which gets this treatment.
+        # TODO: Update this once non-linear binning options are implemented
+        phi_data += directional_average.bin_widths / 2
+
+        return Data1D(x=phi_data, y=intensity, dy=error)
+
+class SectorPhi(WedgePhi):
+    """
+    Sector average as a function of phi.
+    I(phi) is returned and the data is averaged over Q.
+
+    A sector is defined by r_min, r_max, phi_min and phi_max.
+    The number of bins in phi also has to be defined.
+    """
+
+    # This class has only been kept around in case users are calling it from
+    # scripts; SectorPhi was never used by SasView itself. The functionality
+    # is now used through WedgeSlicer.py, so the rewritten version of this
+    # class has been named WedgePhi.
+
+################################################################################
+
+class Ringcut(PolarROI):
+    """
+    Defines a ring on a 2D data set.
+    The ring is defined by r_min, r_max, and
+    the position of the center of the ring.
+
+    The data returned is the region inside the ring.
+    """
+
+    def __init__(self, r_range: tuple[float, float] = (0.0, 0.0), phi_range: tuple[float, float] = (0.0, 2*np.pi), center: tuple[float, float] = (0.0, 0.0)):
+        super().__init__(r_range, phi_range, center)
+
+    def __call__(self, data2D: Data2D) -> np.ndarray[bool]:
+        """
+        Apply the ring to the data set.
+        Returns a mask selecting the data within the given q range.
+
+        :param data2D: Data2D object
+
+        :return: boolean array, True for points inside the ring
+        """
+        super().validate_and_assign_data(data2D)
+
+        # Calculate q_data using unmasked qx_data and qy_data
+        q_data = np.sqrt(data2D.qx_data * data2D.qx_data + data2D.qy_data * data2D.qy_data)
+
+        # check whether each data point is inside ROI
+        out = (self.r_min <= q_data) & (self.r_max >= q_data)
+        return out
+
+class Boxcut(CartesianROI):
+    """
+    Find a rectangular 2D region of interest.
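+
+    Example (an illustrative sketch; ``data`` is assumed to be a populated
+    Data2D object)::
+
+        cut = Boxcut(qx_range=(-0.01, 0.01), qy_range=(-0.01, 0.01))
+        mask = cut(data)  # boolean array, True for points inside the box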
+    """
+
+    def __init__(self, qx_range: tuple[float, float] = (0.0, 0.0), qy_range: tuple[float, float] = (0.0, 0.0)):
+        super().__init__(qx_range=qx_range, qy_range=qy_range)
+
+    def __call__(self, data2D: Data2D) -> np.ndarray[bool]:
+        """
+        Find a rectangular 2D region of interest; data points inside the ROI
+        are True, and False otherwise.
+
+        :param data2D: Data2D object
+        :return: mask, 1d array (len = len(data))
+        """
+        super().validate_and_assign_data(data2D)
+
+        # check whether each data point is inside ROI
+        outx = (self.qx_min <= data2D.qx_data) & (self.qx_max > data2D.qx_data)
+        outy = (self.qy_min <= data2D.qy_data) & (self.qy_max > data2D.qy_data)
+
+        return outx & outy
+
+class Sectorcut(PolarROI):
+    """
+    Defines a sector (major + minor) region on a 2D data set.
+    The sector is defined by phi_min and phi_max, which give the right
+    and left lines with respect to the central line of the sector.
+
+    Phi_min and phi_max are given in units of radians
+    and (phi_max - phi_min) should not be larger than pi.
+    """
+
+    def __init__(self, phi_range: tuple[float, float] = (0.0, np.pi), center: tuple[float, float] = (0.0, 0.0)):
+        super().__init__(r_range=(0, np.inf), phi_range=phi_range, center=center)
+
+    def __call__(self, data2D: Data2D) -> np.ndarray[bool]:
+        """
+        Find a sector-shaped 2D region of interest; data points inside the
+        ROI are True, and False otherwise.
+
+        :param data2D: Data2D object
+        :return: mask, 1d array (len = len(data))
+        """
+        super().validate_and_assign_data(data2D)
+
+        # Use unmasked data for the phi_data calculation so the array sizes
+        # match
+        self.phi_data = np.arctan2(data2D.qy_data, data2D.qx_data)
+        # Calculate q_data using unmasked qx_data and qy_data so the array
+        # sizes match
+        q_data = np.sqrt(data2D.qx_data * data2D.qx_data + data2D.qy_data * data2D.qy_data)
+
+        phi_offset = self.phi_min
+        self.phi_min = 0.0
+        self.phi_max = (self.phi_max - phi_offset) % (2 * np.pi)
+        self.phi_data = (self.phi_data - phi_offset) % (2 * np.pi)
+        phi_shifted = self.phi_data - np.pi
+
+        # Angular bounds, shared by the upper and lower halves of the image
+        phi_min_angle, phi_max_angle = (self.phi_min, self.phi_max)
+
+        # Determine regions of interest
+        out_radial = (self.r_min <= q_data) & (self.r_max > q_data)
+        out_upper = (phi_min_angle <= self.phi_data) & (phi_max_angle >= self.phi_data)
+        out_lower = (phi_min_angle <= phi_shifted) & (phi_max_angle >= phi_shifted)
+
+        upper_roi = out_radial & out_upper
+        lower_roi = out_radial & out_lower
+        out = upper_roi | lower_roi
+
+        return out
diff --git a/sasdata/data_util/binning.py b/sasdata/data_util/binning.py
new file mode 100644
index 00000000..456fcf78
--- /dev/null
+++ b/sasdata/data_util/binning.py
@@ -0,0 +1,216 @@
+import numpy as np
+from numpy.typing import ArrayLike
+
+from sasdata.data_util.interval import IntervalType
+
+
+class DirectionalAverage:
+    """
+    Average along one coordinate axis of 2D data and return data for a 1D plot.
+    This can also be thought of as a projection onto the major axis: 2D -> 1D.
+
+    This class operates on a decomposed Data2D object, and returns data needed
+    to construct a Data1D object. The class is instantiated with two arrays of
+    orthogonal coordinate data (depending on the coordinate system, these may
+    have undergone some pre-processing) and two corresponding two-element
+    tuples/lists defining the lower and upper limits on the Region of Interest
+    (ROI) for each coordinate axis.
+    One of these axes is averaged along, and the other is divided into bins
+    and becomes the independent variable of the eventual 1D plot. These are
+    called the minor and major axes respectively.
+    When a class instance is called, it is passed the intensity and error data
+    from the original Data2D object. These should not have undergone any
+    coordinate system dependent pre-processing.
+
+    Note that the old version of manipulations.py had an option for
+    logarithmic binning which was only used by SectorQ. This functionality is
+    never called upon by SasView however, so I haven't implemented it here
+    (yet).
+    """
+
+    def __init__(self,
+                 major_axis: ArrayLike,
+                 minor_axis: ArrayLike,
+                 lims: tuple[tuple[float, float] | None, tuple[float, float] | None] | None = None,
+                 nbins: int = 100):
+        """
+        Set up direction of averaging, limits on the ROI, & the number of bins.
+
+        :param major_axis: Coordinate data for the axis onto which the 2D
+                           data is projected.
+        :param minor_axis: Coordinate data for the axis perpendicular to the
+                           major axis.
+        :param lims: Tuple (major_lims, minor_lims). Each element may be a
+                     2-tuple or None.
+        :param nbins: The number of bins the major axis is divided up into.
+        """
+
+        # Step 1: quick checks and parsing
+        self._validate_coordinate_arrays(major_axis, minor_axis)
+        major_lims, minor_lims = self._parse_lims(lims)
+        self.nbins = self._coerce_nbins(nbins)
+
+        # Step 2: assign arrays and check sizes
+        self.major_axis, self.minor_axis = self._assign_axes_and_check_lengths(major_axis, minor_axis)
+
+        # Step 3: set final limits and compute bin limits
+        self.major_lims, self.minor_lims = self._set_default_lims_and_bin_limits(major_lims, minor_lims)
+
+    def _validate_coordinate_arrays(self, major_axis, minor_axis) -> None:
+        """Ensure both major and minor coordinate inputs are array-like."""
+        if any(not hasattr(coordinate_data, "__array__") for
+               coordinate_data in (major_axis, minor_axis)):
+            msg = "Must provide major & minor coordinate arrays for binning."
+            raise ValueError(msg)
+
+    def _parse_lims(self, lims):
+        """
+        Validate the lims parameter and return (major_lims, minor_lims).
+        Accepts None or a 2-tuple (major_lims, minor_lims). Each of the two
+        elements may be None or a 2-tuple of floats.
+        """
+        if lims is None:
+            return None, None
+
+        if not (isinstance(lims, (list, tuple)) and len(lims) == 2):
+            msg = "Parameter 'lims' must be a 2-tuple (major_lims, minor_lims) or None."
+            raise ValueError(msg)
+
+        major_lims, minor_lims = lims
+        return major_lims, minor_lims
+
+    def _coerce_nbins(self, nbins):
+        """Coerce nbins to int, raising a TypeError on failure."""
+        try:
+            return int(nbins)
+        except Exception:
+            msg = f"Parameter 'nbins' must be convertible to an integer via int(), got type {type(nbins)} (={nbins})"
+            raise TypeError(msg)
+
+    def _assign_axes_and_check_lengths(self, major_axis, minor_axis):
+        """Assign axes to numpy arrays and check they have equal length."""
+        major_arr = np.asarray(major_axis)
+        minor_arr = np.asarray(minor_axis)
+        if major_arr.size != minor_arr.size:
+            msg = "Major and minor axes must have same length"
+            raise ValueError(msg)
+        return major_arr, minor_arr
+
+    def _set_default_lims_and_bin_limits(self, major_lims, minor_lims):
+        """
+        Determine the final major and minor limits (using the data min/max
+        when None is given) and compute bin_limits from major_lims and
+        self.nbins.
+        Returns (major_lims_final, minor_lims_final).
+        """
+        # Major limits
+        if major_lims is None:
+            major_lims_final = (self.major_axis.min(), self.major_axis.max())
+        else:
+            major_lims_final = major_lims
+
+        # Minor limits
+        if minor_lims is None:
+            minor_lims_final = (self.minor_axis.min(), self.minor_axis.max())
+        else:
+            minor_lims_final = minor_lims
+
+        # Store and compute bin limits (nbins + 1 points for boundaries)
+        self.bin_limits = np.linspace(major_lims_final[0], major_lims_final[1], self.nbins + 1)
+
+        return major_lims_final, minor_lims_final
+
+    @property
+    def bin_widths(self) -> np.ndarray:
+        """Return a numpy array of all bin widths, regardless of the point
+        spacings."""
+        return np.asarray([self.bin_width_n(i) for i in range(0, self.nbins)])
+
+    def bin_width_n(self, bin_number: int) -> float:
+        """Calculate the bin width for the nth bin.
+        :param bin_number: The starting array index of the bin, between 0 and
+                           self.nbins - 1.
+        :return: The bin width, as a float.
+        """
+        lower, upper = self.get_bin_interval(bin_number)
+        return upper - lower
+
+    def get_bin_interval(self, bin_number: int) -> tuple[float, float]:
+        """
+        Return the lower and upper limits defining a bin, given its index.
+
+        :param bin_number: The index of the bin (between 0 and self.nbins - 1)
+        :return: A tuple of the interval limits as (lower, upper).
+        """
+        # Ensure bin_number is an integer and not a float or a string
+        # representation
+        bin_number = int(bin_number)
+        return self.bin_limits[bin_number], self.bin_limits[bin_number+1]
+
+    def get_bin_index(self, value):
+        """
+        Return the index of the bin to which the supplied value belongs.
+
+        :param value: A coordinate value from somewhere along the major axis.
+        """
+        numerator = value - self.major_lims[0]
+        denominator = self.major_lims[1] - self.major_lims[0]
+        bin_index = int(np.floor(self.nbins * numerator / denominator))
+
+        # Bins are indexed from 0 to nbins-1, so this check protects against
+        # out-of-range indices when value == self.major_lims[1]
+        if bin_index == self.nbins:
+            bin_index -= 1
+
+        return bin_index
+
+    def compute_weights(self):
+        """
+        Return weights array for the contribution of each datapoint to each bin
+
+        Each row of the weights array corresponds to the bin with the same
+        index.
+        """
+        major_weights = np.zeros((self.nbins, self.major_axis.size))
+        closed = IntervalType.CLOSED
+        for m in range(self.nbins):
+            # Include the value at the end of the binning range, but in
+            # general use half-open intervals so each value belongs in only
+            # one bin.
+            if m == self.nbins - 1:
+                interval = closed
+            else:
+                interval = IntervalType.HALF_OPEN
+            bin_start, bin_end = self.get_bin_interval(bin_number=m)
+            major_weights[m] = interval.weights_for_interval(array=self.major_axis,
+                                                             l_bound=bin_start,
+                                                             u_bound=bin_end)
+        minor_weights = closed.weights_for_interval(array=self.minor_axis,
+                                                    l_bound=self.minor_lims[0],
+                                                    u_bound=self.minor_lims[1])
+        return major_weights * minor_weights
+
+    def __call__(self, data, err_data):
+        """
+        Compute the directional average of the supplied intensity & error data.
+
+        :param data: intensity data from the original Data2D object.
+        :param err_data: the corresponding errors for the intensity data.
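+
+        Example (an illustrative sketch with hypothetical arrays ``qx``,
+        ``qy``, ``I`` and ``dI``, all of equal length)::
+
+            avg = DirectionalAverage(major_axis=qx, minor_axis=qy,
+                                     lims=((0.0, 0.1), (-0.01, 0.01)),
+                                     nbins=25)
+            x, intensity, errors = avg(data=I, err_data=dI)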
+        """
+        weights = self.compute_weights()
+
+        x_axis_values = np.sum(weights * self.major_axis, axis=1)
+        intensity = np.sum(weights * data, axis=1)
+        errs_squared = np.sum((weights * err_data)**2, axis=1)
+
+        bin_counts = np.sum(weights, axis=1)
+        # Only proceed if at least one bin has a non-zero count
+        if not np.any(bin_counts > 0):
+            raise ValueError("Average Error: No bins inside ROI to average...")
+
+        errors = np.sqrt(errs_squared)
+        x_axis_values /= bin_counts
+        intensity /= bin_counts
+        errors /= bin_counts
+
+        finite = np.isfinite(intensity)
+        if not finite.any():
+            msg = "Average Error: No points inside ROI to average..."
+            raise ValueError(msg)
+
+        return x_axis_values[finite], intensity[finite], errors[finite]
diff --git a/sasdata/data_util/interval.py b/sasdata/data_util/interval.py
new file mode 100644
index 00000000..4987bb1a
--- /dev/null
+++ b/sasdata/data_util/interval.py
@@ -0,0 +1,35 @@
+from enum import Enum, auto
+
+import numpy as np
+
+
+class IntervalType(Enum):
+    HALF_OPEN = auto()
+    CLOSED = auto()
+
+    def weights_for_interval(self, array, l_bound, u_bound):
+        """
+        Weight coordinate data by position relative to a specified interval.
+
+        :param array: the array for which the weights are calculated
+        :param l_bound: value defining the lower limit of the region of interest
+        :param u_bound: value defining the upper limit of the region of interest
+
+        If and when fractional binning is implemented (ask Lucas), this
+        function will be changed so that instead of outputting zeros and ones,
+        it gives fractional values instead. These will depend on how close the
+        array value is to being within the interval defined.
+        """
+
+        # Whether the endpoint should be included depends on circumstance.
+        # Half-open is used when binning the major axis (except for the final
+        # bin); closed is used for the minor axis and the final bin of the
+        # major axis.
+        if self is IntervalType.HALF_OPEN:
+            in_range = np.logical_and(l_bound <= array, array < u_bound)
+        elif self is IntervalType.CLOSED:
+            in_range = np.logical_and(l_bound <= array, array <= u_bound)
+        else:
+            msg = f"Unrecognised interval_type: {self.name}"
+            raise ValueError(msg)
+
+        return np.asarray(in_range, dtype=int)
diff --git a/sasdata/data_util/manipulations.py b/sasdata/data_util/manipulations.py
index 3a552d80..f2fb8dcb 100644
--- a/sasdata/data_util/manipulations.py
+++ b/sasdata/data_util/manipulations.py
@@ -1,12 +1,11 @@
 """
 Data manipulations for 2D data sets.
-Using the meta data information, various types of averaging
-are performed in Q-space
+Using the meta data information, various types of averaging are performed in Q-space
 To test this module use:
 ```
 cd test
-PYTHONPATH=../src/ python2 -m sasdataloader.test.utest_averaging DataInfoTests.test_sectorphi_quarter
+PYTHONPATH=../src/ python2 -m sasmanipulations.test.utest_averaging DataInfoTests.test_sectorphi_quarter
 ```
 """
 #####################################################################
@@ -20,10 +19,61 @@
 # TODO: copy the meta data from the 2D object to the resulting 1D object
 import math
+from warnings import warn
 
 import numpy as np
 
-from sasdata.dataloader.data_info import Data1D, Data2D
+from sasdata.data_util.averaging import (
+    Boxavg as AvgBoxavg,
+)
+from sasdata.data_util.averaging import (
+    Boxcut as AvgBoxcut,
+)
+
+################################################################################
+# Backwards-compatible wrappers that delegate to the new implementations in
+# averaging.py. The original manipulations classes used different parameter
+# names; the wrappers below translate the old-style parameters onto the new
+# classes to preserve the external behaviour.
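+# For example (an illustrative mapping): the legacy call
+#     SlabX(x_min=-0.01, x_max=0.01, y_min=-0.002, y_max=0.002, bin_width=0.001)
+# delegates to
+#     averaging.SlabX(qx_range=(-0.01, 0.01), qy_range=(-0.002, 0.002), nbins=20)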
+################################################################################
+from sasdata.data_util.averaging import (
+    Boxsum as AvgBoxsum,
+)
+from sasdata.data_util.averaging import (
+    CircularAverage as AvgCircularAverage,
+)
+from sasdata.data_util.averaging import (
+    Ring as AvgRing,
+)
+from sasdata.data_util.averaging import (
+    Ringcut as AvgRingcut,
+)
+from sasdata.data_util.averaging import (
+    Sectorcut as AvgSectorcut,
+)
+from sasdata.data_util.averaging import (
+    SectorPhi as AvgSectorPhi,
+)
+from sasdata.data_util.averaging import (
+    SectorQ as AvgSectorQ,
+)
+from sasdata.data_util.averaging import (
+    SlabX as AvgSlabX,
+)
+from sasdata.data_util.averaging import (
+    SlabY as AvgSlabY,
+)
+from sasdata.data_util.averaging import (
+    WedgePhi as AvgWedgePhi,
+)
+from sasdata.data_util.averaging import (
+    WedgeQ as AvgWedgeQ,
+)
+from sasdata.dataloader.data_info import Data2D
+
+warn("sasdata.data_util.manipulations is deprecated. Unless otherwise noted, "
+     "update your import to sasdata.data_util.averaging.",
+     DeprecationWarning, stacklevel=2)
 
 
 def position_and_wavelength_to_q(dx: float, dy: float, detector_distance: float, wavelength: float) -> float:
@@ -196,7 +246,7 @@ def get_dq_data(data2d: Data2D) -> np.array:
     Get the dq for resolution averaging
     The pinholes and det. pix contribution present
     in both direction of the 2D which must be subtracted when
-    converting to 1D: dq_overlap should calculated ideally at
+    converting to 1D: dq_overlap should be calculated ideally at
     q = 0. Note This method works on only pinhole geometry.
     Extrapolate dqx(r) and dqy(phi) at q = 0, and take an average.
     '''
@@ -245,6 +295,8 @@
     :return: 1d arrays of Data2D object
     """
+    warn("reader2D_converter should be imported from "
+         "sasdata.dataloader.data_info in the future.",
+         DeprecationWarning, stacklevel=2)
     if data2d.data is None or data2d.x_bins is None or data2d.y_bins is None:
         raise ValueError("Can't convert this data: data=None...")
     new_x = np.tile(data2d.x_bins, (len(data2d.y_bins), 1))
@@ -316,980 +368,171 @@ get_bin_index(self, value):
 ################################################################################
-class _Slab:
-    """
-    Compute average I(Q) for a region of interest
+class SlabX:
     """
+    Wrapper for new SlabX.
+ Old signature: + SlabX(x_min=0, x_max=0, y_min=0, y_max=0, bin_width=0.001, fold=False) + New signature uses nbins; translate bin_width -> nbins using ceil(range/bin_width) + """ def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, - y_max=0.0, bin_width=0.001, fold = False): - # Minimum Qx value [A-1] - self.x_min = x_min - # Maximum Qx value [A-1] - self.x_max = x_max - # Minimum Qy value [A-1] - self.y_min = y_min - # Maximum Qy value [A-1] - self.y_max = y_max - # Bin width (step size) [A-1] - self.bin_width = bin_width - # If True, I(|Q|) will be return, otherwise, - # negative q-values are allowed - self.fold = fold + y_max=0.0, bin_width=0.001, fold=False): + # protect against zero-width or negative widths + width = max(abs(x_max - x_min), 1e-12) + nbins = int(math.ceil(width / abs(bin_width))) if bin_width != 0 else 1 + self._impl = AvgSlabX(qx_range=(x_min, x_max), qy_range=(y_min, y_max), + nbins=nbins, fold=fold) def __call__(self, data2D): - return NotImplemented + return self._impl(data2D) - def _avg(self, data2D, maj): - """ - Compute average I(Q_maj) for a region of interest. - The major axis is defined as the axis of Q_maj. - The minor axis is the axis that we average over. - :param data2D: Data2D object - :param maj_min: min value on the major axis - :return: Data1D object - """ - if len(data2D.detector) > 1: - msg = "_Slab._avg: invalid number of " - msg += " detectors: %g" % len(data2D.detector) - raise RuntimeError(msg) - - # Get data - data = data2D.data[np.isfinite(data2D.data)] - err_data =None - if data2D.err_data is not None: - err_data = data2D.err_data[np.isfinite(data2D.data)] - qx_data = data2D.qx_data[np.isfinite(data2D.data)] - qy_data = data2D.qy_data[np.isfinite(data2D.data)] - mask_data = data2D.mask[np.isfinite(data2D.data)] - - # Bin width calculation returns negative values when either axis has no points above 0. 
- self.bin_width = abs(self.bin_width) - - # Build array of Q intervals - if maj == 'x': - if self.fold: - # Set x_max based on which is further from Qx = 0 - x_max = max(abs(self.x_min),abs(self.x_max)) - # Set x_min based on which is closer to Qx = 0, but will have different limits depending on whether - # x_min and x_max are on the same side of Qx = 0 - if self.x_min*self.x_max >= 0: # If on same side - x_min = min(abs(self.x_min),abs(self.x_max)) - else: - x_min = 0 - else: - x_max = self.x_max - x_min = self.x_min - y_max = self.y_max - y_min = self.y_min - nbins = int(math.ceil((x_max - x_min) / self.bin_width)) - elif maj == 'y': - if self.fold: - # Set y_max based on which is further from Qy = 0 - y_max = max(abs(self.y_min), abs(self.y_max)) - # Set y_min based on which is closer to Qy = 0, but will have different limits depending on whether - # y_min and y_max are on the same side of Qy = 0 - if self.y_min * self.y_max >= 0: # If on same side - y_min = min(abs(self.y_min), abs(self.y_max)) - else: - y_min = 0 - else: - y_max = self.y_max - y_min = self.y_min - x_max = self.x_max - x_min = self.x_min - nbins = int(math.ceil((y_max - y_min) / self.bin_width)) - else: - raise RuntimeError("_Slab._avg: unrecognized axis %s" % str(maj)) - - x = np.zeros(nbins) - y = np.zeros(nbins) - err_y = np.zeros(nbins) - y_counts = np.zeros(nbins) - - # Average pixelsize in q space - for npts in range(len(data)): - if not mask_data[npts]: - # ignore points that are masked - continue - # default frac - frac_x = 0 - frac_y = 0 - # get ROI - if self.fold: - # If folded, need to satisfy absolute value of Q, but also make sure we're only pulling - # from data inside the box (an issue when the box is not centered on 0) - if maj == 'x': - if self.x_min <= qx_data[npts] < self.x_max and x_min <= abs(qx_data[npts]) < x_max: - frac_x = 1 - if self.y_min <= qy_data[npts] < self.y_max: - frac_y = 1 - elif maj == 'y': # The case where maj != 'x' or 'y' was handled earlier - if self.y_min <= qy_data[npts] < self.y_max and y_min <= abs(qy_data[npts]) < y_max: - frac_y = 1 - if self.x_min <= qx_data[npts] < self.x_max: - frac_x = 1 - else: - if self.x_min <= qx_data[npts] < self.x_max: - frac_x = 1 - if self.y_min <= qy_data[npts] < self.y_max: - frac_y = 1 - frac = frac_x * frac_y - - if frac == 0: - continue - # binning: find axis of q - if maj == 'x': - q_value = qx_data[npts] - min_value = x_min - if maj == 'y': - q_value = qy_data[npts] - min_value = y_min - if self.fold and q_value < 0: - q_value = -q_value - # bin - i_q = int(math.ceil((q_value - min_value) / self.bin_width)) - 1 - - # skip outside of max bins - if i_q < 0 or i_q >= nbins: - continue - - # TODO: find better definition of x[i_q] based on q_data - # min_value + (i_q + 1) * self.bin_width / 2.0 - x[i_q] += frac * q_value - y[i_q] += frac * data[npts] - - if err_data is None or err_data[npts] == 0.0: - if data[npts] < 0: - data[npts] = -data[npts] - err_y[i_q] += frac * frac * data[npts] - else: - err_y[i_q] += frac * frac * err_data[npts] * err_data[npts] - y_counts[i_q] += frac - - # Average the sums - for n in range(nbins): - err_y[n] = math.sqrt(err_y[n]) - - err_y = err_y / y_counts - y = y / y_counts - x = x / y_counts - idx = (np.isfinite(y) & np.isfinite(x)) - - if not idx.any(): - msg = "Average Error: No points inside ROI to average..." - raise ValueError(msg) - return Data1D(x=x[idx], y=y[idx], dy=err_y[idx]) - - -class SlabY(_Slab): +class SlabY: """ - Compute average I(Qy) for a region of interest + Wrapper for new SlabY. 
Same bin_width -> nbins translation as SlabX. """ + def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, + y_max=0.0, bin_width=0.001, fold=False): + height = max(abs(y_max - y_min), 1e-12) + nbins = int(math.ceil(height / abs(bin_width))) if bin_width != 0 else 1 + self._impl = AvgSlabY(qx_range=(x_min, x_max), qy_range=(y_min, y_max), + nbins=nbins, fold=fold) def __call__(self, data2D): - """ - Compute average I(Qy) for a region of interest - - :param data2D: Data2D object - :return: Data1D object - """ - return self._avg(data2D, 'y') + return self._impl(data2D) -class SlabX(_Slab): - """ - Compute average I(Qx) for a region of interest - """ - - def __call__(self, data2D): - """ - Compute average I(Qx) for a region of interest - :param data2D: Data2D object - :return: Data1D object - """ - return self._avg(data2D, 'x') - ################################################################################ class Boxsum: - """ - Perform the sum of counts in a 2D region of interest. - """ - def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): - # Minimum Qx value [A-1] - self.x_min = x_min - # Maximum Qx value [A-1] - self.x_max = x_max - # Minimum Qy value [A-1] - self.y_min = y_min - # Maximum Qy value [A-1] - self.y_max = y_max + self._impl = AvgBoxsum(qx_range=(x_min, x_max), qy_range=(y_min, y_max)) def __call__(self, data2D): - """ - Perform the sum in the region of interest - - :param data2D: Data2D object - :return: number of counts, error on number of counts, - number of points summed - """ - y, err_y, y_counts = self._sum(data2D) - - # Average the sums - counts = 0 if y_counts == 0 else y - error = 0 if y_counts == 0 else math.sqrt(err_y) + return self._impl(data2D) - # Added y_counts to return, SMK & PDB, 04/03/2013 - return counts, error, y_counts - - def _sum(self, data2D): - """ - Perform the sum in the region of interest - - :param data2D: Data2D object - :return: number of counts, - error on number of counts, number of entries summed - """ - if len(data2D.detector) > 1: - msg = "Circular averaging: invalid number " - msg += "of detectors: %g" % len(data2D.detector) - raise RuntimeError(msg) - # Get data - data = data2D.data[np.isfinite(data2D.data)] - err_data = None - if data2D.err_data is not None: - err_data = data2D.err_data[np.isfinite(data2D.data)] - qx_data = data2D.qx_data[np.isfinite(data2D.data)] - qy_data = data2D.qy_data[np.isfinite(data2D.data)] - mask_data = data2D.mask[np.isfinite(data2D.data)] - - y = 0.0 - err_y = 0.0 - y_counts = 0.0 - - # Average pixelsize in q space - for npts in range(len(data)): - if not mask_data[npts]: - # ignore points that are masked - continue - # default frac - frac_x = 0 - frac_y = 0 - - # get min and max at each points - qx = qx_data[npts] - qy = qy_data[npts] - - # get the ROI - if self.x_min <= qx and self.x_max > qx: - frac_x = 1 - if self.y_min <= qy and self.y_max > qy: - frac_y = 1 - # Find the fraction along each directions - frac = frac_x * frac_y - if frac == 0: - continue - y += frac * data[npts] - if err_data is None or err_data[npts] == 0.0: - if data[npts] < 0: - data[npts] = -data[npts] - err_y += frac * frac * data[npts] - else: - err_y += frac * frac * err_data[npts] * err_data[npts] - y_counts += frac - return y, err_y, y_counts - - -class Boxavg(Boxsum): - """ - Perform the average of counts in a 2D region of interest. 
- """ +class Boxavg: def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): - super(Boxavg, self).__init__(x_min=x_min, x_max=x_max, - y_min=y_min, y_max=y_max) + self._impl = AvgBoxavg(qx_range=(x_min, x_max), qy_range=(y_min, y_max)) def __call__(self, data2D): - """ - Perform the sum in the region of interest - - :param data2D: Data2D object - :return: average counts, error on average counts - - """ - y, err_y, y_counts = self._sum(data2D) - - # Average the sums - counts = 0 if y_counts == 0 else y / y_counts - error = 0 if y_counts == 0 else math.sqrt(err_y) / y_counts - - return counts, error + return self._impl(data2D) ################################################################################ class CircularAverage: """ - Perform circular averaging on 2D data - - The data returned is the distribution of counts - as a function of Q + Wrapper for new CircularAverage. + Old signature: CircularAverage(r_min=0.0, r_max=0.0, bin_width=0.0005) + New signature uses r_range and nbins; translate bin_width -> nbins. """ - def __init__(self, r_min=0.0, r_max=0.0, bin_width=0.0005): - # Minimum radius included in the average [A-1] - self.r_min = r_min - # Maximum radius included in the average [A-1] - self.r_max = r_max - # Bin width (step size) [A-1] - self.bin_width = bin_width - - def __call__(self, data2D, ismask=False): - """ - Perform circular averaging on the data - - :param data2D: Data2D object - :return: Data1D object - """ - # Get data W/ finite values - data = data2D.data[np.isfinite(data2D.data)] - q_data = data2D.q_data[np.isfinite(data2D.data)] - err_data = None - if data2D.err_data is not None: - err_data = data2D.err_data[np.isfinite(data2D.data)] - mask_data = data2D.mask[np.isfinite(data2D.data)] - - dq_data = None - if data2D.dqx_data is not None and data2D.dqy_data is not None: - dq_data = get_dq_data(data2D) - - if len(q_data) == 0: - msg = "Circular averaging: invalid q_data: %g" % data2D.q_data - raise RuntimeError(msg) - - # Build array of Q intervals - nbins = int(math.ceil((self.r_max - self.r_min) / self.bin_width)) - - x = np.zeros(nbins) - y = np.zeros(nbins) - err_y = np.zeros(nbins) - err_x = np.zeros(nbins) - y_counts = np.zeros(nbins) - - for npt in range(len(data)): - - if ismask and not mask_data[npt]: - continue - - frac = 0 - - # q-value at the pixel (j,i) - q_value = q_data[npt] - data_n = data[npt] - - # No need to calculate the frac when all data are within range - if self.r_min >= self.r_max: - raise ValueError("Limit Error: min > max") - - if self.r_min <= q_value and q_value <= self.r_max: - frac = 1 - if frac == 0: - continue - i_q = int(math.floor((q_value - self.r_min) / self.bin_width)) - - # Take care of the edge case at phi = 2pi. - if i_q == nbins: - i_q = nbins - 1 - y[i_q] += frac * data_n - # Take dqs from data to get the q_average - x[i_q] += frac * q_value - if err_data is None or err_data[npt] == 0.0: - if data_n < 0: - data_n = -data_n - err_y[i_q] += frac * frac * data_n - else: - err_y[i_q] += frac * frac * err_data[npt] * err_data[npt] - if dq_data is not None: - # To be consistent with dq calculation in 1d reduction, - # we need just the averages (not quadratures) because - # it should not depend on the number of the q points - # in the qr bins. 
- err_x[i_q] += frac * dq_data[npt] - else: - err_x = None - y_counts[i_q] += frac - - # Average the sums - for n in range(nbins): - if err_y[n] < 0: - err_y[n] = -err_y[n] - err_y[n] = math.sqrt(err_y[n]) - # if err_x is not None: - # err_x[n] = math.sqrt(err_x[n]) - - err_y = err_y / y_counts - err_y[err_y == 0] = np.average(err_y) - y = y / y_counts - x = x / y_counts - idx = (np.isfinite(y)) & (np.isfinite(x)) - - if err_x is not None: - d_x = err_x[idx] / y_counts[idx] - else: - d_x = None - - if not idx.any(): - msg = "Average Error: No points inside ROI to average..." - raise ValueError(msg) + width = max(r_max - r_min, 1e-12) + nbins = int(math.ceil(width / abs(bin_width))) if bin_width != 0 else 1 + self._impl = AvgCircularAverage(r_range=(r_min, r_max), nbins=nbins) - return Data1D(x=x[idx], y=y[idx], dy=err_y[idx], dx=d_x) + def __call__(self, data2D): + return self._impl(data2D) ################################################################################ class Ring: """ - Defines a ring on a 2D data set. - The ring is defined by r_min, r_max, and - the position of the center of the ring. - - The data returned is the distribution of counts - around the ring as a function of phi. - - Phi_min and phi_max should be defined between 0 and 2*pi - in anti-clockwise starting from the x- axis on the left-hand side + Wrapper for new Ring. + Old signature: Ring(r_min=0, r_max=0, center_x=0, center_y=0, nbins=36) + New signature: Ring(r_range, nbins) + center_x/center_y are ignored for compatibility. """ - # Todo: remove center. - - def __init__(self, r_min=0, r_max=0, center_x=0, center_y=0, nbins=36): - # Minimum radius - self.r_min = r_min - # Maximum radius - self.r_max = r_max - # Center of the ring in x - self.center_x = center_x - # Center of the ring in y - self.center_y = center_y - # Number of angular bins - self.nbins_phi = nbins + def __init__(self, r_min=0.0, r_max=0.0, center_x=0.0, center_y=0.0, nbins=36): + self._impl = AvgRing(r_range=(r_min, r_max), center=(center_x, center_y),nbins=nbins) def __call__(self, data2D): - """ - Apply the ring to the data set. 
- Returns the angular distribution for a given q range + return self._impl(data2D) - :param data2D: Data2D object - :return: Data1D object - """ - if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: - raise RuntimeError("Ring averaging only take plottable_2D objects") - - Pi = math.pi - - # Get data - data = data2D.data[np.isfinite(data2D.data)] - q_data = data2D.q_data[np.isfinite(data2D.data)] - err_data = None - if data2D.err_data is not None: - err_data = data2D.err_data[np.isfinite(data2D.data)] - qx_data = data2D.qx_data[np.isfinite(data2D.data)] - qy_data = data2D.qy_data[np.isfinite(data2D.data)] - mask_data = data2D.mask[np.isfinite(data2D.data)] - - # Set space for 1d outputs - phi_bins = np.zeros(self.nbins_phi) - phi_counts = np.zeros(self.nbins_phi) - phi_values = np.zeros(self.nbins_phi) - phi_err = np.zeros(self.nbins_phi) - - # Shift to apply to calculated phi values in order - # to center first bin at zero - phi_shift = Pi / self.nbins_phi - - for npt in range(len(data)): - if not mask_data[npt]: - # ignore points that are masked - continue - frac = 0 - # q-value at the point (npt) - q_value = q_data[npt] - data_n = data[npt] - - # phi-value at the point (npt) - phi_value = math.atan2(qy_data[npt], qx_data[npt]) + Pi - - if self.r_min <= q_value and q_value <= self.r_max: - frac = 1 - if frac == 0: - continue - # binning - i_phi = int(math.floor((self.nbins_phi) * - (phi_value + phi_shift) / (2 * Pi))) - - # Take care of the edge case at phi = 2pi. - if i_phi >= self.nbins_phi: - i_phi = 0 - phi_bins[i_phi] += frac * data[npt] - - if err_data is None or err_data[npt] == 0.0: - if data_n < 0: - data_n = -data_n - phi_err[i_phi] += frac * frac * math.fabs(data_n) - else: - phi_err[i_phi] += frac * frac * err_data[npt] * err_data[npt] - phi_counts[i_phi] += frac - - for i in range(self.nbins_phi): - phi_bins[i] = phi_bins[i] / phi_counts[i] - phi_err[i] = math.sqrt(phi_err[i]) / phi_counts[i] - phi_values[i] = 2.0 * math.pi / self.nbins_phi * (1.0 * i) - - idx = (np.isfinite(phi_bins)) - - if not idx.any(): - msg = "Average Error: No points inside ROI to average..." - raise ValueError(msg) - - return Data1D(x=phi_values[idx], y=phi_bins[idx], dy=phi_err[idx]) - - -class _Sector: +class SectorPhi: """ - Defines a sector region on a 2D data set. - The sector is defined by r_min, r_max, phi_min and phi_max. - phi_min and phi_max are defined by the right and left lines wrt a central - line such that phi_max could be less than phi_min if they straddle the - discontinuity from 2pi to 0. - - Phi is defined between 0 and 2*pi in anti-clockwise - starting from the negative x-axis. + Backwards-compatible name for angular sector averaging. + Delegates to new SectorPhi/WedgePhi implementation. 
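+
+    Example (an illustrative sketch; ``data`` is assumed to be a populated
+    Data2D object)::
+
+        sector = SectorPhi(r_min=0.01, r_max=0.05,
+                           phi_min=0.0, phi_max=math.pi, nbins=36)
+        profile = sector(data)  # Data1D of I(phi), averaged over |Q|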
""" + def __init__(self, r_min=0.0, r_max=0.0, phi_min=0.0, phi_max=2 * math.pi, center_x=0.0, center_y=0.0, nbins=20): + # SectorPhi in new module is essentially WedgePhi; pass through phi_range and nbins + self._impl = AvgSectorPhi(r_range=(r_min, r_max), phi_range=(phi_min, phi_max),center=(center_x, center_y), + nbins=nbins) - def __init__(self, r_min, r_max, phi_min=0, phi_max=2 * math.pi, nbins=20, - base=None): - ''' - :param base: must be a valid base for an algorithm, i.e., - a positive number - ''' - self.r_min = r_min - self.r_max = r_max - self.phi_min = phi_min - self.phi_max = phi_max - self.nbins = nbins - self.base = base - - # set up to use the asymmetric sector average - default to symmetric - self.fold = True - - def _agv(self, data2D, run='phi'): - """ - Perform sector averaging. - - :param data2D: Data2D object - :param run: define the varying parameter ('phi' , or 'sector') - - :return: Data1D object - """ - if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: - raise RuntimeError("Ring averaging only take plottable_2D objects") - - # Get all the data & info - data = data2D.data[np.isfinite(data2D.data)] - q_data = data2D.q_data[np.isfinite(data2D.data)] - err_data=None - if data2D.err_data is not None: - err_data = data2D.err_data[np.isfinite(data2D.data)] - qx_data = data2D.qx_data[np.isfinite(data2D.data)] - qy_data = data2D.qy_data[np.isfinite(data2D.data)] - mask_data = data2D.mask[np.isfinite(data2D.data)] - - dq_data = None - if data2D.dqx_data is not None and data2D.dqy_data is not None: - dq_data = get_dq_data(data2D) - - # set space for 1d outputs - x = np.zeros(self.nbins) - y = np.zeros(self.nbins) - y_err = np.zeros(self.nbins) - x_err = np.zeros(self.nbins) - y_counts = np.zeros(self.nbins) # Cycle counts (for the mean) - - # Get the min and max into the region: 0 <= phi < 2Pi - phi_min = flip_phi(self.phi_min) - phi_max = flip_phi(self.phi_max) - # Now calculate the angles for the opposite side sector, here referred - # to as "minor wing," and ensure these too are within 0 to 2pi - phi_min_minor = flip_phi(phi_min - math.pi) - phi_max_minor = flip_phi(phi_max - math.pi) - - # set up the bins by creating a binning object - if run.lower() == 'phi': - # The check here ensures when a range straddles the discontinuity - # inherent in circular angles (jumping from 2pi to 0) that the - # Binning class still recieves a continuous interval. phi_min/max - # are used here instead of self.phi_min/max as they are always in - # the range 0, 2pi - making their values more predictable. - # Note that their values must not be altered, as they are used to - # determine what points (also in the range 0, 2pi) are in the ROI. - if phi_min > phi_max: - binning = Binning(phi_min, phi_max + 2 * np.pi, self.nbins, self.base) - else: - binning = Binning(phi_min, phi_max, self.nbins, self.base) - elif self.fold: - binning = Binning(self.r_min, self.r_max, self.nbins, self.base) - else: - binning = Binning(-self.r_max, self.r_max, self.nbins, self.base) - - for n in range(len(data)): - if not mask_data[n]: - # ignore points that are masked - continue - - # q-value at the pixel (j,i) - q_value = q_data[n] - data_n = data[n] - - # Is pixel within range? 
- is_in = False - - # calculate the phi-value of the pixel (j,i) and convert the range - # [-pi,pi] returned by the atan2 function to the [0,2pi] range used - # as the reference frame for these calculations - phi_value = math.atan2(qy_data[n], qx_data[n]) + math.pi - - # No need to calculate: data outside of the radius - if self.r_min > q_value or q_value > self.r_max: - continue - - # For all cases(i.e.,for 'sector' (fold true or false), and 'phi') - # Find pixels within the main ROI (primary sector (main wing) - # in the case of sectors) - if phi_min > phi_max: - is_in = is_in or (phi_value > phi_min or - phi_value < phi_max) - else: - is_in = is_in or (phi_value >= phi_min and - phi_value < phi_max) - - # For sector cuts we need to check if the point is within the - # "minor wing" before checking if it is in the major wing. - # There are effectively two ROIs here as each sector on opposite - # sides of 0,0 need to be checked separately. - if run.lower() == 'sector' and not is_in: - if phi_min_minor > phi_max_minor: - is_in = (phi_value > phi_min_minor or - phi_value < phi_max_minor) - else: - is_in = (phi_value > phi_min_minor and - phi_value < phi_max_minor) - # now, if we want to keep both sides separate we arbitrarily, - # assign negative q to the qs in the minor wing. As calculated, - # all qs are postive and in fact all qs in the same ring are - # the same. This will allow us to plot both sides of 0,0 - # independently. - if not self.fold: - if is_in: - q_value *= -1 - - # data oustide of the phi range - if not is_in: - continue - - # Get the binning index - if run.lower() == 'phi': - # If the original range used to instantiate `binning` was - # shifted by 2pi to accommodate the 2pi to 0 discontinuity, - # then phi_value needs to be shifted too so that it falls in - # the continuous range set up for the binning process. - if phi_min > phi_value: - i_bin = binning.get_bin_index(phi_value + 2 * np.pi) - else: - i_bin = binning.get_bin_index(phi_value) - else: - i_bin = binning.get_bin_index(q_value) - - # Take care of the edge case at phi = 2pi. - if i_bin == self.nbins: - i_bin = self.nbins - 1 - - # Get the total y - y[i_bin] += data_n - x[i_bin] += q_value - if err_data is None or err_data[n] == 0.0: - if data_n < 0: - data_n = -data_n - y_err[i_bin] += data_n - else: - y_err[i_bin] += err_data[n]**2 - - if dq_data is not None: - # To be consistent with dq calculation in 1d reduction, - # we need just the averages (not quadratures) because - # it should not depend on the number of the q points - # in the qr bins. - x_err[i_bin] += dq_data[n] - else: - x_err = None - y_counts[i_bin] += 1 - - # Organize the results - with np.errstate(divide='ignore', invalid='ignore'): - y = y/y_counts - y_err = np.sqrt(y_err)/y_counts - # Calculate x values at the center of the bin depending on the - # the type of averaging (phi or sector) - if run.lower() == 'phi': - # Determining the step size is best done via the binning - # object's interval, as this is set up so max > min in all - # cases. One could also use phi_min and phi_max, so long as - # they have not been changed. - # In setting up x, phi_min makes a better starting point than - # self.phi_min, as the resulting array is garenteed to be > 0 - # throughout. This works better with the sasview gui, which - # will convert the result to the range -pi, pi. 
- step = (binning.max - binning.min) / self.nbins - x = (np.arange(self.nbins) + 0.5) * step + phi_min - else: - # set q to the average of the q values within each bin - x = x/y_counts - - ### Alternate algorithm - ## We take the center of ring area, not radius. - ## This is more accurate than taking the radial center of ring. - #step = (self.r_max - self.r_min) / self.nbins - #r_inner = self.r_min + step * np.arange(self.nbins) - #x = math.sqrt((r_inner**2 + (r_inner + step)**2) / 2) - - idx = (np.isfinite(y) & np.isfinite(y_err)) - if x_err is not None: - d_x = x_err[idx] / y_counts[idx] - else: - d_x = None - if not idx.any(): - msg = "Average Error: No points inside sector of ROI to average..." - raise ValueError(msg) - return Data1D(x=x[idx], y=y[idx], dy=y_err[idx], dx=d_x) + def __call__(self, data2D): + return self._impl(data2D) -class SectorPhi(_Sector): +class SectorQ: """ - Sector average as a function of phi. - I(phi) is return and the data is averaged over Q. - - A sector is defined by r_min, r_max, phi_min, phi_max. - The number of bin in phi also has to be defined. + Wrapper for new SectorQ. + Old signature: SectorQ(r_min, r_max, phi_min=0, phi_max=2*pi, nbins=20, base=None) + New signature: SectorQ(r_range, phi_range=(0,2pi), nbins=100, fold=True) + Keeps the same default folding behaviour (fold True). """ + def __init__(self, r_min, r_max, phi_min=0, phi_max=2 * math.pi, center_x=0.0, center_y=0.0, nbins=20, base=None): + self._impl = AvgSectorQ(r_range=(r_min, r_max), phi_range=(phi_min, phi_max), center=(center_x, center_y), + nbins=nbins, fold=True) def __call__(self, data2D): - """ - Perform sector average and return I(phi). - - :param data2D: Data2D object - :return: Data1D object - """ - return self._agv(data2D, 'phi') - + return self._impl(data2D) -class SectorQ(_Sector): +class WedgePhi: """ - Sector average as a function of Q for both wings. setting the _Sector.fold - attribute determines whether or not the two sectors are averaged together - (folded over) or separate. In the case of separate (not folded), the - qs for the "minor wing" are arbitrarily set to a negative value. - I(Q) is returned and the data is averaged over phi. - - A sector is defined by r_min, r_max, phi_min, phi_max. - where r_min, r_max, phi_min, phi_max >0. - The number of bin in Q also has to be defined. + Wrapper for new WedgePhi (behaviour matches legacy WedgePhi expectations). """ + def __init__(self, r_min, r_max, phi_min=0, phi_max=2 * math.pi, center_x=0.0, center_y=0.0, nbins=20): + self._impl = AvgWedgePhi(r_range=(r_min, r_max), phi_range=(phi_min, phi_max), center=(center_x, center_y), + nbins=nbins) def __call__(self, data2D): - """ - Perform sector average and return I(Q). - - :param data2D: Data2D object - - :return: Data1D object - """ - return self._agv(data2D, 'sector') - -################################################################################ - + return self._impl(data2D) -class Ringcut: +class WedgeQ: """ - Defines a ring on a 2D data set. - The ring is defined by r_min, r_max, and - the position of the center of the ring. - - The data returned is the region inside the ring - - Phi_min and phi_max should be defined between 0 and 2*pi - in anti-clockwise starting from the x- axis on the left-hand side + Wrapper for new WedgeQ (behaviour matches legacy WedgeQ expectations). 
""" - - def __init__(self, r_min=0, r_max=0, center_x=0, center_y=0): - # Minimum radius - self.r_min = r_min - # Maximum radius - self.r_max = r_max - # Center of the ring in x - self.center_x = center_x - # Center of the ring in y - self.center_y = center_y + def __init__(self, r_min, r_max, phi_min=0, phi_max=2 * math.pi, center_x=0.0, center_y=0.0, nbins=20): + self._impl = AvgWedgeQ(r_range=(r_min, r_max), phi_range=(phi_min, phi_max), center=(center_x, center_y), + nbins=nbins) def __call__(self, data2D): - """ - Apply the ring to the data set. - Returns the angular distribution for a given q range + return self._impl(data2D) - :param data2D: Data2D object +################################################################################ - :return: index array in the range - """ - if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: - raise RuntimeError("Ring cut only take plottable_2D objects") - # Get data - qx_data = data2D.qx_data - qy_data = data2D.qy_data - q_data = np.sqrt(qx_data * qx_data + qy_data * qy_data) +class Ringcut: + def __init__(self, r_min=0.0, r_max=0.0, center_x=0.0, center_y=0.0): + # center_x, center_y ignored for compatibility + self._impl = AvgRingcut(r_range=(r_min, r_max), phi_range=(0.0, 2 * math.pi), center=(center_x, center_y)) - # check whether or not the data point is inside ROI - out = (self.r_min <= q_data) & (self.r_max >= q_data) - return out + def __call__(self, data2D): + return self._impl(data2D) ################################################################################ class Boxcut: - """ - Find a rectangular 2D region of interest. - """ - def __init__(self, x_min=0.0, x_max=0.0, y_min=0.0, y_max=0.0): - # Minimum Qx value [A-1] - self.x_min = x_min - # Maximum Qx value [A-1] - self.x_max = x_max - # Minimum Qy value [A-1] - self.y_min = y_min - # Maximum Qy value [A-1] - self.y_max = y_max + self._impl = AvgBoxcut(qx_range=(x_min, x_max), qy_range=(y_min, y_max)) def __call__(self, data2D): - """ - Find a rectangular 2D region of interest. - - :param data2D: Data2D object - :return: mask, 1d array (len = len(data)) - with Trues where the data points are inside ROI, otherwise False - """ - mask = self._find(data2D) - - return mask - - def _find(self, data2D): - """ - Find a rectangular 2D region of interest. - - :param data2D: Data2D object - - :return: out, 1d array (length = len(data)) - with Trues where the data points are inside ROI, otherwise Falses - """ - if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]: - raise RuntimeError("Boxcut take only plottable_2D objects") - # Get qx_ and qy_data - qx_data = data2D.qx_data - qy_data = data2D.qy_data - - # check whether or not the data point is inside ROI - outx = (self.x_min <= qx_data) & (self.x_max > qx_data) - outy = (self.y_min <= qy_data) & (self.y_max > qy_data) - - return outx & outy + return self._impl(data2D) ################################################################################ class Sectorcut: - """ - Defines a sector (major + minor) region on a 2D data set. - The sector is defined by phi_min, phi_max, - where phi_min and phi_max are defined by the right - and left lines wrt central line. 
-
-    Phi_min and phi_max are given in units of radian
-    and (phi_max-phi_min) should not be larger than pi
-    """
-
-    def __init__(self, phi_min=0, phi_max=math.pi):
-        self.phi_min = phi_min
-        self.phi_max = phi_max
+    def __init__(self, phi_min=0.0, phi_max=math.pi, center_x=0.0, center_y=0.0):
+        # The new Sectorcut expects a phi_range; the radial range defaults to
+        # the full image.
+        self._impl = AvgSectorcut(phi_range=(phi_min, phi_max), center=(center_x, center_y))

     def __call__(self, data2D):
-        """
-        Find a rectangular 2D region of interest.
-
-        :param data2D: Data2D object
-
-        :return: mask, 1d array (len = len(data))
-
-        with Trues where the data points are inside ROI, otherwise False
-        """
-        mask = self._find(data2D)
-
-        return mask
-
-    def _find(self, data2D):
-        """
-        Find a rectangular 2D region of interest.
-
-        :param data2D: Data2D object
-
-        :return: out, 1d array (length = len(data))
-
-        with Trues where the data points are inside ROI, otherwise Falses
-        """
-        if data2D.__class__.__name__ not in ["Data2D", "plottable_2D"]:
-            raise RuntimeError("Sectorcut take only plottable_2D objects")
-        Pi = math.pi
-        # Get data
-        qx_data = data2D.qx_data
-        qy_data = data2D.qy_data
-
-        # get phi from data
-        phi_data = np.arctan2(qy_data, qx_data)
-
-        # Get the min and max into the region: -pi <= phi < Pi
-        phi_min_major = flip_phi(self.phi_min + Pi) - Pi
-        phi_max_major = flip_phi(self.phi_max + Pi) - Pi
-        # check for major sector
-        if phi_min_major > phi_max_major:
-            out_major = (phi_min_major <= phi_data) + \
-                (phi_max_major > phi_data)
-        else:
-            out_major = (phi_min_major <= phi_data) & (
-                phi_max_major > phi_data)
-
-        # minor sector
-        # Get the min and max into the region: -pi <= phi < Pi
-        phi_min_minor = flip_phi(self.phi_min) - Pi
-        phi_max_minor = flip_phi(self.phi_max) - Pi
-
-        # check for minor sector
-        if phi_min_minor > phi_max_minor:
-            out_minor = (phi_min_minor <= phi_data) + \
-                (phi_max_minor >= phi_data)
-        else:
-            out_minor = (phi_min_minor <= phi_data) & \
-                (phi_max_minor >= phi_data)
-        out = out_major + out_minor
-
-        return out
+        return self._impl(data2D)
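The wrappers above preserve the legacy call signatures while delegating to the new range-based implementations. A minimal sketch of the correspondence (illustrative only; it assumes the new classes are importable from sasdata.data_util.averaging, as the tests later in this patch do):

    import math

    from sasdata.data_util.averaging import SectorQ

    # The legacy API took separate positional bounds (r_min, r_max, phi_min,
    # phi_max); the new classes take (min, max) range tuples instead. Both
    # old and new objects are callables taking a Data2D and returning Data1D.
    sector = SectorQ(r_range=(0.01, 0.1),
                     phi_range=(0.0, math.pi / 2),
                     nbins=20, fold=True)
    # data1d = sector(data2d)  # data2d: a sasdata Data2D object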
diff --git a/sasdata/data_util/roi.py b/sasdata/data_util/roi.py
new file mode 100644
index 00000000..ac562ea6
--- /dev/null
+++ b/sasdata/data_util/roi.py
@@ -0,0 +1,129 @@
+import numpy as np
+
+from sasdata.dataloader.data_info import Data2D
+
+
+class GenericROI:
+    """
+    Base class used to set up the data from a Data2D object for processing.
+    This class performs any coordinate system independent setup and validation.
+    """
+
+    def __init__(self):
+        """
+        Assign the variables used to label the properties of the Data2D object.
+
+        In classes inheriting from GenericROI, the variables used to define the
+        boundaries of the Region Of Interest are also set up during __init__.
+        """
+
+        self.data = None
+        self.err_data = None
+        self.q_data = None
+        self.qx_data = None
+        self.qy_data = None
+        self.center_x = 0.0
+        self.center_y = 0.0
+
+    def validate_and_assign_data(self, data2d: Data2D = None) -> None:
+        """
+        Check that the data supplied is valid and assign it to variables.
+        This method must be executed before any further data processing
+        happens.
+
+        :param data2d: A Data2D object which is the target of a child class'
+            data manipulations.
+        """
+        # Check that the supplied data2d is valid and usable.
+        if not isinstance(data2d, Data2D):
+            msg = "Data supplied must be of type Data2D."
+            raise TypeError(msg)
+        if len(data2d.detector) > 1:
+            msg = f"Invalid number of detectors: {len(data2d.detector)}"
+            raise ValueError(msg)
+
+        # Only use data which is finite and not masked off
+        valid_data = np.isfinite(data2d.data) & data2d.mask
+
+        # Assign properties of the Data2D object to variables for reference
+        # during data processing. The ROI centre is subtracted from the
+        # Cartesian components, and |Q| is then recomputed from the shifted
+        # components; subtracting the centre's magnitude from the original
+        # |Q| values would be wrong for points not collinear with the centre.
+        self.data = data2d.data[valid_data]
+        self.err_data = data2d.err_data[valid_data]
+        self.qx_data = data2d.qx_data[valid_data] - self.center_x
+        self.qy_data = data2d.qy_data[valid_data] - self.center_y
+        self.q_data = np.sqrt(self.qx_data**2 + self.qy_data**2)
+
+        # No points should have zero error, if they do then assume the error is
+        # the square root of the data. This code was added to replicate
+        # previous functionality. It's a bit dodgy, so feel free to remove.
+        self.err_data[self.err_data == 0] = \
+            np.sqrt(np.abs(self.data[self.err_data == 0]))
+
+class CartesianROI(GenericROI):
+    """
+    Base class for data manipulators with a Cartesian (rectangular) ROI.
+    """
+
+    def __init__(self, qx_range: tuple[float, float] = (0.0, 0.0), qy_range: tuple[float, float] = (0.0, 0.0)) -> None:
+        """
+        Assign the variables used to label the properties of the Data2D object.
+        Also establish the upper and lower bounds defining the ROI.
+
+        The units of these parameters are A^-1
+        :param qx_range: Bounds of the ROI along the Q_x direction.
+        :param qy_range: Bounds of the ROI along the Q_y direction.
+        """
+        qx_min, qx_max = qx_range
+        qy_min, qy_max = qy_range
+        super().__init__()
+        self.qx_min = qx_min
+        self.qx_max = qx_max
+        self.qy_min = qy_min
+        self.qy_max = qy_max
+
+class PolarROI(GenericROI):
+    """
+    Base class for data manipulators with a polar ROI.
+    """
+
+    def __init__(self, r_range: tuple[float, float], phi_range: tuple[float, float] = (0.0, 2*np.pi), center: tuple[float, float] = (0.0, 0.0)) -> None:
+        """
+        Assign the variables used to label the properties of the Data2D object.
+        Also establish the upper and lower bounds defining the ROI.
+
+        The units are A^-1 for radial parameters, and radians for angular ones.
+        :param r_range: Tuple (r_min, r_max) defining limits for |Q| values to use during averaging.
+        :param phi_range: Tuple (phi_min, phi_max) defining limits for φ in radians (in the ROI).
+        :param center: Tuple (x, y) giving the point on which the ROI is centred.
+
+        Note that Phi is measured anti-clockwise from the positive x-axis.
+        """
+        r_min, r_max = r_range
+        phi_min, phi_max = phi_range
+        center_x, center_y = center
+        super().__init__()
+
+        self.phi_data = None
+
+        if r_min >= r_max:
+            msg = "Minimum radius must be less than maximum radius."
+            raise ValueError(msg)
+        # Units A^-1 for radii, radians for angles
+        self.r_min = r_min
+        self.r_max = r_max
+        self.phi_min = phi_min
+        self.phi_max = phi_max
+        self.center_x = center_x
+        self.center_y = center_y
+
+    def validate_and_assign_data(self, data2d: Data2D = None) -> None:
+        """
+        Check that the data supplied is valid and assign it to variables.
+        This method must be executed before any further data processing
+        happens.
+
+        :param data2d: A Data2D object which is the target of a child class'
+            data manipulations.
+        """
+
+        # Most validation and pre-processing is taken care of by GenericROI.
+        super().validate_and_assign_data(data2d)
+        # Phi data can be calculated from the Cartesian Q coordinates.
+        self.phi_data = np.arctan2(self.qy_data, self.qx_data)
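A quick numerical illustration (not part of the module) of why validate_and_assign_data above recomputes |Q| from the shifted components rather than subtracting the centre's magnitude from the original |Q| values:

    import numpy as np

    qx, qy = np.array([0.1, -0.2]), np.array([0.3, 0.1])
    cx, cy = 0.05, 0.02

    # Incorrect: |q| - |c| only equals the true distance from the centre when
    # q and c are collinear and point the same way.
    wrong = np.sqrt(qx**2 + qy**2) - np.sqrt(cx**2 + cy**2)
    # Correct: recompute the magnitude from the shifted components.
    right = np.sqrt((qx - cx)**2 + (qy - cy)**2)
    assert not np.allclose(wrong, right)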
diff --git a/sasdata/dataloader/data_info.py b/sasdata/dataloader/data_info.py
index 16676a4b..8c3a199e 100644
--- a/sasdata/dataloader/data_info.py
+++ b/sasdata/dataloader/data_info.py
@@ -1234,6 +1234,43 @@ def _perform_union(self, other):
         return result
 
 
+def reader2D_converter(data2d: Data2D | None = None) -> Data2D:
+    """
+    Convert data in the old 2D format produced by IhorReader or danse_reader
+    into the new Data2D format. This is mainly used by the readers.
+
+    :param data2d: Data2D object holding gridded x_bins/y_bins data.
+    :return: the same Data2D object, with the data flattened into 1D arrays.
+    """
+    if data2d is None or data2d.data is None or data2d.x_bins is None or data2d.y_bins is None:
+        raise ValueError("Can't convert this data: data=None...")
+    new_x = np.tile(data2d.x_bins, (len(data2d.y_bins), 1))
+    new_y = np.tile(data2d.y_bins, (len(data2d.x_bins), 1))
+    new_y = new_y.swapaxes(0, 1)
+
+    new_data = data2d.data.flatten()
+    qx_data = new_x.flatten()
+    qy_data = new_y.flatten()
+    q_data = np.sqrt(qx_data * qx_data + qy_data * qy_data)
+    if data2d.err_data is None or np.any(data2d.err_data <= 0):
+        new_err_data = np.sqrt(np.abs(new_data))
+    else:
+        new_err_data = data2d.err_data.flatten()
+    mask = np.ones(len(new_data), dtype=bool)
+
+    output = data2d
+    output.data = new_data
+    output.err_data = new_err_data
+    output.qx_data = qx_data
+    output.qy_data = qy_data
+    output.q_data = q_data
+    output.mask = mask
+
+    return output
+
+
 def combine_data_info_with_plottable(data, datainfo):
     """
     A function that combines the DataInfo data in self.current_datainto with a
diff --git a/test/sasmanipulations/data/MAR07232_rest.h5 b/test/sasmanipulations/data/MAR07232_rest.h5
new file mode 100644
index 00000000..053eecaf
Binary files /dev/null and b/test/sasmanipulations/data/MAR07232_rest.h5 differ
diff --git a/test/sasmanipulations/data/avg_testdata.txt b/test/sasmanipulations/data/avg_testdata.txt
new file mode 100644
index 00000000..3fd5dde3
--- /dev/null
+++ b/test/sasmanipulations/data/avg_testdata.txt
@@ -0,0 +1,21 @@
+0.00019987186878 -0.01196215 0.148605728355
+0.000453772721237 0.02091606 0.23372601
+0.000750492390439 -0.01337855 0.17169562
+0.00103996394336 0.03062 0.13136407
+0.0013420198959 0.0811008333333 0.10681163
+0.001652061869 0.167022288372 0.10098903
+0.00196086470492 27.5554711176 0.7350533
+0.00226262401224 105.031578947 1.35744586624
+0.00256734439716 82.1791776119 1.10749938588
+0.0028637128388 54.714657971 0.890486416264
+0.00315257408712 36.8455584416 0.691746880003
+0.00344644126616 24.8938701149 0.534917225468
+0.00374248202229 16.5905619565 0.424655384023
+0.00404393067437 11.4714217925 0.328969543128
+0.004346317814 8.05405805556 0.273083524998
+0.00465162170627 5.5823291129 0.21217630209
+0.00495449803049 4.2574845082 0.186808495528
+0.00525641407066 3.30448963768 0.154743584955
+0.00555735057365 2.6995389781 0.140373302568
+0.00585577429002 2.03298512 0.116418358232
+
diff --git a/test/sasdataloader/data/ring_testdata.txt b/test/sasmanipulations/data/ring_testdata.txt
similarity index 100%
rename from test/sasdataloader/data/ring_testdata.txt
rename to test/sasmanipulations/data/ring_testdata.txt
diff --git a/test/sasdataloader/data/sectorphi_testdata.txt b/test/sasmanipulations/data/sectorphi_testdata.txt
similarity index 100%
rename from test/sasdataloader/data/sectorphi_testdata.txt
rename to test/sasmanipulations/data/sectorphi_testdata.txt
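A usage sketch for the reader2D_converter added above (illustrative only; it assumes a Data2D can be instantiated empty and populated field by field, which is all the converter itself relies on):

    import numpy as np

    from sasdata.dataloader.data_info import Data2D, reader2D_converter

    raw = Data2D()
    raw.data = np.arange(12, dtype=float).reshape(3, 4)  # 3 qy rows x 4 qx cols
    raw.x_bins = np.linspace(-0.1, 0.1, 4)               # qx bin values
    raw.y_bins = np.linspace(-0.05, 0.05, 3)             # qy bin values

    flat = reader2D_converter(raw)
    # The gridded data is now flattened into matching 1D arrays.
    assert flat.data.shape == flat.qx_data.shape == flat.qy_data.shape == (12,)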
diff --git a/test/sasdataloader/data/sectorq_testdata.txt b/test/sasmanipulations/data/sectorq_testdata.txt
similarity index 100%
rename from test/sasdataloader/data/sectorq_testdata.txt
rename to test/sasmanipulations/data/sectorq_testdata.txt
diff --git a/test/sasdataloader/data/slabx_testdata.txt b/test/sasmanipulations/data/slabx_testdata.txt
similarity index 100%
rename from test/sasdataloader/data/slabx_testdata.txt
rename to test/sasmanipulations/data/slabx_testdata.txt
diff --git a/test/sasdataloader/data/slaby_testdata.txt b/test/sasmanipulations/data/slaby_testdata.txt
similarity index 100%
rename from test/sasdataloader/data/slaby_testdata.txt
rename to test/sasmanipulations/data/slaby_testdata.txt
diff --git a/test/sasmanipulations/helper.py b/test/sasmanipulations/helper.py
new file mode 100644
index 00000000..1b52d8c8
--- /dev/null
+++ b/test/sasmanipulations/helper.py
@@ -0,0 +1,235 @@
+"""
+Shared test helpers for averaging tests.
+"""
+import numpy as np
+from scipy import integrate
+
+from sasdata.dataloader import data_info
+
+
+def make_dd_from_func(func, matrix_size=201):
+    """
+    Create a MatrixToData2D from a function of (x, y). Returns the
+    MatrixToData2D instance and matrix_size for convenience.
+    func should accept (x, y) meshgrid arrays and return a 2D array.
+    """
+    x, y = np.meshgrid(np.linspace(-1, 1, matrix_size),
+                       np.linspace(-1, 1, matrix_size))
+    mat = func(x, y)
+    return MatrixToData2D(data2d=mat), matrix_size
+
+def make_uniform_dd(shape=(100, 100), value=1.0):
+    """Convenience for tests that need a constant matrix Data2D."""
+    mat = np.full(shape, value, dtype=float)
+    return MatrixToData2D(data2d=mat)
+
+def integrate_1d_output(output, method="simpson"):
+    """
+    Integrate output from an averager consistently.
+    - If output is a Data1D-like object with .x and .y -> integrate y(x)
+    - If output is a tuple (result, error[, npoints]) -> return numeric result
+    """
+    if hasattr(output, "x") and hasattr(output, "y"):
+        if method == "trapezoid":
+            return integrate.trapezoid(output.y, output.x)
+        return integrate.simpson(output.y, output.x)
+    if isinstance(output, tuple) and len(output) >= 1:
+        return output[0]
+    raise TypeError("Unsupported averager output type: %r" % type(output))
+
+
+def expected_slabx_area(qx_min, qx_max, qy_min, qy_max):
+    # data = x^2 * y -> integrate x^2 dx and average y across qy range
+    x_part_integ = (qx_max**3 - qx_min**3) / 3
+    y_part_integ = (qy_max**2 - qy_min**2) / 2
+    y_part_avg = y_part_integ / (qy_max - qy_min)
+    return y_part_avg * x_part_integ
+
+def expected_slaby_area(qx_min, qx_max, qy_min, qy_max):
+    # data = x * y^2 -> integrate y^2 dy and average x across qx range
+    y_part_integ = (qy_max**3 - qy_min**3) / 3
+    x_part_integ = (qx_max**2 - qx_min**2) / 2
+    x_part_avg = x_part_integ / (qx_max - qx_min)
+    return x_part_avg * y_part_integ
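The two expected_slab*_area helpers above encode the closed-form integrals of the separable test functions used by the slab tests. A quick numerical cross-check of expected_slabx_area (illustrative only):

    import numpy as np
    from scipy import integrate

    qx_min, qx_max, qy_min, qy_max = -0.5, 1.0, -0.5, 1.0

    # f(x, y) = x**2 * y, with the y dependence averaged over the qy range
    x = np.linspace(qx_min, qx_max, 1001)
    y = np.linspace(qy_min, qy_max, 1001)
    integrand = x**2 * np.mean(y)
    numeric = integrate.simpson(integrand, x=x)

    analytic = ((qx_max**3 - qx_min**3) / 3) * \
               (((qy_max**2 - qy_min**2) / 2) / (qy_max - qy_min))
    assert abs(numeric - analytic) < 1e-9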
+ """ + mat = np.asarray(matrix) + if slice_rows is not None and slice_cols is not None: + mat = mat[np.ix_(slice_rows, slice_cols)] + total = np.sum(mat) + err = np.sqrt(np.sum(mat)) + return total, err + +def expected_boxavg_and_err(matrix, slice_rows=None, slice_cols=None): + """ + Compute expected Boxavg (mean) and its error for a given 2D numpy matrix. + Error uses sqrt(sum)/N as in existing tests. + """ + mat = np.asarray(matrix) + if slice_rows is not None and slice_cols is not None: + mat = mat[np.ix_(slice_rows, slice_cols)] + avg = np.mean(mat) if mat.size > 0 else 0.0 + err = np.sqrt(np.sum(mat)) / mat.size if mat.size > 0 else 0.0 + return avg, err + + +class MatrixToData2D: + """ + Create Data2D objects from supplied 2D arrays of data. + Error data can also be included. + + Adapted from sasdata.data_util.manipulations.reader_2D_converter + """ + + def __init__(self, data2d=None, err_data=None): + matrix, err_arr = self._validate_and_convert_inputs(data2d, err_data) + qx_bins, qy_bins = self._compute_bins(matrix.shape) + data_flat, err_flat, qx_data, qy_data, q_data, mask = self._build_flat_arrays(matrix, err_arr, qx_bins, qy_bins) + + # qmax can be any number, 1 just makes things simple. + self.qmax = 1 + # Creating a Data2D object to use for testing the averagers. + self.data = data_info.Data2D(data=data_flat, err_data=err_flat, + qx_data=qx_data, qy_data=qy_data, + q_data=q_data, mask=mask) + + def _validate_and_convert_inputs(self, data2d, err_data): + """Validate inputs and coerce to numpy arrays. Returns (matrix, err_data_or_None).""" + if data2d is None: + raise ValueError("Data must be supplied to convert to Data2D") + matrix = np.asarray(data2d) + if matrix.ndim != 2: + raise ValueError("Supplied array must have 2 dimensions to convert to Data2D") + + if err_data is not None: + err_arr = np.asarray(err_data) + if err_arr.shape != matrix.shape: + raise ValueError("Data and errors must have the same shape") + else: + err_arr = None + return matrix, err_arr + + def _compute_bins(self, matrix_shape): + """Compute qx and qy bin edges given the matrix shape.""" + cols = matrix_shape[1] + rows = matrix_shape[0] + qx_bins = np.linspace(start=-1 * 1, stop=1, num=cols, endpoint=True) + qy_bins = np.linspace(start=-1 * 1, stop=1, num=rows, endpoint=True) + return qx_bins, qy_bins + # qmax can be any number, 1 just makes things simple. + self.qmax = 1 + qx_bins = np.linspace(start=-1 * self.qmax, + stop=self.qmax, + num=matrix.shape[1], + endpoint=True) + qy_bins = np.linspace(start=-1 * self.qmax, + stop=self.qmax, + num=matrix.shape[0], + endpoint=True) + + def _build_flat_arrays(self, matrix, err_arr, qx_bins, qy_bins): + """Flatten matrix and build qx, qy, q arrays plus mask and error handling.""" + data_flat = matrix.flatten() + if err_arr is None or np.any(err_arr <= 0): + # Error data of some kind is needed, so we fabricate some + err_flat = np.sqrt(np.abs(data_flat)) + else: + err_flat = np.asarray(err_arr).flatten() + + qx_data = np.tile(qx_bins, (len(qy_bins), 1)).flatten() + qy_data = np.tile(qy_bins, (len(qx_bins), 1)).swapaxes(0, 1).flatten() + q_data = np.sqrt(qx_data * qx_data + qy_data * qy_data) + mask = np.ones(len(data_flat), dtype=bool) + return data_flat, err_flat, qx_data, qy_data, q_data, mask + +class CircularTestingMatrix: + """ + This class is used to generate a 2D array representing a function in polar + coordinates. The function, f(r, φ) = R(r) * Φ(φ), factorises into simple + radial and angular parts. 
This makes it easy to determine the form of the + function after one of the parts has been averaged over, and therefore good + for testing the directional averagers in manipulations.py. + This testing is done by comparing the area under the functions, as these + will only match if the limits defining the ROI were applied correctly. + + f(r, φ) = R(r) * Φ(φ) + R(r) = r ; where 0 <= r <= 1. + Φ(φ) = 1 + sin(ν * φ) ; where ν is the frequency and 0 <= φ <= 2π. + """ + + def __init__(self, frequency=1, matrix_size=201, major_axis=None): + """ + :param frequency: No. times Φ(φ) oscillates over the 0 <= φ <= 2π range + This parameter is largely arbitrary. + :param matrix_size: The len() of the output matrix. + Note that odd numbers give a centrepoint of 0,0. + :param major_axis: 'Q' or 'Phi' - the axis plotted against by the + averager being tested. + """ + if major_axis not in ('Q', 'Phi'): + msg = "Major axis must be either 'Q' or 'Phi'." + raise ValueError(msg) + + self.freq = frequency + self.matrix_size = matrix_size + self.major = major_axis + + # Grid with same dimensions as data matrix, ranging from -1 to 1 + x, y = np.meshgrid(np.linspace(-1, 1, self.matrix_size), + np.linspace(-1, 1, self.matrix_size)) + # radius is 0 at the centre, and 1 at (0, +/-1) and (+/-1, 0) + radius = np.sqrt(x**2 + y**2) + angle = np.arctan2(y, x) + # Create the 2D array of data + # The sinusoidal part is shifted up by 1 so its average is never 0 + self.matrix = radius * (1 + np.sin(self.freq * angle)) + + def area_under_region(self, r_min=0, r_max=1, phi_min=0, phi_max=2*np.pi): + """ + Integral of the testing matrix along the major axis, between the limits + specified. This can be compared to the integral under the 1D data + output by the averager being tested to confirm it's working properly. + :param r_min: value defining the minimum Q in the ROI. + :param r_max: value defining the maximum Q in the ROI. + :param phi_min: value defining the minimum Phi in the ROI. + :param phi_max: value defining the maximum Phi in the ROI. + """ + + phi_range = phi_max - phi_min + # ∫(1 + sin(ν * φ)) dφ = φ + (-cos(ν * φ) / ν) + constant. + sine_part_integ = phi_range - (np.cos(self.freq * phi_max) - + np.cos(self.freq * phi_min)) / self.freq + sine_part_avg = sine_part_integ / phi_range + + # ∫(r) dr = r²/2 + constant. + linear_part_integ = (r_max ** 2 - r_min ** 2) / 2 + # The average radius is weighted towards higher radii. The probability + # of a point having a given radius value is proportional to the radius: + # P(r) = k * r ; where k is some proportionality constant. + # ∫[r₀, r₁] P(r) dr = 1, which can be solved for k. This can then be + # substituted into ⟨r⟩ = ∫[r₀, r₁] P(r) * r dr, giving: + linear_part_avg = 2/3 * (r_max**3 - r_min**3) / (r_max**2 - r_min**2) + + # The integral along the major axis is modulated by the average value + # along the minor axis (between the limits). 
+        if self.major == 'Q':
+            calculated_area = sine_part_avg * linear_part_integ
+        else:
+            calculated_area = linear_part_avg * sine_part_integ
+        return calculated_area
diff --git a/test/sasdataloader/utest_averaging.py b/test/sasmanipulations/utest_averaging.py
similarity index 100%
rename from test/sasdataloader/utest_averaging.py
rename to test/sasmanipulations/utest_averaging.py
diff --git a/test/sasmanipulations/utest_averaging_box.py b/test/sasmanipulations/utest_averaging_box.py
new file mode 100644
index 00000000..f3b4ec4a
--- /dev/null
+++ b/test/sasmanipulations/utest_averaging_box.py
@@ -0,0 +1,187 @@
+"""
+Unit tests for the Boxsum and Boxavg averagers (moved out of utest_averaging_analytical.py).
+"""
+import unittest
+
+import numpy as np
+
+from sasdata.data_util.averaging import Boxavg, Boxsum
+from sasdata.dataloader import data_info
+from test.sasmanipulations.helper import (
+    MatrixToData2D,
+    expected_boxavg_and_err,
+    expected_boxsum_and_err,
+    make_uniform_dd,
+)
+
+# TODO - also check the errors are being calculated correctly
+
+class BoxsumTests(unittest.TestCase):
+    """
+    This class contains all the unit tests for the Boxsum class from
+    manipulations.py
+    """
+
+    def test_boxsum_init(self):
+        """
+        Test that Boxsum's __init__ method does what it's supposed to.
+        """
+        qx_min = 1
+        qx_max = 2
+        qy_min = 3
+        qy_max = 4
+
+        box_object = Boxsum(qx_range=(qx_min, qx_max), qy_range=(qy_min, qy_max))
+
+        self.assertEqual(box_object.qx_min, qx_min)
+        self.assertEqual(box_object.qx_max, qx_max)
+        self.assertEqual(box_object.qy_min, qy_min)
+        self.assertEqual(box_object.qy_max, qy_max)
+
+    def test_boxsum_multiple_detectors(self):
+        """
+        Test Boxsum raises an error when there are multiple detectors.
+        """
+        dd = make_uniform_dd((100, 100), value=1.0)
+        detector1 = data_info.Detector()
+        detector2 = data_info.Detector()
+        dd.data.detector.append(detector1)
+        dd.data.detector.append(detector2)
+
+        box_object = Boxsum()
+        self.assertRaises(ValueError, box_object, dd.data)
+
+    def test_boxsum_total(self):
+        """
+        Test that Boxsum can find the sum of all of a data set
+        """
+        # Creating a 100x100 matrix for a distribution which is flat in y
+        # and linear in x.
+        test_data = np.tile(np.arange(100), (100, 1))
+        dd = MatrixToData2D(data2d=test_data)
+
+        box_object = Boxsum(qx_range=(-1 * dd.qmax, dd.qmax), qy_range=(-1 * dd.qmax, dd.qmax))
+        result, error, npoints = box_object(dd.data)
+        correct_sum, correct_error = expected_boxsum_and_err(test_data)
+
+        self.assertAlmostEqual(result, correct_sum, 6)
+        self.assertAlmostEqual(error, correct_error, 6)
+
+    def test_boxsum_subset_total(self):
+        """
+        Test that Boxsum can find the sum of a portion of a data set
+        """
+        # Creating a 100x100 matrix for a distribution which is flat in y
+        # and linear in x.
+ test_data = np.tile(np.arange(100), (100, 1)) + dd = MatrixToData2D(data2d=test_data) + + # region corresponds to central 50x50 in original test + box_object = Boxsum(qx_range=(-0.5 * dd.qmax, 0.5 * dd.qmax), qy_range=(-0.5 * dd.qmax, 0.5 * dd.qmax)) + result, error, npoints = box_object(dd.data) + inner_portion = test_data[25:75, 25:75] + correct_sum, correct_error = expected_boxsum_and_err(inner_portion) + + self.assertAlmostEqual(result, correct_sum, 6) + self.assertAlmostEqual(error, correct_error, 6) + + def test_boxsum_zero_sum(self): + """ + Test that Boxsum returns 0 when there are no points within the ROI + """ + test_data = np.ones([100, 100]) + test_data[25:75, 25:75] = 0 + dd = MatrixToData2D(data2d=test_data) + + box_object = Boxsum(qx_range=(-0.5 * dd.qmax, 0.5 * dd.qmax), qy_range=(-0.5 * dd.qmax, 0.5 * dd.qmax)) + result, error, npoints = box_object(dd.data) + + self.assertAlmostEqual(result, 0, 6) + self.assertAlmostEqual(error, 0, 6) + + +class BoxavgTests(unittest.TestCase): + """ + This class contains all the unit tests for the Boxavg class from + manipulations.py + """ + + def test_boxavg_init(self): + """ + Test that Boxavg's __init__ method does what it's supposed to. + """ + qx_min = 1 + qx_max = 2 + qy_min = 3 + qy_max = 4 + + box_object = Boxavg(qx_range=(qx_min, qx_max), qy_range=(qy_min,qy_max)) + + self.assertEqual(box_object.qx_min, qx_min) + self.assertEqual(box_object.qx_max, qx_max) + self.assertEqual(box_object.qy_min, qy_min) + self.assertEqual(box_object.qy_max, qy_max) + + def test_boxavg_multiple_detectors(self): + """ + Test Boxavg raises an error when there are multiple detectors. + """ + dd = make_uniform_dd((100, 100), value=1.0) + detector1 = data_info.Detector() + detector2 = data_info.Detector() + dd.data.detector.append(detector1) + dd.data.detector.append(detector2) + + box_object = Boxavg() + self.assertRaises(ValueError, box_object, dd.data) + + def test_boxavg_total(self): + """ + Test that Boxavg can find the average of all of a data set + """ + # Creating a 100x100 matrix for a distribution which is flat in y + # and linear in x. + test_data = np.tile(np.arange(100), (100, 1)) + dd = MatrixToData2D(data2d=test_data) + + box_object = Boxavg(qx_range=(-1 * dd.qmax, dd.qmax), qy_range=(-1 * dd.qmax, dd.qmax)) + result, error = box_object(dd.data) + correct_avg, correct_error = expected_boxavg_and_err(test_data) + + self.assertAlmostEqual(result, correct_avg, 6) + self.assertAlmostEqual(error, correct_error, 6) + + def test_boxavg_subset_total(self): + """ + Test that Boxavg can find the average of a portion of a data set + """ + # Creating a 100x100 matrix for a distribution which is flat in y + # and linear in x. 
+        test_data = np.tile(np.arange(100), (100, 1))
+        dd = MatrixToData2D(data2d=test_data)
+
+        box_object = Boxavg(qx_range=(-0.5 * dd.qmax, 0.5 * dd.qmax), qy_range=(-0.5 * dd.qmax, 0.5 * dd.qmax))
+        result, error = box_object(dd.data)
+        inner_portion = test_data[25:75, 25:75]
+        correct_avg, correct_error = expected_boxavg_and_err(inner_portion)
+
+        self.assertAlmostEqual(result, correct_avg, 6)
+        self.assertAlmostEqual(error, correct_error, 6)
+
+    def test_boxavg_zero_average(self):
+        """
+        Test that Boxavg returns 0 when there are no points within the ROI
+        """
+        test_data = np.ones([100, 100])
+        # Make a hole in the middle with zeros
+        test_data[25:75, 25:75] = np.zeros([50, 50])
+        dd = MatrixToData2D(data2d=test_data)
+
+        box_object = Boxavg(qx_range=(-0.5 * dd.qmax, 0.5 * dd.qmax), qy_range=(-0.5 * dd.qmax, 0.5 * dd.qmax))
+        result, error = box_object(dd.data)
+
+        self.assertAlmostEqual(result, 0, 6)
+        self.assertAlmostEqual(error, 0, 6)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/test/sasmanipulations/utest_averaging_circle.py b/test/sasmanipulations/utest_averaging_circle.py
new file mode 100644
index 00000000..419bd234
--- /dev/null
+++ b/test/sasmanipulations/utest_averaging_circle.py
@@ -0,0 +1,397 @@
+"""
+Unit tests for the circular and sector averagers: CircularAverage, Ring,
+SectorQ, WedgePhi and WedgeQ (moved out of utest_averaging_analytical.py).
+"""
+import unittest
+
+import numpy as np
+from scipy import integrate
+
+from sasdata.data_util.averaging import CircularAverage, Ring, SectorQ, WedgePhi, WedgeQ
+from test.sasmanipulations.helper import CircularTestingMatrix, MatrixToData2D
+
+# TODO - also check the errors are being calculated correctly
+
+class CircularAverageTests(unittest.TestCase):
+    """
+    This class contains all the tests for the CircularAverage class
+    from manipulations.py
+    """
+
+    def test_circularaverage_init(self):
+        """
+        Test that CircularAverage's __init__ method does what it's supposed to.
+        """
+        r_min = 1
+        r_max = 2
+        nbins = 100
+
+        circ_object = CircularAverage(r_range=(r_min, r_max), nbins=nbins)
+
+        self.assertEqual(circ_object.r_min, r_min)
+        self.assertEqual(circ_object.r_max, r_max)
+        self.assertEqual(circ_object.nbins, nbins)
+
+    def test_circularaverage_dq_retrieval(self):
+        """
+        Test that CircularAverage is able to calculate dq_data correctly when
+        the data provided has dqx_data and dqy_data.
+        """
+
+        # I'm saving the implementation of this bit for later
+        pass
+
+    def test_circularaverage_multiple_detectors(self):
+        """
+        Test CircularAverage raises an error when there are multiple detectors
+        """
+
+        # This test can't be implemented yet, because CircularAverage does not
+        # check the number of detectors.
+        # TODO - establish whether CircularAverage should be making this check.
+        pass
+
+    def test_circularaverage_check_q_data(self):
+        """
+        Check CircularAverage ensures the data supplied has `q_data` populated
+        """
+        # test_data = np.ones([100, 100])
+        # averager_data = DataMatrixToData2D(test_data)
+        # # Overwrite q_data so it's empty
+        # averager_data.data.q_data = np.array([])
+        # circ_object = CircularAverage()
+        # self.assertRaises(RuntimeError, circ_object, averager_data.data)
+
+        # This doesn't work.
I'll come back to this later too + pass + + def test_circularaverage_check_valid_radii(self): + """ + Test that CircularAverage raises ValueError when r_min > r_max + """ + self.assertRaises(ValueError, CircularAverage, r_range=(0.1, 0.05)) + + def test_circularaverage_no_points_to_average(self): + """ + Test CircularAverage raises ValueError when the ROI contains no data + """ + test_data = np.ones([100, 100]) + averager_data = MatrixToData2D(test_data) + + # Region of interest well outside region with data + circ_object = CircularAverage(r_range=(2 * averager_data.qmax,3 * averager_data.qmax)) + self.assertRaises(ValueError, circ_object, averager_data.data) + + def test_circularaverage_averages_circularly(self): + """ + Test that CircularAverage can calculate a circular average correctly. + """ + test_data = CircularTestingMatrix(frequency=2, matrix_size=201, + major_axis='Q') + averager_data = MatrixToData2D(test_data.matrix) + + # Test the ability to average over a subsection of the data + r_min = averager_data.qmax * 0.25 + r_max = averager_data.qmax * 0.75 + + nbins = test_data.matrix_size + circ_object = CircularAverage(r_range=(r_min, r_max), nbins=nbins) + data1d = circ_object(averager_data.data) + + expected_area = test_data.area_under_region(r_min=r_min, r_max=r_max) + actual_area = integrate.trapezoid(data1d.y, data1d.x) + + # This used to be able to pass with a precision of 3 d.p. with the old + # manipulations.py - I'm not sure why it doesn't anymore. + # This is still a good level of precision compared to the others though + self.assertAlmostEqual(actual_area, expected_area, 2) + +class RingTests(unittest.TestCase): + """ + This class contains the tests for the Ring class from manipulations.py + A.K.A AnnulusSlicer on the sasview side + """ + + def test_ring_init(self): + """ + Test that Ring's __init__ method does what it's supposed to. + """ + r_min = 1 + r_max = 2 + nbins = 100 + + # Note that Ring also has params center_x and center_y, but these are + # not used by the slicers and there is a 'todo' in manipulations.py to + # remove them. For this reason, I have not tested their initialisation. + ring_object = Ring(r_range=(r_min, r_max), nbins=nbins) + + self.assertEqual(ring_object.r_min, r_min) + self.assertEqual(ring_object.r_max, r_max) + self.assertEqual(ring_object.nbins, nbins) + + def test_ring_non_plottable_data(self): + """ + Test that RuntimeError is raised if the data supplied isn't plottable + """ + # with patch("sasdata.data_util.manipulations.Ring.data2D.__class__.__name__") as p: + # p.return_value = "bad_name" + # ring_object = Ring() + # self.assertRaises(RuntimeError, ring_object.__call__) + + # I can't seem to get patch working, in this test or in others. + pass + + def test_ring_no_points_to_average(self): + """ + Test Ring raises ValueError when the ROI contains no data + """ + test_data = np.ones([100, 100]) + averager_data = MatrixToData2D(test_data) + + # Region of interest well outside region with data + ring_object = Ring(r_range=(2 * averager_data.qmax, 3 * averager_data.qmax)) + self.assertRaises(ValueError, ring_object, averager_data.data) + + def test_ring_averages_azimuthally(self): + """ + Test that Ring can calculate an azimuthal average correctly. 
+ """ + test_data = CircularTestingMatrix(frequency=1, matrix_size=201, + major_axis='Phi') + averager_data = MatrixToData2D(test_data.matrix) + + # Test the ability to average over a subsection of the data + r_min = 0.25 * averager_data.qmax + r_max = 0.75 * averager_data.qmax + nbins = test_data.matrix_size // 2 + + ring_object = Ring(r_range=(r_min, r_max), nbins=nbins) + data1d = ring_object(averager_data.data) + + expected_area = test_data.area_under_region(r_min=r_min, r_max=r_max) + actual_area = integrate.simpson(data1d.y, data1d.x) + + self.assertAlmostEqual(actual_area, expected_area, 1) + +class SectorQTests(unittest.TestCase): + """ + This class contains the tests for the SectorQ class from manipulations.py + On the sasview side, this includes SectorSlicer and WedgeSlicer. + + The parameters frequency, r_min, r_max, phi_min and phi_max are largely + arbitrary, and the tests should pass if any sane value is used for them. + """ + + def test_sectorq_init(self): + """ + Test that SectorQ's __init__ method does what it's supposed to. + """ + r_min = 0 + r_max = 1 + phi_min = 0 + phi_max = np.pi + nbins = 100 + # base = 10 + + # sector_object = SectorQ(r_min=r_min, r_max=r_max, phi_min=phi_min, + # phi_max=phi_max, nbins=nbins, base=base) + + sector_object = SectorQ(r_range=(r_min, r_max), phi_range=(phi_min,phi_max), nbins=nbins) + + self.assertEqual(sector_object.r_min, r_min) + self.assertEqual(sector_object.r_max, r_max) + self.assertEqual(sector_object.phi_min, phi_min) + self.assertEqual(sector_object.phi_max, phi_max) + self.assertEqual(sector_object.nbins, nbins) + # self.assertEqual(sector_object.base, base) + + def test_sectorq_non_plottable_data(self): + """ + Test that RuntimeError is raised if the data supplied isn't plottable + """ + # Implementing this test can wait + pass + + def test_sectorq_averaging_without_fold(self): + """ + Test SectorQ can average correctly w/ major axis q and fold disabled. + All min/max r & phi params are specified and have their expected form. + """ + test_data = CircularTestingMatrix(frequency=1, matrix_size=201, + major_axis='Q') + averager_data = MatrixToData2D(test_data.matrix) + + r_min = 0 + r_max = 0.9 * averager_data.qmax + phi_min = np.pi/6 + phi_max = 5*np.pi/6 + nbins = int(test_data.matrix_size * np.sqrt(2)/4) # usually reliable + + wedge_object = SectorQ(r_range=(r_min, r_max), phi_range=(phi_min,phi_max), nbins=nbins) + # Explicitly set fold to False - results span full +/- range + wedge_object.fold = False + data1d = wedge_object(averager_data.data) + + expected_area = test_data.area_under_region(r_min=r_min, r_max=r_max, + phi_min=phi_min, + phi_max=phi_max) + # With fold set to False, the sector on the opposite side of the origin + # to the one specified is also graphed as negative Q values. Therefore, + # the area of this other half needs to be accounted for. + expected_area += test_data.area_under_region(r_min=r_min, r_max=r_max, + phi_min=phi_min+np.pi, + phi_max=phi_max+np.pi) + actual_area = integrate.simpson(data1d.y, data1d.x) + + self.assertAlmostEqual(actual_area, expected_area, 1) + + def test_sectorq_averaging_with_fold(self): + """ + Test SectorQ can average correctly w/ major axis q and fold enabled. + All min/max r & phi params are specified and have their expected form. 
+ """ + test_data = CircularTestingMatrix(frequency=1, matrix_size=201, + major_axis='Q') + averager_data = MatrixToData2D(test_data.matrix) + + r_min = 0 + r_max = 0.9 * averager_data.qmax + phi_min = np.pi/6 + phi_max = 5*np.pi/6 + nbins = int(test_data.matrix_size * np.sqrt(2)/4) # usually reliable + + wedge_object = SectorQ(r_range=(r_min, r_max), phi_range=(phi_min,phi_max), nbins=nbins) + # Explicitly set fold to True - points either side of 0,0 are averaged + wedge_object.fold = True + data1d = wedge_object(averager_data.data) + + expected_area = test_data.area_under_region(r_min=r_min, r_max=r_max, + phi_min=phi_min, + phi_max=phi_max) + # With fold set to True, points from the sector on the opposite side of + # the origin to the one specified are averaged with points from the + # specified sector. + expected_area += test_data.area_under_region(r_min=r_min, r_max=r_max, + phi_min=phi_min+np.pi, + phi_max=phi_max+np.pi) + expected_area /= 2 + actual_area = integrate.simpson(data1d.y, data1d.x) + + self.assertAlmostEqual(actual_area, expected_area, 1) + + +class WedgeQTests(unittest.TestCase): + """ + This class contains the tests for the WedgeQ class from manipulations.py + + The parameters frequency, r_min, r_max, phi_min and phi_max are largely + arbitrary, and the tests should pass if any sane value is used for them. + """ + + def test_wedgeq_init(self): + """ + Test that WedgeQ's __init__ method does what it's supposed to. + """ + r_min = 1 + r_max = 2 + phi_min = 0 + phi_max = np.pi + nbins = 10 + + wedge_object = WedgeQ(r_range=(r_min, r_max), phi_range=(phi_min,phi_max), nbins=nbins) + + self.assertEqual(wedge_object.r_min, r_min) + self.assertEqual(wedge_object.r_max, r_max) + self.assertEqual(wedge_object.phi_min, phi_min) + self.assertEqual(wedge_object.phi_max, phi_max) + self.assertEqual(wedge_object.nbins, nbins) + + def test_wedgeq_averaging(self): + """ + Test WedgeQ can average correctly, when all of min/max r & phi params + are specified and have their expected form. + """ + test_data = CircularTestingMatrix(frequency=3, matrix_size=201, + major_axis='Q') + averager_data = MatrixToData2D(test_data.matrix) + + r_min = 0.1 * averager_data.qmax + r_max = 0.9 * averager_data.qmax + phi_min = np.pi/6 + phi_max = 5*np.pi/6 + nbins = int(test_data.matrix_size * np.sqrt(2)/4) # usually reliable + + wedge_object = WedgeQ(r_range=(r_min, r_max), phi_range=(phi_min,phi_max), nbins=nbins) + data1d = wedge_object(averager_data.data) + + expected_area = test_data.area_under_region(r_min=r_min, r_max=r_max, + phi_min=phi_min, + phi_max=phi_max) + actual_area = integrate.simpson(data1d.y, data1d.x) + + self.assertAlmostEqual(actual_area, expected_area, 1) + + +class WedgePhiTests(unittest.TestCase): + """ + This class contains the tests for the WedgePhi class from manipulations.py + + The parameters frequency, r_min, r_max, phi_min and phi_max are largely + arbitrary, and the tests should pass if any sane value is used for them. + """ + + def test_wedgephi_init(self): + """ + Test that WedgePhi's __init__ method does what it's supposed to. 
+ """ + r_min = 1 + r_max = 2 + phi_min = 0 + phi_max = np.pi + nbins = 100 + # base = 10 + + # wedge_object = WedgePhi(r_min=r_min, r_max=r_max, phi_min=phi_min, + # phi_max=phi_max, nbins=nbins, base=base) + wedge_object = WedgePhi(r_range=(r_min, r_max), phi_range=(phi_min,phi_max), nbins=nbins) + + self.assertEqual(wedge_object.r_min, r_min) + self.assertEqual(wedge_object.r_max, r_max) + self.assertEqual(wedge_object.phi_min, phi_min) + self.assertEqual(wedge_object.phi_max, phi_max) + self.assertEqual(wedge_object.nbins, nbins) + # self.assertEqual(wedge_object.base, base) + + def test_wedgephi_non_plottable_data(self): + """ + Test that RuntimeError is raised if the data supplied isn't plottable + """ + # Implementing this test can wait + pass + + def test_wedgephi_averaging(self): + """ + Test WedgePhi can average correctly, when all of min/max r & phi params + are specified and have their expected form. + """ + test_data = CircularTestingMatrix(frequency=1, matrix_size=201, + major_axis='Phi') + averager_data = MatrixToData2D(test_data.matrix) + + r_min = 0.1 * averager_data.qmax + r_max = 0.9 * averager_data.qmax + phi_min = np.pi/6 + phi_max = 5*np.pi/6 + nbins = int(test_data.matrix_size * np.sqrt(2)/4) # usually reliable + + wedge_object = WedgePhi(r_range=(r_min, r_max), phi_range=(phi_min,phi_max), nbins=nbins) + data1d = wedge_object(averager_data.data) + + expected_area = test_data.area_under_region(r_min=r_min, r_max=r_max, + phi_min=phi_min, + phi_max=phi_max) + actual_area = integrate.simpson(data1d.y, data1d.x) + + self.assertAlmostEqual(actual_area, expected_area, 1) + +if __name__ == '__main__': + unittest.main() diff --git a/test/sasmanipulations/utest_averaging_directional.py b/test/sasmanipulations/utest_averaging_directional.py new file mode 100644 index 00000000..f36111c3 --- /dev/null +++ b/test/sasmanipulations/utest_averaging_directional.py @@ -0,0 +1,163 @@ +""" +This file contains unit tests for the various averagers found in +sasdata/data_util/manipulations.py - These tests are based on analytical +formulae rather than imported data files. +""" + +import unittest + +import numpy as np + +from sasdata.data_util.averaging import DirectionalAverage +from test.sasmanipulations.helper import MatrixToData2D + +# TODO - also check the errors are being calculated correctly + +class DirectionalAverageValidationTests(unittest.TestCase): + """ + This class tests DirectionalAverage's data validation checks. + """ + + def test_missing_coordinate_data(self): + """ + Ensure a ValueError is raised if no axis data is supplied. + """ + self.assertRaises(ValueError, DirectionalAverage, + major_axis=None, minor_axis=None) + + def test_inappropriate_limits_arrays(self): + """ + Ensure a ValueError is raised if the wrong number of limits is suppied. + """ + self.assertRaises(ValueError, DirectionalAverage, major_axis=[], + minor_axis=[], lims=([], [])) + + def test_nbins_not_int(self): + """ + Ensure a TypeError is raised if the parameter nbins is not a number + that can be converted to integer. + """ + self.assertRaises(TypeError, DirectionalAverage, major_axis=np.array([0, 1]), + minor_axis=np.array([0, 1]), nbins=np.array([])) + + def test_axes_unequal_lengths(self): + """ + Ensure ValueError is raised if the major and minor axes don't match. + """ + self.assertRaises(ValueError, DirectionalAverage, major_axis=[0, 1, 2], + minor_axis=[3, 4]) + + def test_no_limits_on_an_axis(self): + """ + Ensure correct behaviour when there are no limits provided. + The min. and max. 
+class DirectionalAverageFunctionalityTests(unittest.TestCase):
+    """
+    This class tests that DirectionalAverage bins and averages data correctly.
+    """
+
+    def setUp(self):
+        """
+        Setup for the DirectionalAverageFunctionalityTests tests.
+        """
+
+        # 21 bins, with spacing 0.1
+        self.qx_data = np.linspace(-1, 1, 21)
+        self.qy_data = self.qx_data
+        x, y = np.meshgrid(self.qx_data, self.qy_data)
+        # quadratic in x, linear in y
+        data = x * x * y
+        self.data2d = MatrixToData2D(data)
+
+        # ROI is the first quadrant only. Same limits for both axes.
+        self.lims = (0.0, 1.0)
+        self.in_roi = (self.lims[0] <= self.qx_data) & \
+            (self.qx_data <= self.lims[1])
+        self.nbins = int(np.sum(self.in_roi))
+        # Note that the bin width is less than the spacing of the datapoints,
+        # because we're insisting that there be as many bins as datapoints.
+        self.bin_width = (self.lims[1] - self.lims[0]) / self.nbins
+
+        self.directional_average = \
+            DirectionalAverage(major_axis=self.data2d.data.qx_data,
+                               minor_axis=self.data2d.data.qy_data,
+                               lims=(self.lims, self.lims), nbins=self.nbins)
+
+    def test_bin_width(self):
+        """
+        Test that the bin width is calculated correctly.
+        """
+        self.assertAlmostEqual(np.average(self.directional_average.bin_widths), self.bin_width)
+
+    def test_get_bin_interval(self):
+        """
+        Test that the get_bin_interval method works correctly.
+        """
+        for b in range(self.nbins):
+            bin_start, bin_end = self.directional_average.get_bin_interval(b)
+            expected_bin_start = self.lims[0] + b * self.bin_width
+            expected_bin_end = self.lims[0] + (b + 1) * self.bin_width
+            self.assertAlmostEqual(bin_start, expected_bin_start, 10)
+            self.assertAlmostEqual(bin_end, expected_bin_end, 10)
+
+    def test_get_bin_index(self):
+        """
+        Test that the get_bin_index method works correctly.
+        """
+        # use values at the edges of bins, and values in the middles
+        values = np.linspace(self.lims[0], self.lims[1], self.nbins * 2)
+        expected_indices = np.repeat(np.arange(self.nbins), 2)
+        for n, v in enumerate(values):
+            self.assertAlmostEqual(self.directional_average.get_bin_index(v),
+                                   expected_indices[n], 10)
+
+    def test_binary_weights(self):
+        """
+        Test weights are calculated correctly when the bins & ROI are aligned.
+        When aligned perfectly, the weights should be ones and zeros only.
+
+        Variations on this test will be needed once fractional weighting is
+        possible. These should have ROIs which do not line up perfectly with
+        the bins.
+        """
+
+        # I think this test needs mocks, it'd be very complex otherwise.
+        # I'm struggling to come up with a test for this one.
+        pass
+
+    def test_directional_averaging(self):
+        """
+        Test that a directional average is computed correctly.
+
+        Variations on this test will be needed once fractional weighting is
+        possible. These should have ROIs which do not line up perfectly with
+        the bins.
+        """
+        x_axis_values, intensity, errors = \
+            self.directional_average(data=self.data2d.data.data,
+                                     err_data=self.data2d.data.err_data)
+
+        expected_x = self.qx_data[self.in_roi]
+        expected_intensity = np.mean(self.qy_data[self.in_roi]) * expected_x**2
+
+        np.testing.assert_array_almost_equal(x_axis_values, expected_x, 10)
+        np.testing.assert_array_almost_equal(intensity, expected_intensity, 10)
+
+    def test_no_points_in_roi(self):
+        """
+        Test that ValueError is raised if there were no points in the ROI.
+ """ + # move the region of interest to outside the range of the data + self.directional_average.major_lims = (2, 3) + self.directional_average.minor_lims = (2, 3) + self.assertRaises(ValueError, self.directional_average, + self.data2d.data.data, self.data2d.data.err_data) + +if __name__ == '__main__': + unittest.main() diff --git a/test/sasmanipulations/utest_averaging_slab.py b/test/sasmanipulations/utest_averaging_slab.py new file mode 100644 index 00000000..b93691ff --- /dev/null +++ b/test/sasmanipulations/utest_averaging_slab.py @@ -0,0 +1,238 @@ +""" +Unit tests for SlabX and SlabY averagers (moved out of utest_averaging_analytical.py). +""" +import unittest + +import numpy as np + +from sasdata.data_util.averaging import SlabX, SlabY +from sasdata.dataloader import data_info +from test.sasmanipulations.helper import ( + MatrixToData2D, + expected_slabx_area, + expected_slaby_area, + integrate_1d_output, + make_dd_from_func, +) + +# TODO - also check the errors are being calculated correctly + +class SlabXTests(unittest.TestCase): + """ + This class contains all the unit tests for the SlabX class from + manipulations.py + """ + + def test_slabx_init(self): + """ + Test that SlabX's __init__ method does what it's supposed to. + """ + qx_min = 1 + qx_max = 2 + qy_min = 3 + qy_max = 4 + nbins = 100 + fold = True + + slab_object = SlabX(qx_range=(qx_min, qx_max), qy_range=(qy_min,qy_max), nbins=nbins, fold=fold) + + self.assertEqual(slab_object.qx_min, qx_min) + self.assertEqual(slab_object.qx_max, qx_max) + self.assertEqual(slab_object.qy_min, qy_min) + self.assertEqual(slab_object.qy_max, qy_max) + self.assertEqual(slab_object.nbins, nbins) + self.assertEqual(slab_object.fold, fold) + + def test_slabx_multiple_detectors(self): + """ + Test that SlabX raises an error when there are multiple detectors + """ + averager_data = MatrixToData2D(np.ones([100, 100])) + detector1 = data_info.Detector() + detector2 = data_info.Detector() + averager_data.data.detector.append(detector1) + averager_data.data.detector.append(detector2) + + slab_object = SlabX() + self.assertRaises(ValueError, slab_object, averager_data.data) + + def test_slabx_no_points_to_average(self): + """ + Test SlabX raises ValueError when the ROI contains no data + """ + test_data = np.ones([100, 100]) + averager_data = MatrixToData2D(data2d=test_data) + + # Region of interest well outside region with data + qx_min = 2 * averager_data.qmax + qx_max = 3 * averager_data.qmax + qy_min = 2 * averager_data.qmax + qy_max = 3 * averager_data.qmax + + slab_object = SlabX(qx_range=(qx_min, qx_max), qy_range=(qy_min,qy_max)) + self.assertRaises(ValueError, slab_object, averager_data.data) + + def test_slabx_averaging_without_fold(self): + """ + Test that SlabX can average correctly when x is the major axis + """ + def func(x, y): + return x**2 * y + averager_data, matrix_size = make_dd_from_func(func, matrix_size=201) + + + # Set up region of interest to average over - the limits are arbitrary. 
+        qx_min = -0.5 * averager_data.qmax  # = -0.5
+        qx_max = averager_data.qmax  # = 1
+        qy_min = -0.5 * averager_data.qmax  # = -0.5
+        qy_max = averager_data.qmax  # = 1
+        nbins = int((qx_max - qx_min) / 2 * matrix_size)
+        # Explicitly not using fold in this test
+        fold = False
+
+        slab_object = SlabX(qx_range=(qx_min, qx_max),
+                            qy_range=(qy_min, qy_max), nbins=nbins, fold=fold)
+        data1d = slab_object(averager_data.data)
+
+        expected_area = expected_slabx_area(qx_min, qx_max, qy_min, qy_max)
+        actual_area = integrate_1d_output(data1d, method="simpson")
+
+        self.assertAlmostEqual(actual_area, expected_area, 2)
+
+    def test_slabx_averaging_with_fold(self):
+        """
+        Test that SlabX can average correctly when x is the major axis and
+        folding is enabled.
+        """
+        def func(x, y):
+            return x**2 * y
+        averager_data, matrix_size = make_dd_from_func(func, matrix_size=201)
+
+        # Set up region of interest to average over - the limits are arbitrary.
+        qx_min = -0.5 * averager_data.qmax  # = -0.5
+        qx_max = averager_data.qmax  # = 1
+        qy_min = -0.5 * averager_data.qmax  # = -0.5
+        qy_max = averager_data.qmax  # = 1
+        nbins = int((qx_max - qx_min) / 2 * matrix_size)
+        # Explicitly using fold in this test
+        fold = True
+
+        slab_object = SlabX(qx_range=(qx_min, qx_max),
+                            qy_range=(qy_min, qy_max), nbins=nbins, fold=fold)
+        data1d = slab_object(averager_data.data)
+
+        # Negative values of x are not graphed when fold = True, so don't
+        # include them in the area calculation.
+        qx_min_fold = 0
+        expected_area = expected_slabx_area(qx_min_fold, qx_max, qy_min,
+                                            qy_max)
+        actual_area = integrate_1d_output(data1d, method="simpson")
+
+        self.assertAlmostEqual(actual_area, expected_area, 2)
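+    def test_slabx_fold_matches_positive_half(self):
+        """
+        Consistency sketch added alongside the fold test above; it is an
+        addition, not part of the original suite. For data even in x,
+        folding over a symmetric range should reproduce the unfolded
+        average computed on the positive-x half alone, because each folded
+        bin averages pairs of equal values from +x and -x. Assumes
+        make_dd_from_func produces a grid symmetric about the origin.
+        """
+        def func(x, y):
+            return x**2  # even in x, so folding should change nothing
+        averager_data, _ = make_dd_from_func(func, matrix_size=201)
+        qmax = averager_data.qmax
+
+        folded = SlabX(qx_range=(-qmax, qmax), qy_range=(-qmax, qmax),
+                       nbins=50, fold=True)(averager_data.data)
+        unfolded = SlabX(qx_range=(0, qmax), qy_range=(-qmax, qmax),
+                         nbins=50, fold=False)(averager_data.data)
+
+        np.testing.assert_array_almost_equal(folded.y, unfolded.y, 6)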
+class SlabYTests(unittest.TestCase):
+    """
+    This class contains all the unit tests for the SlabY class from
+    averaging.py
+    """
+
+    def test_slaby_init(self):
+        """
+        Test that SlabY's __init__ method does what it's supposed to.
+        """
+        qx_min = 1
+        qx_max = 2
+        qy_min = 3
+        qy_max = 4
+        nbins = 100
+        fold = True
+
+        slab_object = SlabY(qx_range=(qx_min, qx_max),
+                            qy_range=(qy_min, qy_max), nbins=nbins, fold=fold)
+
+        self.assertEqual(slab_object.qx_min, qx_min)
+        self.assertEqual(slab_object.qx_max, qx_max)
+        self.assertEqual(slab_object.qy_min, qy_min)
+        self.assertEqual(slab_object.qy_max, qy_max)
+        self.assertEqual(slab_object.nbins, nbins)
+        self.assertEqual(slab_object.fold, fold)
+
+    def test_slaby_multiple_detectors(self):
+        """
+        Test that SlabY raises an error when there are multiple detectors
+        """
+        averager_data = MatrixToData2D(np.ones([100, 100]))
+        detector1 = data_info.Detector()
+        detector2 = data_info.Detector()
+        averager_data.data.detector.append(detector1)
+        averager_data.data.detector.append(detector2)
+
+        slab_object = SlabY()
+        self.assertRaises(ValueError, slab_object, averager_data.data)
+
+    def test_slaby_no_points_to_average(self):
+        """
+        Test SlabY raises ValueError when the ROI contains no data
+        """
+        test_data = np.ones([100, 100])
+        averager_data = MatrixToData2D(data2d=test_data)
+
+        # Region of interest well outside region with data
+        qx_min = 2 * averager_data.qmax
+        qx_max = 3 * averager_data.qmax
+        qy_min = 2 * averager_data.qmax
+        qy_max = 3 * averager_data.qmax
+
+        slab_object = SlabY(qx_range=(qx_min, qx_max),
+                            qy_range=(qy_min, qy_max))
+        self.assertRaises(ValueError, slab_object, averager_data.data)
+
+    def test_slaby_averaging_without_fold(self):
+        """
+        Test that SlabY can average correctly when y is the major axis and
+        folding is disabled.
+        """
+        def func(x, y):
+            return x * y**2
+        averager_data, matrix_size = make_dd_from_func(func, matrix_size=201)
+
+        # Set up region of interest to average over - the limits are arbitrary.
+        qx_min = -0.5 * averager_data.qmax  # = -0.5
+        qx_max = averager_data.qmax  # = 1
+        qy_min = -0.5 * averager_data.qmax  # = -0.5
+        qy_max = averager_data.qmax  # = 1
+        nbins = int((qx_max - qx_min) / 2 * matrix_size)
+        # Explicitly not using fold in this test
+        fold = False
+
+        slab_object = SlabY(qx_range=(qx_min, qx_max),
+                            qy_range=(qy_min, qy_max), nbins=nbins, fold=fold)
+        data1d = slab_object(averager_data.data)
+
+        expected_area = expected_slaby_area(qx_min, qx_max, qy_min, qy_max)
+        actual_area = integrate_1d_output(data1d, method="simpson")
+
+        self.assertAlmostEqual(actual_area, expected_area, 2)
+
+    def test_slaby_averaging_with_fold(self):
+        """
+        Test that SlabY can average correctly when y is the major axis and
+        folding is enabled.
+        """
+        def func(x, y):
+            return x * y**2
+        averager_data, matrix_size = make_dd_from_func(func, matrix_size=201)
+
+        # Set up region of interest to average over - the limits are arbitrary.
+        qx_min = -0.5 * averager_data.qmax  # = -0.5
+        qx_max = averager_data.qmax  # = 1
+        qy_min = -0.5 * averager_data.qmax  # = -0.5
+        qy_max = averager_data.qmax  # = 1
+        nbins = int((qx_max - qx_min) / 2 * matrix_size)
+        # Explicitly using fold in this test
+        fold = True
+
+        slab_object = SlabY(qx_range=(qx_min, qx_max),
+                            qy_range=(qy_min, qy_max), nbins=nbins, fold=fold)
+        data1d = slab_object(averager_data.data)
+
+        # Negative values of y are not graphed when fold = True, so don't
+        # include them in the area calculation.
+        qy_min_fold = 0
+        expected_area = expected_slaby_area(qx_min, qx_max, qy_min_fold,
+                                            qy_max)
+        actual_area = integrate_1d_output(data1d, method="simpson")
+
+        self.assertAlmostEqual(actual_area, expected_area, 2)
+
+if __name__ == '__main__':
+    unittest.main()