Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions packages/essnmx/src/ess/nmx/configurations.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,6 +170,15 @@ def positive_nbins(self):
description="Random seed for TOF simulation.",
default=42, # No reason.
)
result_time_bin_unit: TimeBinUnit = Field(
title="Output Time Bin Unit",
description="Time bin unit of the histogram after reduction. "
"If the input time bin is different from the result time bin unit, "
"the unit will be converted to the result time bin "
"before the result is returned.",
default=TimeBinUnit.ns,
# DIALS expects [ns] by default.
)


class OutputConfig(BaseModel):
Expand Down
16 changes: 10 additions & 6 deletions packages/essnmx/src/ess/nmx/executables.py
Original file line number Diff line number Diff line change
Expand Up @@ -186,7 +186,7 @@ def _build_time_bin_edges(
true_last_bin_edge = bin_edges[t_coord_name, -1] + time_bin_width
bin_edges = sc.concat([bin_edges, true_last_bin_edge], dim=t_coord_name)

return bin_edges
return bin_edges.to(dtype=float)

else: # Number of bin edges are given but not the bin width.
n_edges = wf_config.nbins + 1
Expand All @@ -197,7 +197,9 @@ def _build_time_bin_edges(
# Avoid dropping the event that has the exact same
# `event_time_offset` or `tof` value as the upper bin edge.
max_t.value = np.nextafter(max_t.value, np.inf)
return sc.linspace(dim=t_coord_name, start=min_t, stop=max_t, num=n_edges)
return sc.linspace(
dim=t_coord_name, start=min_t, stop=max_t, num=n_edges, dtype=float
)


def reduction(
Expand Down Expand Up @@ -281,11 +283,14 @@ def reduction(
)

# Histogram detector counts
output_tunit = config.workflow.result_time_bin_unit
t_bin_edges = t_bin_edges.to(unit=output_tunit)
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

If DIALS expects ns (the default), does it also expect integers?
I.e. if the output unit is ns, do we then also need to convert to int?

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure... but it computes wavelength from the time again so shouldn't it prefer float...?

Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Copy link
Copy Markdown
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I asked Aaron in person, but for now we want to keep the dtypes the same as in the older version, which is float64.

tof_histograms = sc.DataGroup()
for detector_name, tof_da in tof_das.items():
t_coord_unit = tof_da.bins.coords[t_coord_name].unit
histogram = tof_da.hist({t_coord_name: t_bin_edges.to(unit=t_coord_unit)})
tof_histograms[detector_name] = histogram
tof_da.bins.coords[t_coord_name] = tof_da.bins.coords[t_coord_name].to(
Comment thread
YooSunYoung marked this conversation as resolved.
dtype=float, unit=output_tunit
)
tof_histograms[detector_name] = tof_da.hist({t_coord_name: t_bin_edges})

_tof_histogram = next(iter(tof_histograms.values()))
monitor_metadata = NMXMonitorMetadata(
Expand Down Expand Up @@ -325,7 +330,6 @@ def reduction(

def save_results(*, results: NMXLauetof, output_config: OutputConfig) -> None:
# Validate if results have expected fields

export_static_metadata_as_nxlauetof(
sample_metadata=results.sample,
source_metadata=results.instrument.source,
Expand Down
22 changes: 15 additions & 7 deletions packages/essnmx/tests/executable_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ def test_reduction_config() -> None:
tof_simulation_min_ltotal=140.0,
tof_simulation_max_ltotal=200.0,
tof_simulation_seed=12345,
result_time_bin_unit=TimeBinUnit.us,
)
output_options = OutputConfig(
output_file='test-output.h5',
Expand Down Expand Up @@ -162,9 +163,11 @@ def _check_output_file(
if nbins and bin_width is None:
assert len(toa_edges) == nbins
else:
assert (toa_edges[1] - toa_edges[0]) == sc.scalar(
bin_width, unit='ms'
).to(unit='us')
computed_bin_width = toa_edges[1] - toa_edges[0]
expected_bin_width = sc.scalar(bin_width, unit='ms').to(
unit=computed_bin_width.unit
)
assert_identical(computed_bin_width, expected_bin_width)
assert all(field_name in det_gr for field_name in mandatory_fields)


Expand All @@ -173,14 +176,14 @@ def test_executable_runs(small_nmx_nexus_path, tmp_path: pathlib.Path):
output_file = tmp_path / "output.h5"
assert not output_file.exists()

bin_width = 10 # Bigger bins for testing.
bin_width = 10.0 # Bigger bins for testing.
# The output has 1280x1280 pixels per detector per time bin.
commands = (
'essnmx-reduce',
'--input-file',
small_nmx_nexus_path,
'--time-bin-width',
str(bin_width),
str(int(bin_width)),
'--output-file',
output_file.as_posix(),
)
Expand Down Expand Up @@ -294,7 +297,9 @@ def test_reduction_only_time_bin_width(reduction_config: ReductionConfig) -> Non
hist = _retrieve_one_hist(reduction(config=reduction_config))

width = hist.coords['tof'][1] - hist.coords['tof'][0]
assert width == sc.scalar(20.0, unit='ms').to(unit='us')
assert width == sc.scalar(20.0, unit='ms').to(
unit=reduction_config.workflow.result_time_bin_unit
)


def test_reduction_only_number_of_time_bins(reduction_config: ReductionConfig) -> None:
Expand All @@ -315,7 +320,10 @@ def test_histogram_event_time_offset(reduction_config: ReductionConfig) -> None:

# Check that the number of time bins is as expected.
width = hist.coords['event_time_offset'][1] - hist.coords['event_time_offset'][0]
assert_identical(width, sc.scalar(20.0, unit='ms').to(unit='ns'))
expected_output_width = sc.scalar(20.0, unit='ms').to(
unit=reduction_config.workflow.result_time_bin_unit
)
assert_identical(width, expected_output_width)
# Check if the histogram result is reasonable
zero = sc.scalar(0.0, unit='counts', dtype='float32', variance=0.0)
assert bool(hist.data.sum() > zero)
Expand Down
Loading