-
Notifications
You must be signed in to change notification settings - Fork 126
FEAT: Improve interactions of user pools #1009
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from all commits
1bd7a08
8bee9e0
479c100
978c696
dcd05dd
86b993c
c51e15c
c4654a9
ba1df87
2025cf0
10e4267
079d1c9
1ab2fa4
cffe25c
463af87
975cbfc
e95381e
c914a22
4f92ce1
eba6eec
29ecb3b
82bcf1e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -11,6 +11,7 @@ | |
| loaded_modules_dict, | ||
| logger, | ||
| ) | ||
| from ..utils.parallel import bilby_pool | ||
| from . import proposal | ||
| from .base_sampler import Sampler, SamplingMarginalisedParameterError | ||
|
|
||
|
|
@@ -158,6 +159,7 @@ def run_sampler( | |
| gzip=False, | ||
| result_class=None, | ||
| npool=1, | ||
| pool=None, | ||
| **kwargs, | ||
| ): | ||
| """ | ||
|
|
@@ -266,36 +268,27 @@ def run_sampler( | |
|
|
||
| likelihood = ZeroLikelihood(likelihood) | ||
|
|
||
| common_kwargs = dict( | ||
| likelihood=likelihood, | ||
| priors=priors, | ||
| outdir=outdir, | ||
| label=label, | ||
| injection_parameters=injection_parameters, | ||
| meta_data=meta_data, | ||
| use_ratio=use_ratio, | ||
| plot=plot, | ||
| result_class=result_class, | ||
| npool=npool, | ||
| pool=pool, | ||
| ) | ||
|
|
||
| if isinstance(sampler, Sampler): | ||
| pass | ||
| elif isinstance(sampler, str): | ||
| sampler_class = get_sampler_class(sampler) | ||
| sampler = sampler_class( | ||
| likelihood, | ||
| priors=priors, | ||
| outdir=outdir, | ||
| label=label, | ||
| injection_parameters=injection_parameters, | ||
| meta_data=meta_data, | ||
| use_ratio=use_ratio, | ||
| plot=plot, | ||
| result_class=result_class, | ||
| npool=npool, | ||
| **kwargs, | ||
| ) | ||
| sampler = sampler_class(**common_kwargs, **kwargs) | ||
| elif inspect.isclass(sampler): | ||
| sampler = sampler.__init__( | ||
| likelihood, | ||
| priors=priors, | ||
| outdir=outdir, | ||
| label=label, | ||
| use_ratio=use_ratio, | ||
| plot=plot, | ||
| injection_parameters=injection_parameters, | ||
| meta_data=meta_data, | ||
| npool=npool, | ||
| **kwargs, | ||
| ) | ||
| sampler = sampler.__init__(**common_kwargs, **kwargs) | ||
| else: | ||
| raise ValueError( | ||
| "Provided sampler should be a Sampler object or name of a known " | ||
|
|
@@ -305,42 +298,81 @@ def run_sampler( | |
| if sampler.cached_result: | ||
| logger.warning("Using cached result") | ||
| result = sampler.cached_result | ||
| result = apply_conversion_function( | ||
| result=result, | ||
| likelihood=likelihood, | ||
| conversion_function=conversion_function, | ||
| npool=npool, | ||
| pool=pool, | ||
| ) | ||
| else: | ||
| # Run the sampler | ||
| start_time = datetime.datetime.now() | ||
| if command_line_args.bilby_test_mode: | ||
| result = sampler._run_test() | ||
| else: | ||
| result = sampler.run_sampler() | ||
| end_time = datetime.datetime.now() | ||
|
|
||
| # Some samplers calculate the sampling time internally | ||
| if result.sampling_time is None: | ||
| result.sampling_time = end_time - start_time | ||
| elif isinstance(result.sampling_time, (float, int)): | ||
| result.sampling_time = datetime.timedelta(result.sampling_time) | ||
|
|
||
| logger.info(f"Sampling time: {result.sampling_time}") | ||
| # Convert sampling time into seconds | ||
| result.sampling_time = result.sampling_time.total_seconds() | ||
|
|
||
| if sampler.use_ratio: | ||
| result.log_noise_evidence = likelihood.noise_log_likelihood() | ||
| result.log_bayes_factor = result.log_evidence | ||
| result.log_evidence = result.log_bayes_factor + result.log_noise_evidence | ||
| else: | ||
| result.log_noise_evidence = likelihood.noise_log_likelihood() | ||
| result.log_bayes_factor = result.log_evidence - result.log_noise_evidence | ||
| with bilby_pool( | ||
| likelihood, | ||
| priors, | ||
| use_ratio=sampler.use_ratio, | ||
| search_parameter_keys=sampler.search_parameter_keys, | ||
| npool=npool, | ||
| pool=pool, | ||
| parameters=priors.sample(), | ||
| ) as _pool: | ||
| start_time = datetime.datetime.now() | ||
| sampler.pool = _pool | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is this safe to do? Depending on how the sampler uses the pool, are there settings where the pool the sampler has stored is not updated? For example, if the sampler has constructed a likelihood using the […]
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'm trying to work through what this would look like. If the initial input […]. One potential issue is that if a specific sampler implementation handles the pool internally by itself, this will create two pools, and then, in the best case, we have a bunch of extra processes we don't need. I think this is what nessai does, so maybe we should game through that specific case. |
||
| if command_line_args.bilby_test_mode: | ||
| result = sampler._run_test() | ||
| else: | ||
| result = sampler.run_sampler() | ||
| end_time = datetime.datetime.now() | ||
| result = finalize_result( | ||
| result=result, | ||
| likelihood=likelihood, | ||
| use_ratio=sampler.use_ratio, | ||
| start_time=start_time, | ||
| end_time=end_time, | ||
| ) | ||
|
|
||
| if None not in [result.injection_parameters, conversion_function]: | ||
| result.injection_parameters = conversion_function( | ||
| result.injection_parameters | ||
| # Initial save of the sampler in case of failure in samples_to_posterior | ||
| if save: | ||
| result.save_to_file(extension=save, gzip=gzip, outdir=outdir) | ||
|
|
||
| result = apply_conversion_function( | ||
| result=result, | ||
| likelihood=likelihood, | ||
| conversion_function=conversion_function, | ||
| npool=npool, | ||
| pool=_pool, | ||
| ) | ||
|
|
||
| # Initial save of the sampler in case of failure in samples_to_posterior | ||
| if save: | ||
| result.save_to_file(extension=save, gzip=gzip, outdir=outdir) | ||
| if save: | ||
| # The overwrite here ensures we overwrite the initially stored data | ||
| result.save_to_file(overwrite=True, extension=save, gzip=gzip, outdir=outdir) | ||
|
|
||
| if plot: | ||
| result.plot_corner() | ||
| logger.info(f"Summary of results:\n{result}") | ||
| return result | ||
|
|
||
|
|
||
| def apply_conversion_function( | ||
| result, likelihood, conversion_function, npool=None, pool=None | ||
| ): | ||
| """ | ||
| Apply the conversion function to the injected parameters and posterior if the | ||
| posterior has not already been created from the stored samples. | ||
|
|
||
| Parameters | ||
| ---------- | ||
| result : bilby.core.result.Result | ||
| The result object from the sampler. | ||
| likelihood : bilby.Likelihood | ||
| The likelihood used during sampling. | ||
| conversion_function : function | ||
| The conversion function to apply. | ||
| npool : int, optional | ||
| The number of processes to use in a processing pool. | ||
| pool : multiprocessing.Pool, schwimmbad.MPIPool, optional | ||
| The pool to use for parallelisation, this overrides the :code:`npool` argument. | ||
| """ | ||
| if None not in [result.injection_parameters, conversion_function]: | ||
| result.injection_parameters = conversion_function( | ||
| result.injection_parameters, | ||
|
|
@@ -354,15 +386,30 @@ def run_sampler( | |
| priors=result.priors, | ||
| conversion_function=conversion_function, | ||
| npool=npool, | ||
| pool=pool, | ||
| ) | ||
| return result | ||
|
|
||
| if save: | ||
| # The overwrite here ensures we overwrite the initially stored data | ||
| result.save_to_file(overwrite=True, extension=save, gzip=gzip, outdir=outdir) | ||
|
|
||
| if plot: | ||
| result.plot_corner() | ||
| logger.info(f"Summary of results:\n{result}") | ||
| def finalize_result(result, likelihood, use_ratio, start_time=None, end_time=None): | ||
| # Some samplers calculate the sampling time internally | ||
| if result.sampling_time is None and None not in [start_time, end_time]: | ||
| result.sampling_time = end_time - start_time | ||
| elif isinstance(result.sampling_time, (float, int)): | ||
| result.sampling_time = datetime.timedelta(result.sampling_time) | ||
|
|
||
| logger.info(f"Sampling time: {result.sampling_time}") | ||
| # Convert sampling time into seconds | ||
| result.sampling_time = result.sampling_time.total_seconds() | ||
|
|
||
| if use_ratio: | ||
| result.log_noise_evidence = likelihood.noise_log_likelihood() | ||
| result.log_bayes_factor = result.log_evidence | ||
| result.log_evidence = result.log_bayes_factor + result.log_noise_evidence | ||
| else: | ||
| result.log_noise_evidence = likelihood.noise_log_likelihood() | ||
| result.log_bayes_factor = result.log_evidence - result.log_noise_evidence | ||
|
|
||
| return result | ||
|
|
||
|
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Will this always reapply the conversion function? If so, I'm unsure whether this is desirable, since, rather than bilby quickly exiting when a run is already done, it will spend time doing the conversion. That said, I don't feel strongly about this, so I'm happy to keep it. Thoughts?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It will reapply it; this maintains the current behaviour. I'd be open to changing that behaviour, maybe by adding a flag to the result file to say whether the conversion has been applied, but I would suggest doing that as a separate change.