From 560d8a797c1f3cbef7b662f684ca6a118e6ddb57 Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Thu, 18 Sep 2025 14:56:16 -0400 Subject: [PATCH 01/16] move polynomial model example to example/models --- example/models/polynomial.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 example/models/polynomial.py diff --git a/example/models/polynomial.py b/example/models/polynomial.py new file mode 100644 index 00000000..06f761d9 --- /dev/null +++ b/example/models/polynomial.py @@ -0,0 +1,13 @@ +from numpy import inf + +parameters = [ + ["n", "", 1, [1,5], "", "number of coefficients (or degree+1)"], + ["c[n]", "", 0, [-inf, inf], "", "coefficients to c_n x^n"], +] + +Iq = r""" + int int_n = (int)n; + double result = c[int_n-1]; + for (int k=int_n-2; k >= 0; k--) { result = result*q + c[k]; } + return result; + """ From 49dd290a1f8868f99229ecdc1f5de1b01de26c10 Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Thu, 18 Sep 2025 15:05:33 -0400 Subject: [PATCH 02/16] =?UTF-8?q?rewrite=20sesans=202=CE=BC=20example=20to?= =?UTF-8?q?=20use=20bumps=20directly?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- example/sesans_sphere_2micron.py | 65 ++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 28 deletions(-) diff --git a/example/sesans_sphere_2micron.py b/example/sesans_sphere_2micron.py index b7ebea0e..1d0c8984 100644 --- a/example/sesans_sphere_2micron.py +++ b/example/sesans_sphere_2micron.py @@ -1,45 +1,54 @@ """ This is a data file used to load in sesans data and fit it using the bumps engine + +Usage: + + bumps sesans_sphere_2micron.py """ -import sesansfit -from bumps.names import Parameter +from bumps.names import FitProblem, Parameter -# Enter the model name to use -model_name = "sphere" +from sasdata import data_path -# DO NOT MODIFY THIS LINE -model = sesansfit.get_bumps_model(model_name) +from sasmodels.bumps_model import Experiment, Model +from sasmodels.core import load_model +from sasmodels.data import load_data -# Enter any custom parameters -# name = Parameter(initial_value, name='name') -phi = Parameter(0.0855, name='phi') -# Add the parameters to this list that should be displayed in the fitting window -custom_params = {"phi" : phi} +# Enter the model name and the datafile path +model_name = "sphere" +data_file = data_path / "sesans_data" / "sphere2micron.ses" -# SESANS data file name -sesans_file = "spheres2micron.ses" +# Custom parameters for use in expressions +# name = Parameter(initial_value, name='name') +phi = Parameter(0.0855, name='phi') # scale = phi*(1-phi) -# Initial parameter values (if other than defaults) -# "model_parameter_name" : value -initial_vals = { +# Initial parameter values and expressions (if other than defaults) +# "model_parameter_name" : value or expression +pars = { + "scale": phi*(1-phi), "sld" : 1.41, "radius" : 10000, "sld_solvent" : 2.70, } -# Ranges for parameters if other than default -# "model_parameter_name" : [min, max] -param_range = { - "phi" : [0.001, 0.5], - "radius" : [100, 100000] -} +# DO NOT MODIFY THIS LINE +model = Model(load_model(model_name), **pars) -# Constraints +# Bounds constraints # model.param_name = f(other params) -# EXAMPLE: model.scale = model.radius*model.radius*(1 - phi) - where radius -# and scale are model functions and phi is a custom parameter -model.scale = phi*(1-phi) +model.radius.range(100, 100000) +phi.range(0.001, 0.5) + # Send to the fitting engine -# DO NOT MODIFY THIS LINE -problem = sesansfit.sesans_fit(sesans_file, 
model, initial_vals, custom_params, param_range) +# DO NOT MODIFY THESE LINES +data = load_data(str(data_file)) +M = Experiment(data=data, model=model) +problem = FitProblem([M]) + +if __name__ == "__main__": + import matplotlib.pyplot as plt + + print(f"==== {model_name} model for {data_file.name} has χ² = {problem.chisq_str()} ====") + print(problem.summarize()) + problem.plot() + plt.show() From b83ee18a48d16ab9d0bf2cac4a325b99259f4c1f Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Thu, 18 Sep 2025 15:08:12 -0400 Subject: [PATCH 03/16] update sesans Code Camp III example to use sasdata loader --- example/sesansfit_CodeCampIII.py | 49 ++++++++++---------------------- 1 file changed, 15 insertions(+), 34 deletions(-) diff --git a/example/sesansfit_CodeCampIII.py b/example/sesansfit_CodeCampIII.py index b578f03b..64d83952 100644 --- a/example/sesansfit_CodeCampIII.py +++ b/example/sesansfit_CodeCampIII.py @@ -1,48 +1,23 @@ -import numpy as np +from pathlib import Path + from bumps.names import FitProblem, Parameter from sasmodels import bumps_model, core +from sasmodels.data import load_data -if True: # fix when data loader exists -# from sas.dataloader.readers\ - from sas.dataloader.loader import Loader - loader = Loader() - filename = 'sphere.ses' - data = loader.load(filename) - if data is None: - raise OSError("Could not load file %r"%(filename,)) - data.x /= 10 -# print data -# data = load_sesans('mydatfile.pz') -# sans_data = load_sans('mysansfile.xml') - -else: - SElength = np.linspace(0, 2400, 61) # [A] - data = np.ones_like(SElength) - err_data = np.ones_like(SElength)*0.03 - - class Sample: - zacceptance = 0.1 # [A^-1] - thickness = 0.2 # [cm] - - class SESANSData1D: - #q_zmax = 0.23 # [A^-1] - lam = 0.2 # [nm] - x = SElength - y = data - dy = err_data - sample = Sample() - data = SESANSData1D() +path = Path(__file__).resolve().parent +data = load_data(str(path / 'sphere.ses')) radius = 1000 data.Rmax = 3*radius # [A] ## Sphere parameters -kernel = core.load_model("sphere", dtype='single') +kernel = core.load_model("sphere") phi = Parameter(0.1, name="phi") -model = bumps_model.Model(kernel, - scale=phi*(1-phi), sld=7.0, solvent_sld=1.0, radius=radius, +model = bumps_model.Model( + kernel, + scale=phi*(1-phi), sld=7.0, sld_solvent=1.0, radius=radius, ) phi.range(0.001,0.5) #model.radius.pmp(40) @@ -74,3 +49,9 @@ class SESANSData1D: else: M_sesans = bumps_model.Experiment(data=data, model=model) problem = FitProblem(M_sesans) + + +if __name__ == "__main__": + import matplotlib.pyplot as plt + problem.plot() + plt.show() \ No newline at end of file From 20d6fac62ce509f3de0e5b52c004bb6401fb11d8 Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Thu, 18 Sep 2025 15:10:59 -0400 Subject: [PATCH 04/16] update 2d fitting example to load data relative to model file --- example/fit2d.py | 204 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 204 insertions(+) create mode 100644 example/fit2d.py diff --git a/example/fit2d.py b/example/fit2d.py new file mode 100644 index 00000000..4a586e5d --- /dev/null +++ b/example/fit2d.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python + +import sys +from pathlib import Path + +from bumps.names import FitProblem + +from sasmodels.bumps_model import Experiment, Model +from sasmodels.core import load_model +from sasmodels.data import load_data, set_beam_stop, set_top + +""" IMPORT THE DATA USED """ +path = Path(__file__).resolve().parent +radial_data = load_data(str(path / 'DEC07267.DAT')) +set_beam_stop(radial_data, 0.00669, outer=0.025) 
+set_top(radial_data, -.0185) + +tan_data = load_data(str(path / 'DEC07266.DAT')) +set_beam_stop(tan_data, 0.00669, outer=0.025) +set_top(tan_data, -.0185) +#sas.set_half(tan_data, 'right') + +name = "ellipsoid" if len(sys.argv) < 2 else sys.argv[1] +section = "radial" if len(sys.argv) < 3 else sys.argv[2] +if section not in ("radial","tangential","both"): + raise ValueError("section %r should be 'radial', 'tangential' or 'both'" + % section) +data = radial_data if section != "tangential" else tan_data +theta = 89.9 if section != "tangential" else 0 +phi = 90 +kernel = load_model(name, dtype="single") +cutoff = 1e-3 + +if name == "ellipsoid": + pars = dict( + scale=0.08, background=35, + radius_polar=15, radius_equatorial=800, + sld=.291, sld_solvent=7.105, + theta=theta, phi=phi, + theta_pd=0, theta_pd_n=0, theta_pd_nsigma=3, + phi_pd=0, phi_pd_n=20, phi_pd_nsigma=3, + radius_polar_pd=0.222296, radius_polar_pd_n=1, radius_polar_pd_nsigma=0, + radius_equatorial_pd=.000128, radius_equatorial_pd_n=1, + radius_equatorial_pd_nsigma=0, + ) + model = Model(kernel, **pars) + + # SET THE FITTING PARAMETERS + model.radius_polar.range(15, 1000) + model.radius_equatorial.range(15, 1000) + #model.theta.range(0, 90) + #model.theta_pd.range(0,10) + model.phi_pd.range(0,20) + model.phi.range(0, 180) + model.background.range(0,1000) + model.scale.range(0, 10) + + +elif name == "lamellar": + pars = dict( + scale=0.08, background=0.003, + thickness=19.2946, + sld=5.38,sld_sol=7.105, + thickness_pd= 0.37765, thickness_pd_n=10, thickness_pd_nsigma=3, + ) + model = Model(kernel, **pars) + + # SET THE FITTING PARAMETERS + #model.thickness.range(0, 1000) + #model.scale.range(0, 1) + #model.thickness_pd.range(0, 1000) + #model.background.range(0, 1000) + model.sld.range(0, 1) + + +elif name == "cylinder": + pars = dict( + scale=.01, background=35, + sld=.291, sld_solvent=5.77, + radius=250, length=178, + radius_pd=0.1, radius_pd_n=5, radius_pd_nsigma=3, + length_pd=0.1,length_pd_n=5, length_pd_nsigma=3, + theta=theta, phi=phi, + theta_pd=0, theta_pd_n=0, theta_pd_nsigma=3, + phi_pd=10, phi_pd_n=20, phi_pd_nsigma=3) + model = Model(kernel, **pars) + + # SET THE FITTING PARAMETERS + model.radius.range(1, 500) + model.length.range(1, 5000) + #model.theta.range(0, 90) + model.phi.range(0, 180) + model.phi_pd.range(0, 30) + model.radius_pd.range(0, 1) + model.length_pd.range(0, 1) + model.scale.range(0, 10) + model.background.range(0, 100) + + +elif name == "core_shell_cylinder": + model = Model(kernel, + scale= .031, background=0, + radius=19.5, thickness=30, length=22, + sld_core=7.105, sld_shell=.291, sld_solvent=7.105, + radius_pd=0.26, radius_pd_n=10, radius_pd_nsigma=3, + length_pd=0.26, length_pd_n=10, length_pd_nsigma=3, + thickness_pd=1, thickness_pd_n=1, thickness_pd_nsigma=1, + theta=theta, phi=phi, + theta_pd=1, theta_pd_n=1, theta_pd_nsigma=3, + phi_pd=0, phi_pd_n=20, phi_pd_nsigma=3, + ) + + # SET THE FITTING PARAMETERS + model.radius.range(115, 1000) + model.length.range(0, 2500) + #model.thickness.range(18, 38) + #model.thickness_pd.range(0, 1) + #model.phi.range(0, 90) + model.phi_pd.range(0,20) + #model.radius_pd.range(0, 1) + #model.length_pd.range(0, 1) + #model.theta_pd.range(0, 360) + #model.background.range(0,5) + model.scale.range(0, 1) + + + +elif name == "capped_cylinder": + model = Model(kernel, + scale=.08, background=35, + radius=20, cap_radius=40, length=400, + sld=1, sld_solvent=6.3, + radius_pd=.1, radius_pd_n=5, radius_pd_nsigma=3, + cap_radius_pd=.1, cap_radius_pd_n=5, 
cap_radius_pd_nsigma=3, + length_pd=.1, length_pd_n=1, length_pd_nsigma=0, + theta=theta, phi=phi, + theta_pd=0, theta_pd_n=1, theta_pd_nsigma=0, + phi_pd=10, phi_pd_n=20, phi_pd_nsigma=0, + ) + + model.radius.range(115, 1000) + model.length.range(0, 2500) + #model.thickness.range(18, 38) + #model.thickness_pd.range(0, 1) + #model.phi.range(0, 90) + model.phi_pd.range(0,20) + #model.radius_pd.range(0, 1) + #model.length_pd.range(0, 1) + #model.theta_pd.range(0, 360) + #model.background.range(0,5) + model.scale.range(0, 1) + + +elif name == "triaxial_ellipsoid": + pars = dict( + scale=0.08, background=35, + radius_equat_minor=15, radius_equat_major=20, radius_polar=500, + sld=7.105, solvent_sld=.291, + radius_equat_minor_pd=.1, radius_equat_minor_pd_n=1, radius_equat_minor_pd_nsigma=0, + radius_equat_major_pd=.1, radius_equat_major_pd_n=1, radius_equat_major_pd_nsigma=0, + radius_polar_pd=.1, radius_polar_pd_n=1, radius_polar_pd_nsigma=0, + theta=theta, phi=phi, psi=0, + theta_pd=20, theta_pd_n=40, theta_pd_nsigma=3, + phi_pd=.1, phi_pd_n=1, phi_pd_nsigma=0, + psi_pd=30, psi_pd_n=1, psi_pd_nsigma=0, + ) + model = Model(kernel, **pars) + + # SET THE FITTING PARAMETERS + model.radius_equat_minor.range(15, 1000) + model.radius_equat_major.range(15, 1000) + #model.radius_polar.range(15, 1000) + #model.background.range(0,1000) + #model.theta_pd.range(0, 360) + #model.phi_pd.range(0, 360) + #model.psi_pd.range(0, 360) + +else: + print("No parameters for %s"%name) + sys.exit(1) + +model.cutoff = cutoff +M = Experiment(data=data, model=model) +if section == "both": + tan_model = Model(model.sasmodel, **model.parameters()) + tan_model.phi = model.phi - 90 + tan_model.cutoff = cutoff + tan_M = Experiment(data=tan_data, model=tan_model) + problem = FitProblem([M, tan_M]) +else: + problem = FitProblem(M) + +M.problem = problem + +if __name__ == "__main__": + import matplotlib.pyplot as plt + + problem.summarize() + # Plot using plotly + fig = M._plot(backend="plotly") + fig.show(renderer='browser') + # Plot using matplotlib + M._plot(backend="matplotlib") + plt.show() From 75a250baac8931f660c9b340b559684de2da1be6 Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Thu, 18 Sep 2025 15:12:01 -0400 Subject: [PATCH 05/16] update examples to use latex_smeared from sasdata example datasets --- example/fit.py | 194 -------------------------------------- example/multiscatfit.py | 8 +- example/oriented_usans.py | 4 +- example/polynomial.py | 13 --- example/simul_fit.py | 9 +- 5 files changed, 16 insertions(+), 212 deletions(-) delete mode 100644 example/fit.py delete mode 100644 example/polynomial.py diff --git a/example/fit.py b/example/fit.py deleted file mode 100644 index 5e6bc5e8..00000000 --- a/example/fit.py +++ /dev/null @@ -1,194 +0,0 @@ -#!/usr/bin/env python - -import sys - -from bumps.names import FitProblem - -from sasmodels.bumps_model import Experiment, Model -from sasmodels.core import load_model -from sasmodels.data import load_data, set_beam_stop, set_top - -""" IMPORT THE DATA USED """ -radial_data = load_data('DEC07267.DAT') -set_beam_stop(radial_data, 0.00669, outer=0.025) -set_top(radial_data, -.0185) - -tan_data = load_data('DEC07266.DAT') -set_beam_stop(tan_data, 0.00669, outer=0.025) -set_top(tan_data, -.0185) -#sas.set_half(tan_data, 'right') - -name = "ellipsoid" if len(sys.argv) < 2 else sys.argv[1] -section = "radial" if len(sys.argv) < 3 else sys.argv[2] -if section not in ("radial","tangential","both"): - raise ValueError("section %r should be 'radial', 'tangential' or 'both'" - % 
section) -data = radial_data if section != "tangential" else tan_data -theta = 89.9 if section != "tangential" else 0 -phi = 90 -kernel = load_model(name, dtype="single") -cutoff = 1e-3 - -if name == "ellipsoid": - pars = dict( - scale=0.08, background=35, - radius_polar=15, radius_equatorial=800, - sld=.291, sld_solvent=7.105, - theta=theta, phi=phi, - theta_pd=0, theta_pd_n=0, theta_pd_nsigma=3, - phi_pd=0, phi_pd_n=20, phi_pd_nsigma=3, - radius_polar_pd=0.222296, radius_polar_pd_n=1, radius_polar_pd_nsigma=0, - radius_equatorial_pd=.000128, radius_equatorial_pd_n=1, - radius_equatorial_pd_nsigma=0, - ) - model = Model(kernel, **pars) - - # SET THE FITTING PARAMETERS - model.radius_polar.range(15, 1000) - model.radius_equatorial.range(15, 1000) - #model.theta.range(0, 90) - #model.theta_pd.range(0,10) - model.phi_pd.range(0,20) - model.phi.range(0, 180) - model.background.range(0,1000) - model.scale.range(0, 10) - - -elif name == "lamellar": - pars = dict( - scale=0.08, background=0.003, - thickness=19.2946, - sld=5.38,sld_sol=7.105, - thickness_pd= 0.37765, thickness_pd_n=10, thickness_pd_nsigma=3, - ) - model = Model(kernel, **pars) - - # SET THE FITTING PARAMETERS - #model.thickness.range(0, 1000) - #model.scale.range(0, 1) - #model.thickness_pd.range(0, 1000) - #model.background.range(0, 1000) - model.sld.range(0, 1) - - -elif name == "cylinder": - pars = dict( - scale=.01, background=35, - sld=.291, sld_solvent=5.77, - radius=250, length=178, - radius_pd=0.1, radius_pd_n=5, radius_pd_nsigma=3, - length_pd=0.1,length_pd_n=5, length_pd_nsigma=3, - theta=theta, phi=phi, - theta_pd=0, theta_pd_n=0, theta_pd_nsigma=3, - phi_pd=10, phi_pd_n=20, phi_pd_nsigma=3) - model = Model(kernel, **pars) - - # SET THE FITTING PARAMETERS - model.radius.range(1, 500) - model.length.range(1, 5000) - #model.theta.range(0, 90) - model.phi.range(0, 180) - model.phi_pd.range(0, 30) - model.radius_pd.range(0, 1) - model.length_pd.range(0, 1) - model.scale.range(0, 10) - model.background.range(0, 100) - - -elif name == "core_shell_cylinder": - model = Model(kernel, - scale= .031, background=0, - radius=19.5, thickness=30, length=22, - sld_core=7.105, sld_shell=.291, sld_solvent=7.105, - radius_pd=0.26, radius_pd_n=10, radius_pd_nsigma=3, - length_pd=0.26, length_pd_n=10, length_pd_nsigma=3, - thickness_pd=1, thickness_pd_n=1, thickness_pd_nsigma=1, - theta=theta, phi=phi, - theta_pd=1, theta_pd_n=1, theta_pd_nsigma=3, - phi_pd=0, phi_pd_n=20, phi_pd_nsigma=3, - ) - - # SET THE FITTING PARAMETERS - model.radius.range(115, 1000) - model.length.range(0, 2500) - #model.thickness.range(18, 38) - #model.thickness_pd.range(0, 1) - #model.phi.range(0, 90) - model.phi_pd.range(0,20) - #model.radius_pd.range(0, 1) - #model.length_pd.range(0, 1) - #model.theta_pd.range(0, 360) - #model.background.range(0,5) - model.scale.range(0, 1) - - - -elif name == "capped_cylinder": - model = Model(kernel, - scale=.08, background=35, - radius=20, cap_radius=40, length=400, - sld=1, sld_solvent=6.3, - radius_pd=.1, radius_pd_n=5, radius_pd_nsigma=3, - cap_radius_pd=.1, cap_radius_pd_n=5, cap_radius_pd_nsigma=3, - length_pd=.1, length_pd_n=1, length_pd_nsigma=0, - theta=theta, phi=phi, - theta_pd=0, theta_pd_n=1, theta_pd_nsigma=0, - phi_pd=10, phi_pd_n=20, phi_pd_nsigma=0, - ) - - model.radius.range(115, 1000) - model.length.range(0, 2500) - #model.thickness.range(18, 38) - #model.thickness_pd.range(0, 1) - #model.phi.range(0, 90) - model.phi_pd.range(0,20) - #model.radius_pd.range(0, 1) - #model.length_pd.range(0, 1) - 
#model.theta_pd.range(0, 360) - #model.background.range(0,5) - model.scale.range(0, 1) - - -elif name == "triaxial_ellipsoid": - pars = dict( - scale=0.08, background=35, - radius_equat_minor=15, radius_equat_major=20, radius_polar=500, - sld=7.105, solvent_sld=.291, - radius_equat_minor_pd=.1, radius_equat_minor_pd_n=1, radius_equat_minor_pd_nsigma=0, - radius_equat_major_pd=.1, radius_equat_major_pd_n=1, radius_equat_major_pd_nsigma=0, - radius_polar_pd=.1, radius_polar_pd_n=1, radius_polar_pd_nsigma=0, - theta=theta, phi=phi, psi=0, - theta_pd=20, theta_pd_n=40, theta_pd_nsigma=3, - phi_pd=.1, phi_pd_n=1, phi_pd_nsigma=0, - psi_pd=30, psi_pd_n=1, psi_pd_nsigma=0, - ) - model = Model(kernel, **pars) - - # SET THE FITTING PARAMETERS - model.radius_equat_minor.range(15, 1000) - model.radius_equat_major.range(15, 1000) - #model.radius_polar.range(15, 1000) - #model.background.range(0,1000) - #model.theta_pd.range(0, 360) - #model.phi_pd.range(0, 360) - #model.psi_pd.range(0, 360) - -else: - print("No parameters for %s"%name) - sys.exit(1) - -model.cutoff = cutoff -M = Experiment(data=data, model=model) -if section == "both": - tan_model = Model(model.sasmodel, **model.parameters()) - tan_model.phi = model.phi - 90 - tan_model.cutoff = cutoff - tan_M = Experiment(data=tan_data, model=tan_model) - problem = FitProblem([M, tan_M]) -else: - problem = FitProblem(M) - -if __name__ == "__main__": - import matplotlib.pyplot as plt - problem.plot() - plt.show() diff --git a/example/multiscatfit.py b/example/multiscatfit.py index fa55770d..c2c11b7c 100644 --- a/example/multiscatfit.py +++ b/example/multiscatfit.py @@ -28,6 +28,8 @@ from bumps.names import FitProblem +from sasdata import data_path + from sasmodels.bumps_model import Experiment, Model from sasmodels.core import load_model from sasmodels.data import load_data @@ -36,7 +38,7 @@ ## Load the data #data = load_data('DEC07267.DAT') #set_beam_stop(data, 0.003, outer=0.025) -data = load_data('latex_smeared.xml', index=0) +data = load_data(str(data_path / '1d_data' / 'latex_smeared.xml'), index=0) ## Define the model kernel = load_model("ellipsoid") @@ -78,5 +80,5 @@ if __name__ == "__main__": #M.theory() M.plot() - import pylab - pylab.show() + import matplotlib.pyplot as plt + plt.show() diff --git a/example/oriented_usans.py b/example/oriented_usans.py index 77ad3bfc..28b588d6 100644 --- a/example/oriented_usans.py +++ b/example/oriented_usans.py @@ -1,12 +1,14 @@ import numpy as np from bumps.names import FitProblem +from sasdata import data_path + from sasmodels.bumps_model import Experiment, Model from sasmodels.core import load_model from sasmodels.data import load_data # Spherical particle data, not ellipsoids -sans, usans = load_data('latex_smeared.xml', index='all') +sans, usans = load_data(str(data_path / '1d_data' / 'latex_smeared.xml'), index='all') usans.qmin, usans.qmax = np.min(usans.x), np.max(usans.x) usans.mask = (usans.x < 0.0) usans.oriented = True diff --git a/example/polynomial.py b/example/polynomial.py deleted file mode 100644 index 06f761d9..00000000 --- a/example/polynomial.py +++ /dev/null @@ -1,13 +0,0 @@ -from numpy import inf - -parameters = [ - ["n", "", 1, [1,5], "", "number of coefficients (or degree+1)"], - ["c[n]", "", 0, [-inf, inf], "", "coefficients to c_n x^n"], -] - -Iq = r""" - int int_n = (int)n; - double result = c[int_n-1]; - for (int k=int_n-2; k >= 0; k--) { result = result*q + c[k]; } - return result; - """ diff --git a/example/simul_fit.py b/example/simul_fit.py index 61b497a8..fe486eb8 100644 
--- a/example/simul_fit.py +++ b/example/simul_fit.py @@ -1,13 +1,15 @@ import numpy as np from bumps.names import FitProblem, FreeVariables +from sasdata import data_path + from sasmodels.bumps_model import Experiment, Model from sasmodels.core import load_model from sasmodels.data import load_data # latex data, same sample usans and sans # particles radius ~2300, uniform dispersity -datasets = load_data('latex_smeared.xml', index='all') +datasets = load_data(str(data_path / '1d_data' / 'latex_smeared.xml'), index='all') #[print(data) for data in datasets] # A single sphere model to share between the datasets. We will use @@ -61,3 +63,8 @@ M = [Experiment(data=data, model=model, name=data.run[0]) for data in datasets] problem = FitProblem(M, freevars=free) + +if __name__ == "__main__": + import matplotlib.pyplot as plt + problem.plot() + plt.show() \ No newline at end of file From a5e2b7af93b86e5a6d337dd6fff3e53e24335ae2 Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Thu, 18 Sep 2025 15:14:01 -0400 Subject: [PATCH 06/16] fix oriented usans example: Slit2D interface changed from qx_width,qy_width to q_width,q_length --- sasmodels/direct_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sasmodels/direct_model.py b/sasmodels/direct_model.py index 7662e17a..c2c0565d 100644 --- a/sasmodels/direct_model.py +++ b/sasmodels/direct_model.py @@ -275,8 +275,8 @@ def _interpret_data(self, data: Data, model: KernelModel) -> None: res = resolution2d.Slit2D( data.x[index], - qx_width=data.dxw[index], - qy_width=data.dxl[index]) + q_width=data.dxw[index], + q_length=data.dxl[index]) else: raise ValueError("Unknown data type") # never gets here From 65c89effb927468dc128141ba456037c6ff884b8 Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Thu, 18 Sep 2025 15:15:46 -0400 Subject: [PATCH 07/16] Rewrite plotters to a more compact layout, better suited to bumps; include plotly versions --- sasmodels/bumps_model.py | 20 +- sasmodels/compare.py | 6 +- sasmodels/data.py | 860 +++++++++++++++++++++++++++------------ 3 files changed, 628 insertions(+), 258 deletions(-) diff --git a/sasmodels/bumps_model.py b/sasmodels/bumps_model.py index f34dbfdb..f7fe171d 100644 --- a/sasmodels/bumps_model.py +++ b/sasmodels/bumps_model.py @@ -241,7 +241,9 @@ def residuals(self): """ Return theory minus data normalized by uncertainty. """ - #if np.any(self.err ==0): print("zeros in err") + # if np.any(self.err ==0): print("zeros in err") + # from bumps.fitproblem import fitness_show_parameters + # if any(np.isnan(self.theory())): (print("nan in theory for:"),fitness_show_parameters(self)) return (self.theory() - self.Iq) / self.dIq def nllf(self): @@ -258,15 +260,25 @@ def nllf(self): #def __call__(self): # return 2 * self.nllf() / self.dof - def plot(self, view=None): + def _plot(self, view=None, backend='matplotlib'): # type: (str) -> None """ Plot the data and residuals. 
""" data, theory, resid = self._data, self.theory(), self.residuals() # TODO: hack to display oriented usans 2-D pattern - Iq_calc = self.Iq_calc if isinstance(self.Iq_calc, tuple) else None - plot_theory(data, theory, resid, view, Iq_calc=Iq_calc) + #Iq_calc = self.Iq_calc if isinstance(self.Iq_calc, tuple) else None + label = f"I(q) {self.model.sasmodel.info.name}" + Iq_calc = self.Iq_calc + fig = plot_theory(data, theory, resid, view, Iq_calc=Iq_calc, label=label, backend=backend) + return fig + + def plot(self, view=None): + return self._plot(view=view, backend='matplotlib') + + # # Bumps doesn't yet support 2D plots with plotly. + # def plotly(self, view=None): + # return self._plot(view=view, backend='plotly') def simulate_data(self, noise=None): # type: (float) -> None diff --git a/sasmodels/compare.py b/sasmodels/compare.py index 081e2589..01cc22ac 100755 --- a/sasmodels/compare.py +++ b/sasmodels/compare.py @@ -960,6 +960,9 @@ def plot_models(opts, result, limits=None, setnum=0): plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits) plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits) plt.title("%s t=%.2f ms"%(comp.engine, comp_time)) + #plt.gca().tick_params(labelbottom=False, labelleft=False) + plt.gca().set_yticks([]) + plt.ylabel('') #cbar_title = "log I" if have_base and have_comp: plt.subplot(133) @@ -977,9 +980,10 @@ def plot_models(opts, result, limits=None, setnum=0): # Note: base_data only since base and comp have same q values (though # perhaps different resolution), and we are plotting the difference # at each q - plot_theory(base_data, None, resid=err, view=errview, use_data=use_data) + plot_theory(base_data, err, view=errview, use_data=use_data) plt.xscale('log' if view == 'log' and not opts['is2d'] else 'linear') plt.title("max %s = %.3g"%(errstr, abs(err).max())) + plt.ylabel(errstr) #cbar_title = errstr if errview=="linear" else "log "+errstr #if is2D: # h = plt.colorbar() diff --git a/sasmodels/data.py b/sasmodels/data.py index 77fdfbfb..94f28b25 100644 --- a/sasmodels/data.py +++ b/sasmodels/data.py @@ -36,7 +36,7 @@ from functools import wraps import numpy as np # type: ignore -from numpy import cos, pi, sin, sqrt +from numpy import cos, ma, pi, sin, sqrt # pylint: disable=unused-import try: @@ -59,6 +59,7 @@ def load_data(filename, index=0): except ImportError as ie: raise ImportError(f"{ie.name} is not available. Add sasdata to the python path.") loader = Loader() + filename = str(filename) # In case a Path was given. # Allow for one part in multipart file if '[' in filename: filename, indexstr = filename[:-1].split('[') @@ -344,10 +345,14 @@ def empty_data1D(q, resolution=0.0, L=0., dL=0.): r""" Create empty 1D data using the given *q* as the x value. - rms *resolution* $\Delta q/q$ defaults to 0%. If wavelength *L* and rms - wavelength divergence *dL* are defined, then *resolution* defines - rms $\Delta \theta/\theta$ for the lowest *q*, with $\theta$ derived from - $q = 4\pi/\lambda \sin(\theta)$. + rms *resolution* $\Delta q/q$ defaults to 0.0. Note that this is + expressed as a fraction rather than a percentage. + + If wavelength *L* and rms wavelength divergence *dL* are given, then + angle *theta* is infered from $q = 4\pi/\lambda \sin(\theta)$, and + the *resolution* term applies to rms $\Delta \theta/\theta$ instead + of $\Delta q/q$. Resolution $\Delta q$ is then calculated from wavelength + and angular resolution. 
""" #Iq = 100 * np.ones_like(q) @@ -378,7 +383,7 @@ def empty_data2D(qx, qy=None, resolution=0.0): If *qy* is missing, create a square mesh with *qy=qx*. - *resolution* dq/q defaults to 5%. + *resolution* dq/q defaults to 0.0. """ if qy is None: qy = qx @@ -413,8 +418,8 @@ def empty_data2D(qx, qy=None, resolution=0.0): return data -def plot_data(data, view=None, limits=None): - # type: (Data, str, OptLimits) -> None +def plot_data(data, view=None, limits=None, backend='matplotlib'): + # type: (Data, str, OptLimits, str) -> None """ Plot data loaded by the sasview loader. @@ -433,12 +438,14 @@ def plot_data(data, view=None, limits=None): elif hasattr(data, 'qx_data') and not getattr(data, 'radial', False): _plot_result2D(data, None, None, view, use_data=True, limits=limits) else: - _plot_result1D(data, None, None, view, use_data=True, limits=limits) + return _plot_result1D( + data, None, None, view, use_data=True, + limits=limits, backend=backend) def plot_theory(data, theory, resid=None, view=None, use_data=True, - limits=None, Iq_calc=None): - # type: (Data, OptArray, OptArray, OptString, bool, OptLimits, OptArray) -> None + limits=None, Iq_calc=None, label='theory', backend='matplotlib'): + # type: (Data, OptArray, OptArray, OptString, bool, OptLimits, OptArray, str, str) -> None """ Plot theory calculation. @@ -455,17 +462,20 @@ def plot_theory(data, theory, resid=None, view=None, use_data=True, are inferred from the data. If (-inf, inf) then use auto limits. *Iq_calc* is the raw theory values without resolution smearing + + *label* is the label for the theory curve """ if limits is not None and np.isinf(limits[0]): limits = None if hasattr(data, 'isSesans') and data.isSesans: - _plot_result_sesans(data, theory, resid, view, use_data=True, limits=limits) + fig = _plot_result_sesans(data, theory, resid, view, use_data=True, limits=limits, label=label, backend=backend) elif hasattr(data, 'qx_data') and not getattr(data, 'radial', False): - _plot_result2D(data, theory, resid, view, use_data, limits=limits) + fig = _plot_result2D(data, theory, resid, view, use_data, limits=limits, label=label, backend=backend) else: - _plot_result1D(data, theory, resid, view, use_data, - limits=limits, Iq_calc=Iq_calc) - + fig = _plot_result1D( + data, theory, resid, view, use_data, + limits=limits, Iq_calc=Iq_calc, label=label, backend=backend) + return fig def protect(func): # type: (Callable) -> Callable @@ -480,24 +490,48 @@ def wrapper(*args, **kw): """ try: return func(*args, **kw) - except Exception as exc: - print("Traceback (most recent call last):") - print("".join(traceback.format_list(traceback.extract_stack(limit=4)[:-1]))[:-1]) - print(f"{exc.__class__.__name__}: {exc}") + except Exception: + # TODO: fix traceback printing + # The first bit prints the traceback up to the protect call + # The remaining bit prints the traceback within the protect call + # Ideally we would join these into one traceback + #print("Traceback (most recent call last):") + stack = traceback.extract_stack(limit=4) + lines = traceback.format_list(stack[:-1]) + print("".join(lines)[:-1]) + #print(f"{exc.__class__.__name__}: {exc}") + traceback.print_exc() return wrapper - -@protect -def _plot_result1D(data, theory, resid, view, use_data, - limits=None, Iq_calc=None): - # type: (Data1D, OptArray, OptArray, str, bool, OptLimits, OptArray) -> None +def get_data_label(data): + # TODO: what do we want as data label? 
+ # example_data/1d_data/latex_smeared.xml[0] + # data.filename: "latex_smeared.xml" + # data.run: ["latex_sans"] + # data.title: "latex particles 0.5micron diameter in D2O Qdev" + # example_data/1d_data/latex_smeared.xml[1] + # data.filename: "latex_smeared.xml" + # data.run: ["latex_usans"] + # data.title: "latex particles 0.5micron diameter in D2O slit" + # For this dataset I'm using: + # data.run[0] > data.filename > data.title > "data" + title = getattr(data, 'title', None) + filename = getattr(data, 'filename', None) + run = getattr(data, 'run', []) + if isinstance(run, list) and run: # at least one run name + run = run[0] + return run if run else filename if filename else title if title else "data" + +RESID_RATIO = 0.15 +def _plot_result1D( + data, theory, resid, view, use_data, + limits=None, Iq_calc=None, label='theory', + backend='matplotlib'): + # type: (Data1D, OptArray, OptArray, str, bool, OptLimits, OptArray, str, str) -> None """ Plot the data and residuals for 1D data. """ - import matplotlib.pyplot as plt # type: ignore - from numpy.ma import masked, masked_array # type: ignore - # Default to 'log' view if view is None: view = 'log' @@ -509,202 +543,529 @@ def _plot_result1D(data, theory, resid, view, use_data, use_data = use_data and data.y is not None use_theory = theory is not None use_resid = resid is not None - use_calc = use_theory and Iq_calc is not None - num_plots = (use_data or use_theory) + use_calc + use_resid - non_positive_x = (data.x <= 0.0).any() + use_calc = Iq_calc is not None and len(Iq_calc) == 2 # (q, Iq) + use_calc2D = Iq_calc is not None and len(Iq_calc) == 3 # (qx, qy, Iq) - scale = 1e8 * data.x**4 if view == 'q4' else 1.0 + def ytransform(x): + return 1e8*x**4 if view == 'q4' else 1.0 - if use_data or use_theory: - if num_plots > 1: - plt.subplot(1, num_plots, 1) + graph_ratio = 1.0 if resid is None else 1 - RESID_RATIO - #print(vmin, vmax) - all_positive = True - some_present = False - if use_data: - mdata = masked_array(data.y, data.mask.copy()) - mdata[~np.isfinite(mdata)] = masked - if view == 'log': - mdata[mdata <= 0] = masked - plt.errorbar(data.x, scale*mdata, yerr=scale*data.dy, fmt='.') - all_positive = all_positive and (mdata > 0).all() - some_present = some_present or (mdata.count() > 0) + xlabel = "q/Å" + ylabel = '10⁸q⁴I(q)' if view == 'q4' else 'I(q)' + xscale = 'log' + yscale = 'log' if view == 'log' else 'linear' + + if use_data: + data_label = get_data_label(data) + xdata = data.x # not a copy + if (xdata <= 0).any(): + xscale = 'linear' + ydata = ma.masked_array(data.y*ytransform(xdata), data.mask.copy()) # copy + ydata[~np.isfinite(ydata)] = ma.masked + if yscale == 'log': + ydata[ydata <= 0] = ma.masked + yerr = data.dy * ytransform(xdata) + + if use_theory: + # TODO: provide model name to the plotter + theory_label = label + xtheory = data.x[data.mask == 0] #copy + ytheory = ma.masked_array(theory*ytransform(xtheory)) # copy + if xscale == 'log': + xtheory[xtheory <= 0] = ma.masked + ytheory[~np.isfinite(ytheory)] = ma.masked + if yscale == 'log': + ytheory[ytheory <= 0] = ma.masked + + if use_calc: + calc_label = 'no resolution' + xcalc, ycalc = Iq_calc # not a copy + ycalc = ma.masked_array(ycalc*ytransform(xcalc)) # copy + if xscale == 'log': + xcalc = xcalc.copy() # copy + xcalc[xcalc <= 0] = ma.masked + ycalc[~np.isfinite(ycalc)] = ma.masked + if yscale == 'log': + ycalc[ycalc <= 0] = ma.masked + + if use_resid: + resid_label = '(I(q) - y)/Δy' + xresid = data.x[data.mask == 0] # copy + yresid = 
ma.masked_array(resid.copy()) # copy + yresid[~np.isfinite(yresid)] = ma.masked + + + if backend == 'plotly': + import plotly.graph_objects as go + from plotly.subplots import make_subplots + + rows = 2 if graph_ratio > 0 else 1 + heights = [graph_ratio, 1 - graph_ratio] if graph_ratio > 0 else None + fig = make_subplots( + cols=1, rows=rows, row_heights=heights, + # TODO: x-axis sharing isn't working + shared_xaxes=True, + vertical_spacing=0.01) + if use_data: + fig.add_trace( + go.Scatter( + x=xdata, y=ydata, error_y=dict(type='data', array=yerr), + name=data_label, mode='markers'), + row=1, col=1) if use_theory: - # Theory values are only calculated where the data is not masked, - # so restrict data.x and scale to only those points. - # Note: masks merge, so any masked theory points will stay masked, - # and the data mask will be added to it. - #mtheory = masked_array(theory, data.mask.copy()) - theory_x = data.x[data.mask == 0] - theory_scale = scale if np.isscalar(scale) else scale[data.mask == 0] - mtheory = masked_array(theory) - mtheory[~np.isfinite(mtheory)] = masked - if view == 'log': - mtheory[mtheory <= 0] = masked - plt.plot(theory_x, theory_scale*mtheory, '-') - all_positive = all_positive and (mtheory > 0).all() - some_present = some_present or (mtheory.count() > 0) + fig.add_trace( + go.Scatter(x=xtheory, y=ytheory, name=theory_label, mode='lines'), + row=1, col=1) + + if use_calc: # (q, Iq), not (qx, qy, Iqxy) + fig.add_trace( + go.Scatter(x=xcalc, y=ycalc, name=calc_label, mode='lines', line=dict(dash='dot')), + row=1, col=1) if limits is not None: - plt.ylim(*limits) + fig.update_yaxes(range=limits, row=1, col=1) + + fig.update_xaxes(type=xscale, row=1, col=1) + fig.update_yaxes(type=yscale, row=1, col=1) + fig.update_layout(yaxis_title=ylabel) + + if use_resid: + fig.add_trace( + go.Scatter(x=xtheory, y=yresid, mode='markers', name=resid_label), + row=2, col=1) + fig.add_hline(y=0, line=dict(color='black'), row=2, col=1) + fig.add_hline(y=1, line=dict(color='black', dash='dot'), row=2, col=1) + fig.add_hline(y=-1, line=dict(color='black', dash='dot'), row=2, col=1) + fig.update_yaxes(title_text='resid', type='linear', row=2, col=1) + fig.update_xaxes(title_text=xlabel, row=2, col=1) + else: + fig.update_xaxes(title_text=xlabel, row=1, col=1) + + # Configure legend with automatic positioning + fig.update_layout( + legend=dict( + x=0.99, y=0.99, # top right + yanchor="top", # Anchor to middle of legend + xanchor="right", # Anchor to center of legend + orientation="v", # Horizontal layout + bordercolor="black", + borderwidth=1, + bgcolor="rgba(255,255,255,0.8)", # Semi-transparent background + #draggable=True # Allow manual adjustment if needed + ), + margin=dict(l=50, r=50, t=50, b=50) # Add space for legend + ) + + margin = fig.layout.margin + margin["t"] *= 1.2 + margin["r"] /= 4 + fig.update_layout(margin=margin) + + return fig + + else: # backend defaults to matplotlib + import matplotlib.gridspec as gridspec + import matplotlib.pyplot as plt + + # Note: matplotlib inset plots do not work properly in mpld3, so + # use the more cumbersome gridspec interface directly. 
+ # ax0 = plt.gca() # for inset + fig = plt.gcf() + if use_resid: + if fig.axes: + ax = fig.gca() + gs = gridspec.GridSpecFromSubplotSpec( + 2, 1, subplot_spec=ax.get_subplotspec(), + height_ratios=[graph_ratio, 1 - graph_ratio]) + else: + gs = gridspec.GridSpec( + 2, 1, height_ratios=[graph_ratio, 1 - graph_ratio]) + ax0 = fig.add_subplot(gs[0]) + else: + ax0 = fig.gca() + if use_data: + ax0.errorbar(xdata, ydata, yerr=yerr, fmt='.', label=data_label) - plt.xscale('linear' if not some_present or non_positive_x else 'log') - plt.yscale('log' if some_present and view == 'log' else 'linear') - plt.xlabel("$q$/A$^{-1}$") - plt.ylabel('$10^8 q^4 I(q)$' if view == 'q4' else '$I(q)$') - title = ("data and model" if use_theory and use_data - else "data" if use_data - else "model") - plt.title(title) + if use_theory: + ax0.plot(xtheory, ytheory, '-', label=theory_label) - if use_calc: - # Only have use_calc if have use_theory - plt.subplot(1, num_plots, 2) - qx, qy, Iqxy = Iq_calc - plt.pcolormesh(qx, qy[qy > 0], np.log10(Iqxy[qy > 0, :])) - plt.xlabel("$q_x$/A$^{-1}$") - plt.xlabel("$q_y$/A$^{-1}$") - plt.xscale('log') - plt.yscale('log') - #plt.axis('equal') + if use_calc: # (q, Iq), not (qx, qy, Iqxy) + ax0.plot(xcalc, ycalc, ':', alpha=0.8, label=calc_label) - if use_resid: - theory_x = data.x[data.mask == 0] - mresid = masked_array(resid) - mresid[~np.isfinite(mresid)] = masked - some_present = (mresid.count() > 0) + if limits is not None: + ax0.set_ylim(*limits) + + ax0.set_xscale(xscale) + ax0.set_yscale(yscale) + ax0.set_ylabel(ylabel) + ax0.legend() + + if use_resid: + # Inset plot would use the following + # location = [0, -resid_ratio/(1-resid_ratio), 1, resid_ratio/(1-resid_ratio)] + # ax1 = ax0.inset_axes(location, transform=ax0.transAxes, sharex=ax0) + # TODO: sharex isn't working right for mpld3; it is also sharing y + ax1 = fig.add_subplot(gs[1]) #, sharex=ax0) + ax1.plot(xresid, yresid, '.', label=resid_label) + # Broken in mpld3 + #ax1.axhline(0, color='black') + #ax1.axhline(1, color='black', linestyle=':') + #ax1.axhline(-1, color='black', linestyle=':') + xlim = min(xresid), max(xresid) + #ax1.hlines([0, 1, -1], xlim[0], xlim[1], linestyles=['-',':',':'], color='black', alpha=0.5) + ax1.plot(xlim, [0, 0], 'k-', label="_none_", alpha=0.5) + ax1.plot(xlim, [-1, -1], 'k:', label="_none_", alpha=0.5) + ax1.plot(xlim, [1, 1], 'k:', label="_none_", alpha=0.5) + # ax1.set_ylabel('(I(q)-y)/Δy') + ax1.set_ylabel('resid') + ax1.set_yscale('linear') + ax1.set_xlabel(xlabel) + # TODO: reduce the tick density in the center when z scores are too high + # The following are useful ticks for residuals. They are a little ugly in the + # middle when the fit is bad, but at least this allows us to work around + # flakiness in mpld3: The number of tick marks in the locator is not being + # honoured by javascript. 
+ forward = [1, 3, 10, 30, 100, 300, 1000, 3000] + reverse = [-v for v in forward[::-1]] + ticks = [*reverse, *forward] + ax1.yaxis.set_major_locator(plt.FixedLocator(ticks)) + #ax1.yaxis.set_major_locator(plt.LinearLocator(numticks=5)) + #ax1.yaxis.set_major_locator(plt.MaxNLocator(5)) # Show maximum 5 ticks + plt.setp(ax0.get_xticklabels(), visible=False) + else: + ax0.set_xlabel(xlabel) + + if use_calc2D: # (qx, qy, Iqxy) + inset_calc_2D(ax0, Iq_calc) + + plt.tight_layout() + return plt.gcf() + + +def inset_calc_2D(ax0, Iqxy_calc, alpha=0.8, portion=0.3, logz=True): + qx, qy, Iqxy = Iqxy_calc + # place in the top right + location = [1-portion, 1-portion, portion, portion] + ax_inset = ax0.inset_axes(location, transform=ax0.transAxes) + ax_inset.pcolormesh(qx, qy, np.log10(Iqxy), alpha=alpha) + ax_inset.set_aspect('equal') + ax_inset.set_xlim(qx.min(), qx.max()) + ax_inset.set_ylim(qy.min(), qy.max()) + ax_inset.axis('off') + return ax_inset - if num_plots > 1: - plt.subplot(1, num_plots, use_calc + 2) - plt.plot(theory_x, mresid, '.') - plt.xlabel("$q$/A$^{-1}$") - plt.ylabel('residuals') - plt.title('(model - Iq)/dIq') - plt.xscale('linear' if not some_present or non_positive_x else 'log') - plt.yscale('linear') @protect -def _plot_result_sesans(data, theory, resid, view, use_data, limits=None): - # type: (SesansData, OptArray, OptArray, OptString, bool, OptLimits) -> None +def _plot_result_sesans(data, theory, resid, view, use_data, limits=None, label='theory', backend="matplotlib"): + # type: (SesansData, OptArray, OptArray, OptString, bool, OptLimits, str, str) -> None """ Plot SESANS results. + + view is "normed" or not "normed" """ import matplotlib.pyplot as plt # type: ignore use_data = use_data and data.y is not None use_theory = theory is not None use_resid = resid is not None - num_plots = (use_data or use_theory) + use_resid + + graph_ratio = 1.0 if resid is None else 1 - RESID_RATIO normed = (view == "normed") #normed = True offset, scale = 0, 1 - if normed and theory is not None: + if normed and use_theory: offset, scale = theory[-1], theory[0] - theory[-1] - if use_data or use_theory: - is_tof = data.lam is not None and (data.lam != data.lam[0]).any() - if num_plots > 1: - plt.subplot(1, num_plots, 1) + is_tof = data.lam is not None and (data.lam != data.lam[0]).any() + xdata = data.x + ydata = data.y + yerr = data.dy + lamsq = data.source.wavelength**2 + if use_data: + data_label = get_data_label(data) + ydata = np.log10(ydata) / lamsq if is_tof else (ydata - offset) / scale + yerr = yerr / ydata / lamsq if is_tof else yerr / scale + if use_theory: + ytheory = np.log10(theory) / lamsq if is_tof else (theory - offset) / scale + + theory_label = label + xlabel = f"spin echo length ({data._xunit})" + ylabel = "log(P)/(tλ²) / (Ų cm)" + resid_label = 'resid' + + if backend == 'plotly': + import plotly.graph_objects as go + from plotly.subplots import make_subplots + + rows = 2 if graph_ratio > 0 else 1 + heights = [graph_ratio, 1 - graph_ratio] if graph_ratio > 0 else None + fig = make_subplots( + cols=1, rows=rows, row_heights=heights, + # TODO: x-axis sharing isn't working + shared_xaxes=True, + vertical_spacing=0.01) + if use_data: - if is_tof: - plt.errorbar(data.x, np.log(data.y)/(data.lam*data.lam), - yerr=data.dy/data.y/(data.lam*data.lam)) - else: - #plt.errorbar(data.x, data.y, yerr=data.dy) - plt.errorbar(data.x, (data.y-offset)/scale, yerr=data.dy/scale) - if theory is not None: - if is_tof: - plt.plot(data.x, np.log(theory)/(data.lam*data.lam), '-') - else: - 
#plt.plot(data.x, theory, '-') - plt.plot(data.x, (theory-offset)/scale, '-') - if limits is not None: - plt.ylim(*limits) + fig.add_trace( + go.Scatter( + x=xdata, y=ydata, error_y=dict(type='data', array=yerr), + name=data_label, mode='markers'), + row=1, col=1) - plt.xlabel(f'spin echo length ({data._xunit})') - plt.ylabel(r'$\log(P)/(t\lambda^2) (\mathrm{A}^{-2}\mathrm{cm}^{-1})$') - plt.xscale('log') + if use_theory: + fig.add_trace( + go.Scatter(x=xdata, y=ytheory, name=theory_label, mode='lines'), + row=1, col=1) + if limits is not None: + fig.update_yaxes(range=limits, row=1, col=1) + + fig.update_xaxes(type='log', row=1, col=1) + fig.update_yaxes(type='linear', row=1, col=1) + fig.update_layout(yaxis_title=ylabel) + + if use_resid: + fig.add_trace( + go.Scatter(x=xdata, y=resid, mode='markers', name=resid_label), + row=2, col=1) + fig.add_hline(y=0, line=dict(color='black'), row=2, col=1) + fig.add_hline(y=1, line=dict(color='black', dash='dot'), row=2, col=1) + fig.add_hline(y=-1, line=dict(color='black', dash='dot'), row=2, col=1) + fig.update_yaxes(title_text='resid', type='linear', row=2, col=1) + fig.update_xaxes(title_text=xlabel, row=2, col=1) + else: + fig.update_xaxes(title_text=xlabel, row=1, col=1) + + # Configure legend with automatic positioning + fig.update_layout( + legend=dict( + x=0.99, y=0.99, # top right + yanchor="top", # Anchor to middle of legend + xanchor="right", # Anchor to center of legend + orientation="v", # Horizontal layout + bordercolor="black", + borderwidth=1, + bgcolor="rgba(255,255,255,0.8)", # Semi-transparent background + #draggable=True # Allow manual adjustment if needed + ), + margin=dict(l=50, r=50, t=50, b=50) # Add space for legend + ) + + margin = fig.layout.margin + margin["t"] *= 1.2 + margin["r"] /= 4 + fig.update_layout(margin=margin) + + return fig + + else: # backend == "matplotlib": + import matplotlib.gridspec as gridspec + import matplotlib.pyplot as plt + + # Note: matplotlib inset plots do not work properly in mpld3, so + # use the more cumbersome gridspec interface directly. 
+ # ax0 = fig.gca() # for inset + fig = plt.gcf() + if use_resid: + if fig.axes: + ax = fig.gca() + gs = gridspec.GridSpecFromSubplotSpec( + 2, 1, subplot_spec=ax.get_subplotspec(), + height_ratios=[graph_ratio, 1 - graph_ratio]) + else: + gs = gridspec.GridSpec( + 2, 1, height_ratios=[graph_ratio, 1 - graph_ratio]) + ax0 = fig.add_subplot(gs[0]) + else: + ax0 = fig.gca() - if resid is not None: - if num_plots > 1: - plt.subplot(1, num_plots, (use_data or use_theory) + 1) - plt.plot(data.x, resid, 'x') - plt.xlabel(f'spin echo length ({data._xunit})') - plt.ylabel('polarization residuals') - plt.xscale('log') + if use_data: + ax0.errorbar(xdata, ydata, yerr=yerr) + if use_theory: + ax0.plot(xdata, ytheory, '-') + if limits is not None: + ax0.set_ylim(*limits) + ax0.set_ylabel(ylabel) + ax0.set_xscale('log') + + if use_resid: + ax1 = fig.add_subplot(gs[1]) #, sharex=ax0) + ax1.plot(data.x, resid, 'x') + ax1.set_xlabel(xlabel) + ax1.set_ylabel(resid_label) + ax1.set_xscale('log') + + # Broken in mpld3 + #ax1.axhline(0, color='black') + #ax1.axhline(1, color='black', linestyle=':') + #ax1.axhline(-1, color='black', linestyle=':') + xlim = min(data.x), max(data.x) + #ax1.hlines([0, 1, -1], xlim[0], xlim[1], linestyles=['-',':',':'], color='black', alpha=0.5) + ax1.plot(xlim, [0, 0], 'k-', label="_none_", alpha=0.5) + ax1.plot(xlim, [-1, -1], 'k:', label="_none_", alpha=0.5) + ax1.plot(xlim, [1, 1], 'k:', label="_none_", alpha=0.5) + + # TODO: reduce the tick density in the center when z scores are too high + # The following are useful ticks for residuals. They are a little ugly in the + # middle when the fit is bad, but at least this allows us to work around + # flakiness in mpld3: The number of tick marks in the locator is not being + # honoured by javascript. + forward = [1, 3, 10, 30, 100, 300, 1000, 3000] + reverse = [-v for v in forward[::-1]] + ticks = [*reverse, *forward] + ax1.yaxis.set_major_locator(plt.FixedLocator(ticks)) + #ax1.yaxis.set_major_locator(plt.LinearLocator(numticks=5)) + #ax1.yaxis.set_major_locator(plt.MaxNLocator(5)) # Show maximum 5 ticks + plt.setp(ax0.get_xticklabels(), visible=False) + else: + ax0.set_xlabel(xlabel) @protect -def _plot_result2D(data, theory, resid, view, use_data, limits=None): - # type: (Data2D, OptArray, OptArray, str, bool, OptLimits) -> None +def _plot_result2D(data, theory, resid, view, use_data, limits=None, label='theory', backend='matplotlib'): + # type: (Data2D, OptArray, OptArray, str, bool, OptLimits, str, str) -> None """ Plot the data and residuals for 2D data. """ - import matplotlib.pyplot as plt # type: ignore if view is None: view = 'log' + use_data = use_data and data.data is not None use_theory = theory is not None use_resid = resid is not None num_plots = use_data + use_theory + use_resid - # Put theory and data on a common colormap scale - vmin, vmax = np.inf, -np.inf - target = None # type: OptArray - if use_data: - target = data.data[~data.mask] - datamin = target[target > 0].min() if view == 'log' else target.min() - datamax = target.max() - vmin = min(vmin, datamin) - vmax = max(vmax, datamax) - if use_theory: - theorymin = theory[theory > 0].min() if view == 'log' else theory.min() - theorymax = theory.max() - vmin = min(vmin, theorymin) - vmax = max(vmax, theorymax) + # Note: leaving data masked since it may already be gridded. - # Override data limits from the caller - if limits is not None: - vmin, vmax = limits + # Put theory and data on a common scale. 
Use the range on the data to set + # the map, with 1/4 of the top of the error bars used as the basis of the + # log scale colourmap to avoid spurious near-zero values after background + # subtraction. + if view == "q4": + scale = 1e8*(data.qx_data**2+data.qy_data**2)**2 + else: + scale = 1 + if use_data and limits is None: + # TODO: do we want log(q^4 Iq) = 4 log q + log Iq + if view == 'log': + upper = (abs(data.data) + data.err_data)[~data.mask] + vmax = np.log10(upper.max()) + vmin = np.log10(upper[upper > 0].min()/4) + elif view == 'q4': + vdata = (data.data*scale)[~data.mask] + vmin, vmax = vdata.min(), vdata.max() + else: + vdata = data.data[~data.mask] + vmin, vmax = vdata.min(), vdata.max() + + # Allow 20% above the data to compare theory + # For log data and q^4 data this is applying after the transform + window = 0.2 if use_theory else 1 + vmin, vmax = vmin - (vmax-vmin)*(1+window), vmax + (vmax-vmin)*(1+window) + if limits is None: + limits = vmin, vmax + + if use_theory and limits is None: + if view == 'log': + vtheory = np.log10(theory) + elif view == 'q4': + vtheory = scale[~data.mask]*theory + else: + vtheory = theory + + if limits is None: + limits = vtheory.min(), vtheory.max() + + # Use the full data limits for the residuals data + rlimits = (resid.min(), resid.max()) if use_resid else None + + # _plot_2d_signal knows that the theory is only computed for the unmasked + # data points. When using it to plot data, need to turn the data into a signal + # by applying the mask. We need to keep using the mask so that data that is + # stored as a 2D grid stays as a 2D grid, otherwise we need to apply a complicated + # histogramming and interpolation algorithm to rebuild a grid. + active_data = data.data[~data.mask] + + zlabel = 'log I(q)' if view == 'log' else '10⁸q⁴I(q)' if view == 'q4' else 'I(q)' + theory_label = label + data_label = f"{zlabel} data" + resid_label = '(I(q) - y)/Δy' + + if backend == "plotly": + import plotly.graph_objects as go + from plotly.subplots import make_subplots + + def add_trace(trace, row, col, label): + colorbar = dict( + title=dict(text=label, side='right'), + yanchor="bottom", + xanchor="left", + ) + if num_plots == 1: + trace.update(colorbar=colorbar) + fig.add_trace(trace) + else: + trace.update(colorbar=dict(x=col/2-0.05, y=1-(row/2-0.05), len=0.45, **colorbar)) + fig.add_trace(trace, row=row, col=col) + fig.update_xaxes(row=row, col=col, scaleanchor=f"y{row}", scaleratio=1) + fig.update_yaxes(row=row, col=col, scaleanchor=f"x{col}", scaleratio=1) - # Plot data - if use_data: - if num_plots > 1: - plt.subplot(1, num_plots, 1) - _plot_2d_signal(data, target, view=view, vmin=vmin, vmax=vmax) - plt.title('data') - h = plt.colorbar() - h.set_label('$I(q)$') - - # plot theory - if use_theory: - if num_plots > 1: - plt.subplot(1, num_plots, use_data+1) - _plot_2d_signal(data, theory, view=view, vmin=vmin, vmax=vmax) - plt.title('theory') - h = plt.colorbar() - h.set_label(r'$\log_{10}I(q)$' if view == 'log' - else r'$q^4 I(q)$' if view == 'q4' - else '$I(q)$') - - # plot resid - if use_resid: - if num_plots > 1: - plt.subplot(1, num_plots, use_data+use_theory+1) - _plot_2d_signal(data, resid, view='linear') - plt.title('residuals') - h = plt.colorbar() - h.set_label(r'$\Delta I(q)$') + fig = make_subplots(rows=2, cols=2) if num_plots > 1 else go.Figure() + if use_data: + trace = _plot_2d_signal(data, active_data, view=view, limits=limits, backend=backend) + add_trace(trace, 1, 1, data_label) + + if use_theory: + trace = _plot_2d_signal(data, 
theory, view=view, limits=limits, backend=backend) + add_trace(trace, 1, 2, theory_label) + + if use_resid: + trace = _plot_2d_signal(data, resid, view='linear', limits=rlimits, backend=backend) + add_trace(trace, 2, 2, resid_label) + + #from pprint import pprint; pprint(fig) + return fig + + else: # backend == "matplotlib" + import matplotlib.pyplot as plt # type: ignore + def maybe_hide_labels(row, col): + if num_plots == 1: + return + if row == 1 and col == 2: + plt.gca().set_xticks([]) + plt.xlabel('') + plt.gca().set_yticks([]) + plt.ylabel('') + + # plot theory + if use_theory: + if num_plots > 1: + plt.subplot(2, 2, 2) + _plot_2d_signal(data, theory, label=theory_label, view=view, limits=limits, backend=backend) + maybe_hide_labels(1, 2) + + # Plot data + if use_data: + if num_plots > 1: + plt.subplot(2, 2, 1) + _plot_2d_signal(data, active_data, label=data_label, view=view, limits=limits, backend=backend) + maybe_hide_labels(1, 1) + + # plot resid + if use_resid: + if num_plots > 1: + plt.subplot(2, 2, 4) + _plot_2d_signal(data, resid, label=resid_label, view='linear', limits=rlimits, backend=backend) + maybe_hide_labels(2, 2) @protect -def _plot_2d_signal(data, signal, vmin=None, vmax=None, view=None): +def _plot_2d_signal(data, signal, limits=None, view=None, label=None, backend='matplotlib'): # type: (Data2D, np.ndarray, Optional[float], Optional[float], str) -> tuple[float, float] """ Plot the target value for the data. This could be the data itself, @@ -712,57 +1073,50 @@ def _plot_2d_signal(data, signal, vmin=None, vmax=None, view=None): *scale* can be 'log' for log scale data, or 'linear'. """ - import matplotlib.pyplot as plt # type: ignore - from numpy.ma import masked_array # type: ignore + assert view is not None - if view is None: - view = 'log' + # Using signal evaluated at data.data[~data.mask], create an image on the regular + # grid defined uniform vectors x, y + # TODO: Divide range by 10 to convert from angstroms to nanometers + xbins, ybins, masked_z = _build_image(data, signal) - image = np.zeros_like(data.qx_data) - image[~data.mask] = signal - valid = np.isfinite(image) & ~data.mask if view == 'log': - valid &= image > 0 - if vmin is None: - vmin = image[valid].min() - if vmax is None: - vmax = image[valid].max() - image[valid] = np.log10(image[valid]) + masked_z[masked_z <= 0] = ma.masked + masked_z = np.ma.log10(masked_z) elif view == 'q4': - image[valid] *= (data.qx_data[valid]**2+data.qy_data[valid]**2)**2 - if vmin is None: - vmin = image[valid].min() - if vmax is None: - vmax = image[valid].max() - else: - if vmin is None: - vmin = image[valid].min() - if vmax is None: - vmax = image[valid].max() - - image[~valid] = 0 - #plottable = Iq - plottable = masked_array(image, ~valid) - # Divide range by 10 to convert from angstroms to nanometers - xmin, xmax = min(data.qx_data), max(data.qx_data) - ymin, ymax = min(data.qy_data), max(data.qy_data) - if view == 'log': - vmin_scaled, vmax_scaled = np.log10(vmin), np.log10(vmax) - else: - vmin_scaled, vmax_scaled = vmin, vmax - #nx, ny = len(data.x_bins), len(data.y_bins) - _, _, image = _build_matrix(data, plottable) - plt.imshow(image, - interpolation='nearest', aspect=1, origin='lower', - extent=[xmin, xmax, ymin, ymax], - vmin=vmin_scaled, vmax=vmax_scaled) - plt.xlabel("$q_x$/A$^{-1}$") - plt.ylabel("$q_y$/A$^{-1}$") - return vmin, vmax + q4 = (ybins[:, None]**2 + xbins[None, :]**2)**2 + masked_z *= 1e8*q4 + + if backend == 'plotly': + import plotly.graph_objects as go + + xc = (xbins[1:] + xbins[:-1])/2 + yc 
= (ybins[1:] + ybins[:-1])/2 + trace = go.Heatmap( + z=masked_z.filled(np.nan), y=yc, x=xc, + zmin=limits[0], zmax=limits[1], + colorscale='Viridis', showscale=True, + ) + return trace + + else: # backend == 'matplotlib': + import matplotlib.pyplot as plt # type: ignore + + xmin, xmax = xbins[0], xbins[-1] + ymin, ymax = ybins[0], ybins[-1] + + plt.imshow(masked_z, + interpolation='nearest', aspect='equal', origin='lower', + extent=[xmin, xmax, ymin, ymax], + vmin=limits[0], vmax=limits[1]) + plt.xlabel("q_x/Å") + plt.ylabel("q_y/Å") + h = plt.colorbar(location='right') + h.set_label(label) # === The following is modified from sas.sasgui.plottools.PlotPanel -def _build_matrix(self, plottable): +def _build_image(data, signal, radius=1): """ Build a matrix for 2d plot from a vector Returns a matrix (image) with ~ square binning @@ -770,69 +1124,69 @@ def _build_matrix(self, plottable): self.data, self.qx_data, and self.qy_data where each one corresponds to z, x, or y axis values + Holes in the image where there were no data points are filled + using interpolation. Set *radius* to the maximum number + of fill iterations. + + Returns qx, qy, masked image """ + + # Check if data is already gridded. If so, return the gridding. # No qx or qy given in a vector format - if (self.qx_data is None or self.qy_data is None - or self.qx_data.ndim != 1 or self.qy_data.ndim != 1): - return self.x_bins, self.y_bins, plottable + qx, qy, mask = data.qx_data, data.qy_data, data.mask + if (qx is None or qy is None or qx.ndim != 1 or qy.ndim != 1): + # build a masked array to return + mask = mask if mask.ndim == 2 else mask.reshape((len(qy), len(qx))) + image = np.zeros(mask.shape) + image[~mask] = signal + return data.x_bins, data.y_bins, ma.masked_array(image, mask) - # maximum # of loops to fillup_pixels - # otherwise, loop could never stop depending on data - max_loop = 1 # get the x and y_bin arrays. - x_bins, y_bins = _get_bins(self) + x_bins, y_bins = _get_bins(qx, qy) # set zero to None + # TODO: should masking apply before or after x, y step size estimate? + # TODO: don't use number of points to estimate x, y step size + # Apply masking to non-gridded data (signal is already masked) + qx, qy = qx[~mask], qy[~mask] + #Note: Can not use scipy.interpolate.Rbf: # 'cause too many data points (>10000)<=JHC. - # 1d array to use for weighting the data point averaging - #when they fall into a same bin. - weights_data = np.ones([self.data.size]) # get histogram of ones w/len(data); this will provide #the weights of data on each bins - weights, _, _ = np.histogram2d( - x=self.qy_data, y=self.qx_data, bins=[y_bins, x_bins], - weights=weights_data) + # TODO: check that x=qy, y=qx is correct. + weights, _, _ = np.histogram2d(x=qy, y=qx, bins=[y_bins, x_bins]) # get histogram of data, all points into a bin in a way of summing - image, _, _ = np.histogram2d( - x=self.qy_data, y=self.qx_data, bins=[y_bins, x_bins], - weights=plottable) + image, _, _ = np.histogram2d(x=qy, y=qx, bins=[y_bins, x_bins], weights=signal) # Now, normalize the image by weights only for weights>1: # If weight == 1, there is only one data point in the bin so # that no normalization is required. image[weights > 1] = image[weights > 1] / weights[weights > 1] # Set image bins w/o a data point (weight==0) as None (was set to zero # by histogram2d.) 
- image[weights == 0] = None + image[weights == 0] = np.nan # Fill empty bins with 8 nearest neighbors only when at least - #one None point exists - loop = 0 - - # do while loop until all vacant bins are filled up up - #to loop = max_loop - while (weights == 0).any(): - if loop >= max_loop: # this protects never-ending loop + # one None point exists. Loop until all vacant bins are filled. + for _ in range(radius): + if (weights != 0).all(): break image = _fillup_pixels(image=image, weights=weights) - loop += 1 - return x_bins, y_bins, image + # Data is interpolated to fill the empty pixels + return x_bins, y_bins, ma.masked_array(image, weights == 0) -def _get_bins(self): +def _get_bins(qx, qy): """ get bins set x_bins and y_bins into self, 1d arrays of the index with ~ square binning - Requirement: need 1d array formats of - self.qx_data, and self.qy_data - where each one corresponds to x, or y axis values + Requirement: need 1d array formats for qx, qy + where each one corresponds to x, or y axis values """ # find max and min values of qx and qy - xmax = self.qx_data.max() - xmin = self.qx_data.min() - ymax = self.qy_data.max() - ymin = self.qy_data.min() + xmin, xmax = qx.min(), qx.max() + ymin, ymax = qy.min(), qy.max() # calculate the range of qx and qy: this way, it is a little # more independent @@ -840,8 +1194,8 @@ def _get_bins(self): y_size = ymax - ymin # estimate the # of pixels on each axes - npix_y = int(np.floor(np.sqrt(len(self.qy_data)))) - npix_x = int(np.floor(len(self.qy_data) / npix_y)) + npix_y = int(np.floor(np.sqrt(len(qx)))) + npix_x = int(np.floor(len(qy) / npix_y)) # bin size: x- & y-directions xstep = x_size / (npix_x - 1) From 8daeb6e3a128746f577d1e685c71952e5412794f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci-lite[bot]" <117423508+pre-commit-ci-lite[bot]@users.noreply.github.com> Date: Thu, 18 Sep 2025 19:43:34 +0000 Subject: [PATCH 08/16] [pre-commit.ci lite] apply automatic fixes for ruff linting errors --- sasmodels/TwoYukawa/CalTYSk.py | 4 +- sasmodels/TwoYukawa/CalcRealRoot.py | 1 + sasmodels/TwoYukawa/Ecoefficient.py | 14 +- sasmodels/TwoYukawa/Epoly.py | 277 ++++++++++++++-------------- sasmodels/TwoYukawa/TFourier.py | 3 +- sasmodels/TwoYukawa/TInvFourier.py | 3 +- sasmodels/models/two_yukawa.py | 6 +- 7 files changed, 156 insertions(+), 152 deletions(-) diff --git a/sasmodels/TwoYukawa/CalTYSk.py b/sasmodels/TwoYukawa/CalTYSk.py index 40f50e0c..0714c7b2 100644 --- a/sasmodels/TwoYukawa/CalTYSk.py +++ b/sasmodels/TwoYukawa/CalTYSk.py @@ -1,8 +1,8 @@ import numpy as np -from numpy import pi, mean +from numpy import mean, pi -from .Ecoefficient import TYCoeff from .CalcRealRoot import CalRealRoot +from .Ecoefficient import TYCoeff from .TInvFourier import TInvFourier # Supplied Q vector must go out to at least this value to calculate g(r). 
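
The _build_image rewrite in the 2D plotting patch above regrids scattered (qx, qy, I) samples onto a regular image: np.histogram2d counts the samples falling in each bin and sums their intensities, the sum is divided by the count where the count is nonzero, and bins that received no data are masked (and optionally filled from their neighbours). A minimal standalone sketch of that averaging-by-histogram idea, independent of the sasmodels data classes; the helper name and the synthetic data below are illustrative only:

    import numpy as np
    import numpy.ma as ma

    def grid_scattered(qx, qy, intensity, nx=50, ny=50):
        """Average scattered samples onto an nx-by-ny grid; mask empty bins."""
        x_bins = np.linspace(qx.min(), qx.max(), nx + 1)
        y_bins = np.linspace(qy.min(), qy.max(), ny + 1)
        # Per-bin sample counts and per-bin summed intensity.  Note x=qy, y=qx
        # so that rows correspond to qy and columns to qx, as imshow expects.
        counts, _, _ = np.histogram2d(qy, qx, bins=[y_bins, x_bins])
        total, _, _ = np.histogram2d(qy, qx, bins=[y_bins, x_bins], weights=intensity)
        with np.errstate(invalid='ignore'):
            image = total / counts          # NaN wherever counts == 0
        return x_bins, y_bins, ma.masked_array(image, counts == 0)

    if __name__ == "__main__":
        rng = np.random.default_rng(0)
        qx, qy = rng.uniform(-0.1, 0.1, size=(2, 5000))
        intensity = 1.0 / (1.0 + 1e4*(qx**2 + qy**2))
        xb, yb, z = grid_scattered(qx, qy, intensity)
        print(z.shape, ma.count_masked(z), "empty bins")

The masked result can be passed straight to imshow (masked pixels render as blanks) or log-scaled with np.ma.log10 without tripping over the empty bins.
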
diff --git a/sasmodels/TwoYukawa/CalcRealRoot.py b/sasmodels/TwoYukawa/CalcRealRoot.py index 9db225ed..7e7287a5 100644 --- a/sasmodels/TwoYukawa/CalcRealRoot.py +++ b/sasmodels/TwoYukawa/CalcRealRoot.py @@ -3,6 +3,7 @@ from .Ecoefficient import TYCoeff from .Epoly import make_poly + def CalRealRoot(coeff: TYCoeff): Ecoefficient = coeff.Ecoefficient diff --git a/sasmodels/TwoYukawa/Ecoefficient.py b/sasmodels/TwoYukawa/Ecoefficient.py index 3182c01a..9d58f60c 100644 --- a/sasmodels/TwoYukawa/Ecoefficient.py +++ b/sasmodels/TwoYukawa/Ecoefficient.py @@ -1,7 +1,7 @@ from typing import Callable, Tuple import numpy as np -from numpy import exp, pi, cos, sin, cosh +from numpy import cos, cosh, exp, pi, sin from numpy.typing import NDArray # CalCoeff.m @@ -194,17 +194,17 @@ def ABC12(d1, d2): Ccd2_22*Cdd1_12*d1*d2**2 - Ccd1_21*Cdd2_22*d1*d2**2 - Ccd2_22*d2*k1 + Ccd1_21*d1*k2)))/ - ((d1*(( + (d1*( (-Ccd1_21)*Ccd2_12*d2 + - Ccd1_11*Ccd2_22*d2))))) + Ccd1_11*Ccd2_22*d2))) C2 = ((-((Ccd2_12*d2* (((-Cd1_1)*d1 - Cdd1_11*d1**2 - Cdd1_12*d1*d2 + k1)) - Ccd1_11*d1* (((-Cd2_2)*d2 - Cdd2_12*d1*d2 - Cdd2_22*d2**2 + k2))))) / - (((-Ccd1_21)*Ccd2_12*d1*d2 + - Ccd1_11*Ccd2_22*d1*d2))) + ((-Ccd1_21)*Ccd2_12*d1*d2 + + Ccd1_11*Ccd2_22*d1*d2)) return A, B, C1, C2 self.ABC12 = ABC12 @@ -370,10 +370,10 @@ def tau_d21(s): E1d02 = 12*c1F01*phi*sigma_d01(z1) - 12*c1F01*exp(-z1)*phi*tau_d01(z1) - E1d11 = (((12*c1F10*phi*sigma_d01(z1) + \ + E1d11 = (12*c1F10*phi*sigma_d01(z1) + \ 12*c1F01*phi*sigma_d10(z1) - \ 12*c1F10*exp(-z1)*phi*tau_d01(z1) - \ - 12*c1F01*exp(-z1)*phi*tau_d10(z1)))) + 12*c1F01*exp(-z1)*phi*tau_d10(z1)) E1d12 = (-c1F01 + 12*c1F11*phi*sigma_d01(z1) + 12*c1F01*phi*sigma_d11(z1) - 12*c1F11*exp(-z1)*phi*tau_d01(z1) - diff --git a/sasmodels/TwoYukawa/Epoly.py b/sasmodels/TwoYukawa/Epoly.py index 274bbe00..f5260f91 100644 --- a/sasmodels/TwoYukawa/Epoly.py +++ b/sasmodels/TwoYukawa/Epoly.py @@ -1,6 +1,7 @@ import numpy as np from numpy.typing import NDArray + #from numba import njit #@njit(cache=True) def make_poly(Ecoefficient:NDArray) -> NDArray: @@ -432,8 +433,8 @@ def make_poly(Ecoefficient:NDArray) -> NDArray: gE1d02*gE1d11*gE2d23*gE2d33**6) polyd2_19 = (7*gE1d02**2*gE2d31*gE2d33**6 + - 3*gE1d13**2*gE2d31*gE2d33**4*((5*gE2d13*gE2d31 + 2*gE2d11*gE2d33)) + - gE2d33*(((-9)*gE1d12*gE1d42*gE2d13*gE2d23*gE2d24**2*gE2d31**2 - + 3*gE1d13**2*gE2d31*gE2d33**4*(5*gE2d13*gE2d31 + 2*gE2d11*gE2d33) + + gE2d33*((-9)*gE1d12*gE1d42*gE2d13*gE2d23*gE2d24**2*gE2d31**2 - 3*gE1d11*gE1d42*gE2d13*gE2d24**3*gE2d31**2 + 3*gE1d42**2*gE2d13**4*gE2d31*gE2d33 - 3*gE1d32*gE1d42*gE2d13**3*gE2d23*gE2d31*gE2d33 - @@ -498,38 +499,38 @@ def make_poly(Ecoefficient:NDArray) -> NDArray: 2*gE1d11*gE1d31*gE2d13**2*gE2d33**4 + 6*gE1d12**2*gE2d13*gE2d31*gE2d33**4 + gE1d12**2*gE2d11*gE2d33**5 + gE1d11**2*gE2d13*gE2d33**5 + - gE1d22**2*gE2d13*gE2d33**3*((5*gE2d13*gE2d31 + - 2*gE2d11*gE2d33)) + - 3*gE1d33**2*gE2d13*gE2d33*((2*gE2d13**2*gE2d31**2 + - 4*gE2d11*gE2d13*gE2d31*gE2d33 + gE2d11**2*gE2d33**2)) - - gE1d22*((gE2d33**2*((gE1d32*gE2d13*((4*gE2d13*gE2d23*gE2d31 + - gE2d13*gE2d21*gE2d33 + 2*gE2d11*gE2d23*gE2d33)) + - gE1d31*gE2d13*((4*gE2d13*gE2d24*gE2d31 + - gE2d13*gE2d22*gE2d33 + 2*gE2d11*gE2d24*gE2d33)) + - gE2d33*((5*gE1d12*gE2d13*gE2d23*gE2d31 + + gE1d22**2*gE2d13*gE2d33**3*(5*gE2d13*gE2d31 + + 2*gE2d11*gE2d33) + + 3*gE1d33**2*gE2d13*gE2d33*(2*gE2d13**2*gE2d31**2 + + 4*gE2d11*gE2d13*gE2d31*gE2d33 + gE2d11**2*gE2d33**2) - + gE1d22*(gE2d33**2*(gE1d32*gE2d13*(4*gE2d13*gE2d23*gE2d31 + + gE2d13*gE2d21*gE2d33 + 2*gE2d11*gE2d23*gE2d33) + + 
gE1d31*gE2d13*(4*gE2d13*gE2d24*gE2d31 + + gE2d13*gE2d22*gE2d33 + 2*gE2d11*gE2d24*gE2d33) + + gE2d33*(5*gE1d12*gE2d13*gE2d23*gE2d31 + 5*gE1d11*gE2d13*gE2d24*gE2d31 + gE1d12*gE2d13*gE2d21*gE2d33 + gE1d11*gE2d13*gE2d22*gE2d33 + gE1d12*gE2d11*gE2d23*gE2d33 + - gE1d11*gE2d11*gE2d24*gE2d33)))) + - gE1d42*(((-gE2d11**2)*gE2d24**2*gE2d33**2 + + gE1d11*gE2d11*gE2d24*gE2d33)) + + gE1d42*((-gE2d11**2)*gE2d24**2*gE2d33**2 + 8*gE2d13**3*gE2d31*gE2d33**2 - - 2*gE2d11*gE2d13*gE2d33*((3*gE2d24**2*gE2d31 + - gE2d23**2*gE2d33 + 2*gE2d22*gE2d24*gE2d33)) - - gE2d13**2*((3*gE2d24**2*gE2d31**2 + - 2*gE2d24*gE2d33*((3*gE2d22*gE2d31 + - gE2d20*gE2d33)) + - gE2d33*((3*gE2d23**2*gE2d31 + + 2*gE2d11*gE2d13*gE2d33*(3*gE2d24**2*gE2d31 + + gE2d23**2*gE2d33 + 2*gE2d22*gE2d24*gE2d33) - + gE2d13**2*(3*gE2d24**2*gE2d31**2 + + 2*gE2d24*gE2d33*(3*gE2d22*gE2d31 + + gE2d20*gE2d33) + + gE2d33*(3*gE2d23**2*gE2d31 + 2*gE2d21*gE2d23*gE2d33 + - gE2d33*((gE2d22**2 - - 6*gE2d11*gE2d33)))))))))) + - gE1d33*(((-gE1d42)*gE2d13*((3*gE2d11**2*gE2d24*gE2d33**2 + - 3*gE2d11*gE2d13*gE2d33*((3*gE2d24*gE2d31 + - gE2d22*gE2d33)) + - gE2d13**2*((3*gE2d24*gE2d31**2 + - gE2d33*((3*gE2d22*gE2d31 + - gE2d20*gE2d33)))))) + - gE2d33*((6*gE1d11*gE2d13*gE2d24**2*gE2d31**2 + + gE2d33*(gE2d22**2 - + 6*gE2d11*gE2d33))))) + + gE1d33*((-gE1d42)*gE2d13*(3*gE2d11**2*gE2d24*gE2d33**2 + + 3*gE2d11*gE2d13*gE2d33*(3*gE2d24*gE2d31 + + gE2d22*gE2d33) + + gE2d13**2*(3*gE2d24*gE2d31**2 + + gE2d33*(3*gE2d22*gE2d31 + + gE2d20*gE2d33))) + + gE2d33*(6*gE1d11*gE2d13*gE2d24**2*gE2d31**2 + 8*gE1d31*gE2d13**3*gE2d31*gE2d33 + 4*gE1d11*gE2d13*gE2d23**2*gE2d31*gE2d33 + 8*gE1d11*gE2d13*gE2d22*gE2d24*gE2d31*gE2d33 + @@ -542,148 +543,148 @@ def make_poly(Ecoefficient:NDArray) -> NDArray: 2*gE1d11*gE2d11*gE2d22*gE2d24*gE2d33**2 - 10*gE1d11*gE2d13**2*gE2d31*gE2d33**2 - 4*gE1d11*gE2d11*gE2d13*gE2d33**3 - - gE1d22*((gE2d11**2*gE2d24*gE2d33**2 + - 2*gE2d11*gE2d13*gE2d33*((4*gE2d24*gE2d31 + - gE2d22*gE2d33)) + - gE2d13**2*((6*gE2d24*gE2d31**2 + + gE1d22*(gE2d11**2*gE2d24*gE2d33**2 + + 2*gE2d11*gE2d13*gE2d33*(4*gE2d24*gE2d31 + + gE2d22*gE2d33) + + gE2d13**2*(6*gE2d24*gE2d31**2 + 4*gE2d22*gE2d31*gE2d33 + - gE2d20*gE2d33**2)))) + - 2*gE1d12*((gE2d11*gE2d33*((4*gE2d23*gE2d24*gE2d31 + + gE2d20*gE2d33**2)) + + 2*gE1d12*(gE2d11*gE2d33*(4*gE2d23*gE2d24*gE2d31 + gE2d22*gE2d23*gE2d33 + - gE2d21*gE2d24*gE2d33)) + - gE2d13*((gE2d21*gE2d33*((4*gE2d24*gE2d31 + - gE2d22*gE2d33)) + - gE2d23*((6*gE2d24*gE2d31**2 + + gE2d21*gE2d24*gE2d33) + + gE2d13*(gE2d21*gE2d33*(4*gE2d24*gE2d31 + + gE2d22*gE2d33) + + gE2d23*(6*gE2d24*gE2d31**2 + 4*gE2d22*gE2d31*gE2d33 + - gE2d20*gE2d33**2)))))))))))) + - gE1d13*((gE1d42*((3*gE2d13**2*gE2d33**2*((6*gE2d24*gE2d31**2 + - gE2d33*((4*gE2d22*gE2d31 + gE2d20*gE2d33)))) + - 3*gE2d11*gE2d33*(((-gE2d24**3)*gE2d31**2 - + gE2d20*gE2d33**2)))))) + + gE1d13*(gE1d42*(3*gE2d13**2*gE2d33**2*(6*gE2d24*gE2d31**2 + + gE2d33*(4*gE2d22*gE2d31 + gE2d20*gE2d33)) + + 3*gE2d11*gE2d33*((-gE2d24**3)*gE2d31**2 - gE2d22*gE2d23**2*gE2d33**2 - - gE2d24**2*gE2d33*((3*gE2d22*gE2d31 + gE2d20*gE2d33)) - - gE2d24*gE2d33*((3*gE2d23**2*gE2d31 + + gE2d24**2*gE2d33*(3*gE2d22*gE2d31 + gE2d20*gE2d33) - + gE2d24*gE2d33*(3*gE2d23**2*gE2d31 + 2*gE2d21*gE2d23*gE2d33 + - gE2d33*((gE2d22**2 - gE2d11*gE2d33)))))) - - gE2d13*((gE2d24**3*gE2d31**3 + - 9*gE2d24**2*gE2d31*gE2d33*((gE2d22*gE2d31 + - gE2d20*gE2d33)) + - 3*gE2d24*gE2d33*((3*gE2d23**2*gE2d31**2 + + gE2d33*(gE2d22**2 - gE2d11*gE2d33))) - + gE2d13*(gE2d24**3*gE2d31**3 + + 9*gE2d24**2*gE2d31*gE2d33*(gE2d22*gE2d31 + + gE2d20*gE2d33) + + 
3*gE2d24*gE2d33*(3*gE2d23**2*gE2d31**2 + 6*gE2d21*gE2d23*gE2d31*gE2d33 + - gE2d33*((3*gE2d22**2*gE2d31 + gE2d21**2*gE2d33 + + gE2d33*(3*gE2d22**2*gE2d31 + gE2d21**2*gE2d33 + 2*gE2d20*gE2d22*gE2d33 - - 8*gE2d11*gE2d31*gE2d33)))) + - gE2d33**2*((gE2d22**3*gE2d33 + + 8*gE2d11*gE2d31*gE2d33)) + + gE2d33**2*(gE2d22**3*gE2d33 + 3*gE2d20*gE2d23**2*gE2d33 + - gE2d22*((9*gE2d23**2*gE2d31 + + gE2d22*(9*gE2d23**2*gE2d31 + 6*gE2d21*gE2d23*gE2d33 - - 6*gE2d11*gE2d33**2)))))))) + - gE2d33*((gE1d33*(((-20)*gE2d13**2*gE2d31**2*gE2d33**2 + - gE2d11*gE2d33*((6*gE2d24**2*gE2d31**2 + - 2*gE2d24*gE2d33*((4*gE2d22*gE2d31 + - gE2d20*gE2d33)) + - gE2d33*((4*gE2d23**2*gE2d31 + + 6*gE2d11*gE2d33**2)))) + + gE2d33*(gE1d33*((-20)*gE2d13**2*gE2d31**2*gE2d33**2 + + gE2d11*gE2d33*(6*gE2d24**2*gE2d31**2 + + 2*gE2d24*gE2d33*(4*gE2d22*gE2d31 + + gE2d20*gE2d33) + + gE2d33*(4*gE2d23**2*gE2d31 + 2*gE2d21*gE2d23*gE2d33 + - gE2d33*((gE2d22**2 - - 2*gE2d11*gE2d33)))))) + - gE2d13*((4*gE2d24**2*gE2d31**3 + - 4*gE2d24*gE2d31*gE2d33*((3*gE2d22*gE2d31 + - 2*gE2d20*gE2d33)) + - gE2d33*((6*gE2d23**2*gE2d31**2 + + gE2d33*(gE2d22**2 - + 2*gE2d11*gE2d33))) + + gE2d13*(4*gE2d24**2*gE2d31**3 + + 4*gE2d24*gE2d31*gE2d33*(3*gE2d22*gE2d31 + + 2*gE2d20*gE2d33) + + gE2d33*(6*gE2d23**2*gE2d31**2 + 8*gE2d21*gE2d23*gE2d31*gE2d33 + - gE2d33*((4*gE2d22**2*gE2d31 + + gE2d33*(4*gE2d22**2*gE2d31 + gE2d21**2*gE2d33 + 2*gE2d20*gE2d22*gE2d33 - - 20*gE2d11*gE2d31*gE2d33)))))))) + - gE2d33*((2*gE1d32*((gE2d11*gE2d33*((4*gE2d23*gE2d24* -gE2d31 + gE2d22*gE2d23*gE2d33 + gE2d21*gE2d24*gE2d33)) + - gE2d13*((gE2d21*gE2d33*((4*gE2d24*gE2d31 + - gE2d22*gE2d33)) + - gE2d23*((6*gE2d24*gE2d31**2 + + 20*gE2d11*gE2d31*gE2d33)))) + + gE2d33*(2*gE1d32*(gE2d11*gE2d33*(4*gE2d23*gE2d24* +gE2d31 + gE2d22*gE2d23*gE2d33 + gE2d21*gE2d24*gE2d33) + + gE2d13*(gE2d21*gE2d33*(4*gE2d24*gE2d31 + + gE2d22*gE2d33) + + gE2d23*(6*gE2d24*gE2d31**2 + 4*gE2d22*gE2d31*gE2d33 + - gE2d20*gE2d33**2)))))) + - gE2d33*(((-gE1d22)*((gE2d11*gE2d33*((5*gE2d24* -gE2d31 + gE2d22*gE2d33)) + gE2d13*((10*gE2d24*gE2d31**2 + + gE2d20*gE2d33**2))) + + gE2d33*((-gE1d22)*(gE2d11*gE2d33*(5*gE2d24* +gE2d31 + gE2d22*gE2d33) + gE2d13*(10*gE2d24*gE2d31**2 + 5*gE2d22*gE2d31*gE2d33 + - gE2d20*gE2d33**2)))) - - gE2d33*(((-2)*gE1d11*gE2d33*((6*gE2d13* -gE2d31 + gE2d11*gE2d33)) + gE1d02*((15*gE2d24*gE2d31**2 + + gE2d20*gE2d33**2)) - + gE2d33*((-2)*gE1d11*gE2d33*(6*gE2d13* +gE2d31 + gE2d11*gE2d33) + gE1d02*(15*gE2d24*gE2d31**2 + 6*gE2d22*gE2d31*gE2d33 + - gE2d20*gE2d33**2)))))) + - gE1d31*(((-10)*gE2d13**2*gE2d31*gE2d33**2 + - gE2d11*gE2d33*((4*gE2d24**2*gE2d31 + + gE2d20*gE2d33**2))) + + gE1d31*((-10)*gE2d13**2*gE2d31*gE2d33**2 + + gE2d11*gE2d33*(4*gE2d24**2*gE2d31 + gE2d23**2*gE2d33 + - 2*gE2d22*gE2d24*gE2d33)) + - gE2d13*((6*gE2d24**2*gE2d31**2 + - 2*gE2d24*gE2d33*((4*gE2d22*gE2d31 + - gE2d20*gE2d33)) + - gE2d33*((4*gE2d23**2*gE2d31 + + 2*gE2d22*gE2d24*gE2d33) + + gE2d13*(6*gE2d24**2*gE2d31**2 + + 2*gE2d24*gE2d33*(4*gE2d22*gE2d31 + + gE2d20*gE2d33) + + gE2d33*(4*gE2d23**2*gE2d31 + 2*gE2d21*gE2d23*gE2d33 + - gE2d33*((gE2d22**2 - - 4*gE2d11*gE2d33)))))))))))))) -+ gE1d02*((gE1d42*((gE2d24**4*gE2d31**3 + - 12*gE2d24**3*gE2d31*gE2d33*((gE2d22*gE2d31 + - gE2d20*gE2d33)) + - gE2d33**2*((3*gE2d23**4*gE2d31 + 4*gE2d21*gE2d23**3*gE2d33 - + gE2d33*(gE2d22**2 - + 4*gE2d11*gE2d33))))))) ++ gE1d02*(gE1d42*(gE2d24**4*gE2d31**3 + + 12*gE2d24**3*gE2d31*gE2d33*(gE2d22*gE2d31 + + gE2d20*gE2d33) + + gE2d33**2*(3*gE2d23**4*gE2d31 + 4*gE2d21*gE2d23**3*gE2d33 - 8*gE2d13*gE2d21*gE2d23*gE2d33**2 + - 2*gE2d13*gE2d33**2*(((-2)*gE2d22**2 + - 
5*gE2d13*gE2d31 + 2*gE2d11*gE2d33)) - - 2*gE2d23**2*gE2d33*(((-3)*gE2d22**2 + - 8*gE2d13*gE2d31 + 2*gE2d11*gE2d33)))) + - 2*gE2d24**2*gE2d33*((9*gE2d23**2*gE2d31**2 + + 2*gE2d13*gE2d33**2*((-2)*gE2d22**2 + + 5*gE2d13*gE2d31 + 2*gE2d11*gE2d33) - + 2*gE2d23**2*gE2d33*((-3)*gE2d22**2 + + 8*gE2d13*gE2d31 + 2*gE2d11*gE2d33)) + + 2*gE2d24**2*gE2d33*(9*gE2d23**2*gE2d31**2 + 18*gE2d21*gE2d23*gE2d31*gE2d33 + - gE2d33*((9*gE2d22**2*gE2d31 - 12*gE2d13*gE2d31**2 + + gE2d33*(9*gE2d22**2*gE2d31 - 12*gE2d13*gE2d31**2 + 3*gE2d21**2*gE2d33 + 6*gE2d20*gE2d22*gE2d33 - - 8*gE2d11*gE2d31*gE2d33)))) + - 4*gE2d24*gE2d33**2*((gE2d22**3*gE2d33 + - gE2d20*gE2d33*((3*gE2d23**2 - 2*gE2d13*gE2d33)) + - gE2d22*((9*gE2d23**2*gE2d31 + 6*gE2d21*gE2d23*gE2d33 - - 2*gE2d33*((4*gE2d13*gE2d31 + - gE2d11*gE2d33)))))))) - - gE2d33*((gE1d33*((4*gE2d24**3*gE2d31**3 + - 6*gE2d24**2*gE2d31*gE2d33*((3*gE2d22*gE2d31 + - 2*gE2d20*gE2d33)) + - 3*gE2d24*gE2d33*((6*gE2d23**2*gE2d31**2 + + 8*gE2d11*gE2d31*gE2d33)) + + 4*gE2d24*gE2d33**2*(gE2d22**3*gE2d33 + + gE2d20*gE2d33*(3*gE2d23**2 - 2*gE2d13*gE2d33) + + gE2d22*(9*gE2d23**2*gE2d31 + 6*gE2d21*gE2d23*gE2d33 - + 2*gE2d33*(4*gE2d13*gE2d31 + + gE2d11*gE2d33)))) - + gE2d33*(gE1d33*(4*gE2d24**3*gE2d31**3 + + 6*gE2d24**2*gE2d31*gE2d33*(3*gE2d22*gE2d31 + + 2*gE2d20*gE2d33) + + 3*gE2d24*gE2d33*(6*gE2d23**2*gE2d31**2 + 8*gE2d21*gE2d23*gE2d31*gE2d33 + - gE2d33*((4*gE2d22**2*gE2d31 - + gE2d33*(4*gE2d22**2*gE2d31 - 10*gE2d13*gE2d31**2 + gE2d21**2*gE2d33 + 2*gE2d20*gE2d22*gE2d33 - - 5*gE2d11*gE2d31*gE2d33)))) + - gE2d33**2*((gE2d22**3*gE2d33 + - 3*gE2d20*gE2d33*((gE2d23**2 - gE2d13*gE2d33)) + - 3*gE2d22*((4*gE2d23**2*gE2d31 + + 5*gE2d11*gE2d31*gE2d33)) + + gE2d33**2*(gE2d22**3*gE2d33 + + 3*gE2d20*gE2d33*(gE2d23**2 - gE2d13*gE2d33) + + 3*gE2d22*(4*gE2d23**2*gE2d31 + 2*gE2d21*gE2d23*gE2d33 - - gE2d33*((5*gE2d13*gE2d31 + - gE2d11*gE2d33)))))))) + - gE2d33*((3*gE1d31*((2*gE2d24**3*gE2d31**2 + - gE2d22*gE2d33**2*((gE2d23**2 - gE2d13*gE2d33)) + - gE2d24**2*gE2d33*((4*gE2d22*gE2d31 + - gE2d20*gE2d33)) + - gE2d24*gE2d33*((4*gE2d23**2*gE2d31 + + gE2d33*(5*gE2d13*gE2d31 + + gE2d11*gE2d33)))) + + gE2d33*(3*gE1d31*(2*gE2d24**3*gE2d31**2 + + gE2d22*gE2d33**2*(gE2d23**2 - gE2d13*gE2d33) + + gE2d24**2*gE2d33*(4*gE2d22*gE2d31 + + gE2d20*gE2d33) + + gE2d24*gE2d33*(4*gE2d23**2*gE2d31 + 2*gE2d21*gE2d23*gE2d33 + - gE2d33*((gE2d22**2 - 5*gE2d13*gE2d31 - - gE2d11*gE2d33)))))) + - gE1d32*((4*gE2d23**3*gE2d31*gE2d33 + + gE2d33*(gE2d22**2 - 5*gE2d13*gE2d31 - + gE2d11*gE2d33))) + + gE1d32*(4*gE2d23**3*gE2d31*gE2d33 + 3*gE2d21*gE2d23**2*gE2d33**2 + - 3*gE2d21*gE2d33*((4*gE2d24**2*gE2d31 + + 3*gE2d21*gE2d33*(4*gE2d24**2*gE2d31 + 2*gE2d22*gE2d24*gE2d33 - - gE2d13*gE2d33**2)) + - 3*gE2d23*((6*gE2d24**2*gE2d31**2 + - gE2d33**2*((gE2d22**2 - 5*gE2d13*gE2d31 - - gE2d11*gE2d33)) + - 2*gE2d24*gE2d33*((4*gE2d22*gE2d31 + - gE2d20*gE2d33)))))) + - gE2d33*((gE2d33**2*((6*gE1d12*gE2d23*gE2d31 + + gE2d13*gE2d33**2) + + 3*gE2d23*(6*gE2d24**2*gE2d31**2 + + gE2d33**2*(gE2d22**2 - 5*gE2d13*gE2d31 - + gE2d11*gE2d33) + + 2*gE2d24*gE2d33*(4*gE2d22*gE2d31 + + gE2d20*gE2d33))) + + gE2d33*(gE2d33**2*(6*gE1d12*gE2d23*gE2d31 + 6*gE1d11*gE2d24*gE2d31 + gE1d12*gE2d21*gE2d33 + - gE1d11*gE2d22*gE2d33)) - - gE1d22*((10*gE2d24**2*gE2d31**2 + - 2*gE2d24*gE2d33*((5*gE2d22*gE2d31 + - gE2d20*gE2d33)) + - gE2d33*((5*gE2d23**2*gE2d31 + + gE1d11*gE2d22*gE2d33) - + gE1d22*(10*gE2d24**2*gE2d31**2 + + 2*gE2d24*gE2d33*(5*gE2d22*gE2d31 + + gE2d20*gE2d33) + + gE2d33*(5*gE2d23**2*gE2d31 + 2*gE2d21*gE2d23*gE2d33 + - gE2d33*((gE2d22**2 - + gE2d33*(gE2d22**2 - 
12*gE2d13*gE2d31 - - 2*gE2d11*gE2d33))))))))))))))) + 2*gE2d11*gE2d33)))))))) polyd2_18 = ((-3)*gE1d13*gE1d42*gE2d13*gE2d23*gE2d24**2*gE2d31**3 - gE1d12*gE1d42*gE2d13*gE2d24**3*gE2d31**3 + diff --git a/sasmodels/TwoYukawa/TFourier.py b/sasmodels/TwoYukawa/TFourier.py index 7eda2947..05a935e6 100644 --- a/sasmodels/TwoYukawa/TFourier.py +++ b/sasmodels/TwoYukawa/TFourier.py @@ -1,6 +1,7 @@ -from numpy import exp, pi, arange, linspace, abs, ceil, log2, interp +from numpy import abs, arange, ceil, exp, interp, linspace, log2, pi from numpy.fft import fft + def TFourier(x, y, deltaX): """ Compute the Fourier transform of a function y(x) with sampling interval deltaX. diff --git a/sasmodels/TwoYukawa/TInvFourier.py b/sasmodels/TwoYukawa/TInvFourier.py index 21829935..20978e1c 100644 --- a/sasmodels/TwoYukawa/TInvFourier.py +++ b/sasmodels/TwoYukawa/TInvFourier.py @@ -1,6 +1,7 @@ -from numpy import exp, ceil, log2, pi, arange, interp +from numpy import arange, ceil, exp, interp, log2, pi from numpy.fft import fft + def TInvFourier(x, y, deltaX): """ Inverse Fourier transform implementation diff --git a/sasmodels/models/two_yukawa.py b/sasmodels/models/two_yukawa.py index f1f22c4d..00d110d9 100644 --- a/sasmodels/models/two_yukawa.py +++ b/sasmodels/models/two_yukawa.py @@ -94,7 +94,7 @@ from numpy import inf # TODO: pep8 says packages and modules should not use camel case -from sasmodels.TwoYukawa.CalTYSk import CalTYSk, K_MIN, Z_MIN, Z_MIN_DIFF +from sasmodels.TwoYukawa.CalTYSk import K_MIN, Z_MIN, Z_MIN_DIFF, CalTYSk # If you want a customized version of two_yukawa as a plugin (for example, # because you want to use the high precision polynomial root solver from mpmath) @@ -104,9 +104,9 @@ # https://github.com/SasView/sasmodels/tree/master/sasmodels/models/two_yukawa.py # https://github.com/SasView/sasmodels/tree/master/sasmodels/TwoYukawa if 0: + import importlib.util import sys from pathlib import Path - import importlib.util # Remove existing TwoYukawa from sys.modules to force a reload remove = [modname for modname in sys.modules if modname.startswith('TwoYukawa.') or modname == 'TwoYukawa'] @@ -121,7 +121,7 @@ sys.modules['TwoYukawa'] = module # Override sasmodels library symbols with the local symbols. - from TwoYukawa.CalTYSk import CalTYSk, K_MIN, Z_MIN, Z_MIN_DIFF + from TwoYukawa.CalTYSk import K_MIN, Z_MIN, Z_MIN_DIFF, CalTYSk name = "two_yukawa" title = "User model for two Yukawa structure factor (S(q))" From 80b023f74579781e87c5b725c166be780a1badb7 Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Thu, 18 Sep 2025 21:15:41 -0400 Subject: [PATCH 09/16] nicer layout for sasmodels.compare plots --- sasmodels/compare.py | 80 +++++++++++++++++++++++++++++++------------- 1 file changed, 57 insertions(+), 23 deletions(-) diff --git a/sasmodels/compare.py b/sasmodels/compare.py index 01cc22ac..a3c94000 100755 --- a/sasmodels/compare.py +++ b/sasmodels/compare.py @@ -854,6 +854,14 @@ def run_models(opts, verbose=False): base_time = comp_time = None base_value = comp_value = resid = relerr = None + base_name = comp_name = None + def name_pair(base_test, comp_test): + """ + iterate over name pairs in reverse order, keeping the first that are different + """ + nonlocal base_name, comp_name + if base_test != comp_test: + base_name, comp_name = base_test, comp_test # Base calculation try: @@ -879,21 +887,35 @@ def run_models(opts, verbose=False): except ImportError: traceback.print_exc() + # Find a string pair that describes the difference between base and comp. 
+ # Go from least interesting to most interesting, updating if different. + name_pair("base", "comp") + name_pair( + " ".join(f"{k}={v}" for k, v in base_pars.items() if comp_pars.get(k, v) != v), + " ".join(f"{k}={v}" for k, v in comp_pars.items() if base_pars.get(k, v) != v), + ) + name_pair(base.engine, comp.engine) + name_pair(base.model.info.name, comp.model.info.name) + else: + name_pair(f"{base.model.info.name}:{base.engine}", None) + # Compare, but only if computing both forms if comparison: resid = (base_value - comp_value) relerr = resid/np.where(comp_value != 0., abs(comp_value), 1.0) if verbose: _print_stats("|%s-%s|" - % (base.engine, comp.engine) + (" "*(3+len(comp.engine))), + % (base_name, comp_name) + (" "*(3+len(comp_name))), resid) _print_stats("|(%s-%s)/%s|" - % (base.engine, comp.engine, comp.engine), + % (base_name, comp_name, comp_name), relerr) - return dict(base_value=base_value, comp_value=comp_value, - base_time=base_time, comp_time=comp_time, - resid=resid, relerr=relerr) + return dict( + base_name=base_name, comp_name=comp_name, + base_value=base_value, comp_value=comp_value, + base_time=base_time, comp_time=comp_time, + resid=resid, relerr=relerr) def _print_stats(label, err): @@ -923,6 +945,7 @@ def plot_models(opts, result, limits=None, setnum=0): """ import matplotlib.pyplot as plt + base_name, comp_name = result['base_name'], result['comp_name'] base_value, comp_value = result['base_value'], result['comp_value'] base_time, comp_time = result['base_time'], result['comp_time'] resid, relerr = result['resid'], result['relerr'] @@ -947,25 +970,30 @@ def plot_models(opts, result, limits=None, setnum=0): if have_base: if have_comp: - plt.subplot(131) - plot_theory(base_data, base_value, view=view, use_data=use_data, limits=limits) + plt.subplot(221) + plot_theory(base_data, base_value, label=base_name, view=view, use_data=use_data, limits=limits) if setnum > 0: plt.legend([f"Set {k+1}" for k in range(setnum+1)], loc='best') - plt.title("%s t=%.2f ms"%(base.engine, base_time)) + plt.title("%s t=%.2f ms"%(base.model.info.name, base_time)) + if have_comp: + plt.gca().tick_params(labelbottom=False) + plt.gca().set_xticks([]) + plt.xlabel('') + #cbar_title = "log I" if have_comp: if have_base: - plt.subplot(132) + plt.subplot(223) if not opts['is2d'] and have_base: - plot_theory(comp_data, base_value, view=view, use_data=use_data, limits=limits) - plot_theory(comp_data, comp_value, view=view, use_data=use_data, limits=limits) - plt.title("%s t=%.2f ms"%(comp.engine, comp_time)) + plot_theory(comp_data, base_value, label=base_name, view=view, use_data=use_data, limits=limits) + plot_theory(comp_data, comp_value, label=comp_name, view=view, use_data=use_data, limits=limits) + plt.title("%s t=%.2f ms"%(comp.model.info.name, comp_time)) #plt.gca().tick_params(labelbottom=False, labelleft=False) - plt.gca().set_yticks([]) - plt.ylabel('') + #plt.gca().set_yticks([]) + #plt.ylabel('') #cbar_title = "log I" if have_base and have_comp: - plt.subplot(133) + plt.subplot(222) if not opts['rel_err']: err, errstr, errview = resid, "abs err", "linear" else: @@ -980,17 +1008,23 @@ def plot_models(opts, result, limits=None, setnum=0): # Note: base_data only since base and comp have same q values (though # perhaps different resolution), and we are plotting the difference # at each q - plot_theory(base_data, err, view=errview, use_data=use_data) + plot_theory(base_data, err, view=errview, label=errstr, use_data=use_data) plt.xscale('log' if view == 'log' and not opts['is2d'] else 
'linear') plt.title("max %s = %.3g"%(errstr, abs(err).max())) - plt.ylabel(errstr) + if opts['is2d']: + plt.gca().tick_params(labelleft=False) + plt.gca().set_yticks([]) + plt.ylabel('') + else: + plt.ylabel(errstr) + #cbar_title = errstr if errview=="linear" else "log "+errstr - #if is2D: - # h = plt.colorbar() - # h.ax.set_title(cbar_title) - fig = plt.gcf() - extra_title = ' '+opts['title'] if opts['title'] else '' - fig.suptitle(":".join(opts['name']) + extra_title) + # if is2D: + # h = plt.colorbar() + # h.ax.set_title(cbar_title) + # fig = plt.gcf() + # extra_title = ' '+opts['title'] if opts['title'] else '' + # fig.suptitle(":".join(opts['name']) + extra_title) if have_base and have_comp and opts['show_hist']: plt.figure() From 7dce76efe135492023396214cc7f58cbc21ad966 Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Thu, 18 Sep 2025 21:35:43 -0400 Subject: [PATCH 10/16] correct limits for 2d log plots --- sasmodels/data.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sasmodels/data.py b/sasmodels/data.py index 94f28b25..924a9867 100644 --- a/sasmodels/data.py +++ b/sasmodels/data.py @@ -950,6 +950,8 @@ def _plot_result2D(data, theory, resid, view, use_data, limits=None, label='theo scale = 1e8*(data.qx_data**2+data.qy_data**2)**2 else: scale = 1 + if view == "log" and limits is not None: + limits = np.log10(limits[0]), np.log10(limits[1]) if use_data and limits is None: # TODO: do we want log(q^4 Iq) = 4 log q + log Iq if view == 'log': From 6e88cbb9c0fa053ac34e366dccfe240c6d9e02d6 Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Fri, 19 Sep 2025 17:26:14 -0400 Subject: [PATCH 11/16] don't return matplotlib figure; it confuses jupyter --- sasmodels/data.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/sasmodels/data.py b/sasmodels/data.py index 924a9867..68f639c8 100644 --- a/sasmodels/data.py +++ b/sasmodels/data.py @@ -703,7 +703,6 @@ def ytransform(x): ax0.set_yscale(yscale) ax0.set_ylabel(ylabel) ax0.legend() - if use_resid: # Inset plot would use the following # location = [0, -resid_ratio/(1-resid_ratio), 1, resid_ratio/(1-resid_ratio)] @@ -743,7 +742,6 @@ def ytransform(x): inset_calc_2D(ax0, Iq_calc) plt.tight_layout() - return plt.gcf() def inset_calc_2D(ax0, Iqxy_calc, alpha=0.8, portion=0.3, logz=True): From 767ef0ceba97c9828fbcac73f69b0c9154be859c Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Fri, 19 Sep 2025 19:49:57 -0400 Subject: [PATCH 12/16] make sure residuals plot uses same xscale as data; leave more room for the residuals plot --- sasmodels/bumps_model.py | 11 ++++++----- sasmodels/data.py | 6 +++++- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/sasmodels/bumps_model.py b/sasmodels/bumps_model.py index f7fe171d..967da198 100644 --- a/sasmodels/bumps_model.py +++ b/sasmodels/bumps_model.py @@ -217,6 +217,7 @@ def parameters(self): """ Return a dictionary of parameters """ + # TODO: tie the background and scale to the data rather than the model pars = self.model.parameters() if self.extra_pars is not None: pars.update(self.extra_pars) @@ -273,12 +274,12 @@ def _plot(self, view=None, backend='matplotlib'): fig = plot_theory(data, theory, resid, view, Iq_calc=Iq_calc, label=label, backend=backend) return fig - def plot(self, view=None): - return self._plot(view=view, backend='matplotlib') + # def plot(self, view=None): + # return self._plot(view=view, backend='matplotlib') - # # Bumps doesn't yet support 2D plots with plotly. 
- # def plotly(self, view=None): - # return self._plot(view=view, backend='plotly') + # Bumps doesn't yet support 2D plots with plotly. + def plotly(self, view=None): + return self._plot(view=view, backend='plotly') def simulate_data(self, noise=None): # type: (float) -> None diff --git a/sasmodels/data.py b/sasmodels/data.py index 68f639c8..0429fe9d 100644 --- a/sasmodels/data.py +++ b/sasmodels/data.py @@ -49,6 +49,10 @@ pass # pylint: enable=unused-import +RESID_RATIO = 0.25 +"""Portion of the graph area to use for the residuals plot""" + + def load_data(filename, index=0): # type: (str, int) -> Data """ @@ -523,7 +527,6 @@ def get_data_label(data): run = run[0] return run if run else filename if filename else title if title else "data" -RESID_RATIO = 0.15 def _plot_result1D( data, theory, resid, view, use_data, limits=None, Iq_calc=None, label='theory', @@ -640,6 +643,7 @@ def ytransform(x): fig.add_hline(y=1, line=dict(color='black', dash='dot'), row=2, col=1) fig.add_hline(y=-1, line=dict(color='black', dash='dot'), row=2, col=1) fig.update_yaxes(title_text='resid', type='linear', row=2, col=1) + fig.update_xaxes(type=xscale, row=2, col=1) fig.update_xaxes(title_text=xlabel, row=2, col=1) else: fig.update_xaxes(title_text=xlabel, row=1, col=1) From 2099f2951fc85eb4c9401826e2ee8ea7e5fc4e8a Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Fri, 19 Sep 2025 20:01:45 -0400 Subject: [PATCH 13/16] use matplotlib backend in bumps fitter for now --- sasmodels/bumps_model.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sasmodels/bumps_model.py b/sasmodels/bumps_model.py index 967da198..3818f72a 100644 --- a/sasmodels/bumps_model.py +++ b/sasmodels/bumps_model.py @@ -274,12 +274,12 @@ def _plot(self, view=None, backend='matplotlib'): fig = plot_theory(data, theory, resid, view, Iq_calc=Iq_calc, label=label, backend=backend) return fig - # def plot(self, view=None): - # return self._plot(view=view, backend='matplotlib') + def plot(self, view=None): + return self._plot(view=view, backend='matplotlib') - # Bumps doesn't yet support 2D plots with plotly. - def plotly(self, view=None): - return self._plot(view=view, backend='plotly') + # # Bumps doesn't yet support 2D plots with plotly. + # def plotly(self, view=None): + # return self._plot(view=view, backend='plotly') def simulate_data(self, noise=None): # type: (float) -> None From cbb1e3267fc0bcb37e52f6d418cf30a34ca10d4c Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Fri, 26 Sep 2025 15:44:33 -0400 Subject: [PATCH 14/16] better 2D plots for matplotlib --- sasmodels/data.py | 43 ++++++++++++++++++++++++------------------- 1 file changed, 24 insertions(+), 19 deletions(-) diff --git a/sasmodels/data.py b/sasmodels/data.py index 0429fe9d..5bfa1986 100644 --- a/sasmodels/data.py +++ b/sasmodels/data.py @@ -995,9 +995,9 @@ def _plot_result2D(data, theory, resid, view, use_data, limits=None, label='theo # histogramming and interpolation algorithm to rebuild a grid. 
active_data = data.data[~data.mask] - zlabel = 'log I(q)' if view == 'log' else '10⁸q⁴I(q)' if view == 'q4' else 'I(q)' theory_label = label - data_label = f"{zlabel} data" + zprefix = 'log10 ' if view == 'log' else '10⁸q⁴ ' if view == 'q4' else '' + data_label = f"{zprefix}{get_data_label(data)}" resid_label = '(I(q) - y)/Δy' if backend == "plotly": @@ -1013,11 +1013,13 @@ def add_trace(trace, row, col, label): if num_plots == 1: trace.update(colorbar=colorbar) fig.add_trace(trace) + fig.update_xaxes(scaleanchor=f"y{row}", scaleratio=1) + fig.update_yaxes(scaleanchor=f"x{col}", scaleratio=1) else: trace.update(colorbar=dict(x=col/2-0.05, y=1-(row/2-0.05), len=0.45, **colorbar)) fig.add_trace(trace, row=row, col=col) - fig.update_xaxes(row=row, col=col, scaleanchor=f"y{row}", scaleratio=1) - fig.update_yaxes(row=row, col=col, scaleanchor=f"x{col}", scaleratio=1) + fig.update_xaxes(row=row, col=col, scaleanchor=f"y{row}", scaleratio=1) + fig.update_yaxes(row=row, col=col, scaleanchor=f"x{col}", scaleratio=1) fig = make_subplots(rows=2, cols=2) if num_plots > 1 else go.Figure() if use_data: @@ -1037,35 +1039,37 @@ def add_trace(trace, row, col, label): else: # backend == "matplotlib" import matplotlib.pyplot as plt # type: ignore - def maybe_hide_labels(row, col): - if num_plots == 1: - return - if row == 1 and col == 2: + def maybe_hide_labels(): + if num_plots > 1: plt.gca().set_xticks([]) plt.xlabel('') plt.gca().set_yticks([]) plt.ylabel('') - # plot theory - if use_theory: - if num_plots > 1: - plt.subplot(2, 2, 2) - _plot_2d_signal(data, theory, label=theory_label, view=view, limits=limits, backend=backend) - maybe_hide_labels(1, 2) + fig = plt.gcf() + if num_plots > 1: + # add grid specifications + gs = fig.add_gridspec(2, 3) # Plot data if use_data: if num_plots > 1: - plt.subplot(2, 2, 1) + fig.add_subplot(gs[0:2, 0:2]) _plot_2d_signal(data, active_data, label=data_label, view=view, limits=limits, backend=backend) - maybe_hide_labels(1, 1) + + # plot theory + if use_theory: + if num_plots > 1: + fig.add_subplot(gs[0, 2]) + _plot_2d_signal(data, theory, label=theory_label, view=view, limits=limits, backend=backend) + maybe_hide_labels() # plot resid if use_resid: if num_plots > 1: - plt.subplot(2, 2, 4) + fig.add_subplot(gs[1, 2]) _plot_2d_signal(data, resid, label=resid_label, view='linear', limits=rlimits, backend=backend) - maybe_hide_labels(2, 2) + maybe_hide_labels() @protect @@ -1115,8 +1119,9 @@ def _plot_2d_signal(data, signal, limits=None, view=None, label=None, backend='m vmin=limits[0], vmax=limits[1]) plt.xlabel("q_x/Å") plt.ylabel("q_y/Å") + plt.title(label) h = plt.colorbar(location='right') - h.set_label(label) + #h.set_label(label) # === The following is modified from sas.sasgui.plottools.PlotPanel From f80b388825096914a13e1659b273377fb6ff449f Mon Sep 17 00:00:00 2001 From: Paul Kienzle Date: Fri, 26 Sep 2025 16:19:08 -0400 Subject: [PATCH 15/16] better 2D plots for matplotlib in bumps webview --- sasmodels/data.py | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/sasmodels/data.py b/sasmodels/data.py index 5bfa1986..741eff67 100644 --- a/sasmodels/data.py +++ b/sasmodels/data.py @@ -1056,6 +1056,8 @@ def maybe_hide_labels(): if num_plots > 1: fig.add_subplot(gs[0:2, 0:2]) _plot_2d_signal(data, active_data, label=data_label, view=view, limits=limits, backend=backend) + if num_plots == 1: + plt.colorbar(location='right') # plot theory if use_theory: @@ -1063,6 +1065,7 @@ def maybe_hide_labels(): fig.add_subplot(gs[0, 2]) 
_plot_2d_signal(data, theory, label=theory_label, view=view, limits=limits, backend=backend) maybe_hide_labels() + plt.colorbar(location='right') # plot resid if use_resid: @@ -1070,6 +1073,7 @@ def maybe_hide_labels(): fig.add_subplot(gs[1, 2]) _plot_2d_signal(data, resid, label=resid_label, view='linear', limits=rlimits, backend=backend) maybe_hide_labels() + plt.colorbar(location='right') @protect @@ -1119,8 +1123,25 @@ def _plot_2d_signal(data, signal, limits=None, view=None, label=None, backend='m vmin=limits[0], vmax=limits[1]) plt.xlabel("q_x/Å") plt.ylabel("q_y/Å") - plt.title(label) - h = plt.colorbar(location='right') + + # When plotting in bumps webview with mpld3 the chisq value is printed + # above the right hand corner of the data graph. Move the labels for the + # graphs to the left hand side to avoid collision. Unfortunately, mpld3 + # doesn't understand title with loc="left", so do the label as a text string + # on the axes. + if 0: + #plt.title(label, loc="left") + plt.title(label) + else: + ax = plt.gca() + transform = ax.transAxes + h_ex = 30 # assume we are 50 lines tall, so that 2/30 ~ 0.08 + text_offset = 0.5 / h_ex # 1/2 ex above and below the text + x, y = text_offset, 1 + text_offset + ha, va = "left", "bottom" + ax.text(x, y, label, transform=transform, va=va, ha=ha) + + #h = plt.colorbar(location='right') #h.set_label(label) From 1892f03daa98c157135679bb6c3752759fdfe384 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci-lite[bot]" <117423508+pre-commit-ci-lite[bot]@users.noreply.github.com> Date: Thu, 18 Dec 2025 14:24:03 +0000 Subject: [PATCH 16/16] [pre-commit.ci lite] apply automatic fixes for ruff linting errors --- example/sesansfit_CodeCampIII.py | 2 +- example/simul_fit.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/example/sesansfit_CodeCampIII.py b/example/sesansfit_CodeCampIII.py index 64d83952..62fa3b90 100644 --- a/example/sesansfit_CodeCampIII.py +++ b/example/sesansfit_CodeCampIII.py @@ -54,4 +54,4 @@ if __name__ == "__main__": import matplotlib.pyplot as plt problem.plot() - plt.show() \ No newline at end of file + plt.show() diff --git a/example/simul_fit.py b/example/simul_fit.py index fe486eb8..09205c4d 100644 --- a/example/simul_fit.py +++ b/example/simul_fit.py @@ -67,4 +67,4 @@ if __name__ == "__main__": import matplotlib.pyplot as plt problem.plot() - plt.show() \ No newline at end of file + plt.show()
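
Taken together, the last two data.py patches settle on a matplotlib layout where the measured image occupies the left two thirds of a 2x3 GridSpec while the theory and residual images stack in the right-hand column, each with hidden ticks and its own colorbar. A minimal standalone sketch of that arrangement with synthetic images; the array contents, figure size, and labels are placeholders rather than sasmodels output:

    import numpy as np
    import matplotlib.pyplot as plt

    rng = np.random.default_rng(1)
    data = rng.lognormal(size=(64, 64))       # stand-in for the measured 2D image
    theory = rng.lognormal(size=(64, 64))     # stand-in for the model image
    resid = (data - theory) / np.sqrt(data)   # stand-in for normalized residuals

    fig = plt.figure(figsize=(9, 6))
    gs = fig.add_gridspec(2, 3)

    # The data panel spans both rows of the first two columns.
    ax_data = fig.add_subplot(gs[0:2, 0:2])
    im = ax_data.imshow(np.log10(data), origin='lower')
    ax_data.set_title('data')
    fig.colorbar(im, ax=ax_data, location='right')

    # Theory and residuals stack in the right-hand column; ticks are hidden so
    # they read as thumbnails beside the data panel, as in the patch.
    for spec, z, label in [(gs[0, 2], np.log10(theory), 'theory'),
                           (gs[1, 2], resid, 'residuals')]:
        ax = fig.add_subplot(spec)
        im = ax.imshow(z, origin='lower')
        ax.set_xticks([])
        ax.set_yticks([])
        ax.set_title(label)
        fig.colorbar(im, ax=ax, location='right')

    plt.show()

Attaching a colorbar to each panel rather than sharing one scale mirrors the per-panel plt.colorbar(location='right') calls added in the webview patch.
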