From 24e16b6cdb291077d25bf73ecf98e05e20deb6ff Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Tue, 25 Nov 2025 15:51:52 +0000 Subject: [PATCH 01/12] first commit --- heracles/twopoint.py | 10 ++++++++-- heracles/unmixing.py | 25 +++++++++++++++++-------- tests/test_dices.py | 17 ----------------- tests/test_transforms.py | 21 ++++++++++++++++----- tests/test_twopoint.py | 12 ++++++++++++ 5 files changed, 53 insertions(+), 32 deletions(-) diff --git a/heracles/twopoint.py b/heracles/twopoint.py index b55aad5a..40ca49b3 100644 --- a/heracles/twopoint.py +++ b/heracles/twopoint.py @@ -398,6 +398,7 @@ def mixing_matrices( def invert_mixing_matrix( M, + options: dict = {}, rtol: float = 1e-5, progress: Progress | None = None, ): @@ -427,6 +428,11 @@ def invert_mixing_matrix( *_, _n, _m = _M.shape new_ell = np.arange(_m) + if key in list(options.keys()): + _rtol = options[key].get("rtol", rtol) + else: + _rtol = rtol + with progress.task(f"invert {key}"): if (s1 != 0) and (s2 != 0): _inv_m = np.linalg.pinv( @@ -435,10 +441,10 @@ def invert_mixing_matrix( ) _inv_M_EEEE = _inv_m[:_m, :_n] _inv_M_EEBB = _inv_m[_m:, :_n] - _inv_M_EBEB = np.linalg.pinv(_M[2], rcond=rtol) + _inv_M_EBEB = np.linalg.pinv(_M[2], rcond=_rtol) _inv_M = np.array([_inv_M_EEEE, _inv_M_EEBB, _inv_M_EBEB]) else: - _inv_M = np.linalg.pinv(_M, rcond=rtol) + _inv_M = np.linalg.pinv(_M, rcond=_rtol) inv_M[key] = Result(_inv_M, axis=value.axis, ell=new_ell) diff --git a/heracles/unmixing.py b/heracles/unmixing.py index fb875893..016281aa 100644 --- a/heracles/unmixing.py +++ b/heracles/unmixing.py @@ -27,19 +27,25 @@ from dataclasses import replace -def natural_unmixing(d, m, x0=-2, k=50, patch_hole=True, lmax=None): +def natural_unmixing(d, m, options={}, rtol=0.2, smoothing=50): wm = {} m_keys = list(m.keys()) for m_key in m_keys: + if m_key in list(options.keys()): + _rtol = options[m_key].get("rtol", rtol) + _smoothing = options[m_key].get("smoothing", smoothing) + else: + _rtol = rtol + _smoothing = 
smoothing _m = m[m_key].array _wm = cl2corr(_m).T[0] - if patch_hole: - _wm *= logistic(np.log10(abs(_wm)), x0=x0, k=k) + _tol = _rtol * np.max(abs(_wm)) + _wm *= logistic(np.log10(abs(_wm)), tol=np.log10(_tol), smoothing=_smoothing) wm[m_key] = _wm - return _natural_unmixing(d, wm, lmax=lmax) + return _natural_unmixing(d, wm) -def _natural_unmixing(d, wm, lmax=None): +def _natural_unmixing(d, wm): """ Natural unmixing of the data Cl. Args: @@ -54,8 +60,11 @@ def _natural_unmixing(d, wm, lmax=None): wm_keys = list(wm.keys()) for d_key, wm_key in zip(d_keys, wm_keys): a, b, i, j = d_key - if lmax is None: + ell = d[d_key].ell + if ell is None: *_, lmax = d[d_key].shape + else: + lmax = ell[-1] + 1 s1, s2 = d[d_key].spin _d = np.atleast_2d(d[d_key]) _wm = wm[wm_key] @@ -115,5 +124,5 @@ def _natural_unmixing(d, wm, lmax=None): return corr_d -def logistic(x, x0=-5, k=50): - return 1.0 + np.exp(-k * (x - x0)) +def logistic(x, tol=-5, smoothing=50): + return 1.0 + np.exp(-smoothing * (x - tol)) diff --git a/tests/test_dices.py b/tests/test_dices.py index b52d88d4..eb575634 100644 --- a/tests/test_dices.py +++ b/tests/test_dices.py @@ -100,23 +100,6 @@ def test_mask_correction(cls0, mls0): assert np.isclose(cl[2:], _cl[2:]).all() -def test_polspice(cls0): - from heracles.dices.utils import get_cl - - cls = np.array( - [ - get_cl(("POS", "POS", 1, 1), cls0), - get_cl(("SHE", "SHE", 1, 1), cls0)[0, 0], - get_cl(("SHE", "SHE", 1, 1), cls0)[1, 1], - get_cl(("POS", "SHE", 1, 1), cls0)[0], - ] - ).T - corrs = heracles.cl2corr(cls) - _cls = heracles.corr2cl(corrs) - for cl, _cl in zip(cls.T, _cls.T): - assert np.isclose(cl[2:], _cl[2:]).all() - - def test_jackknife(nside, njk, cov_jk, cls0, cls1): assert len(cls1) == njk for key in cls1.keys(): diff --git a/tests/test_transforms.py b/tests/test_transforms.py index 6513713b..e74cf0f3 100644 --- a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -2,9 +2,9 @@ import heracles -def test_cl2corr(): - # Is there something 
more clever we can do here? - # Like transforming the legendre nodes and return ones? +def test_cl_transform(cls0): + from heracles.dices.utils import get_cl + cl = np.array( [ np.ones(10), @@ -16,8 +16,6 @@ def test_cl2corr(): corr = heracles.cl2corr(cl.T).T assert corr.shape == cl.shape - -def test_corr2cl(): corr = np.array( [ np.ones(10), @@ -28,3 +26,16 @@ def test_corr2cl(): ) cl = heracles.corr2cl(corr.T).T assert corr.shape == cl.shape + + cls = np.array( + [ + get_cl(("POS", "POS", 1, 1), cls0), + get_cl(("SHE", "SHE", 1, 1), cls0)[0, 0], + get_cl(("SHE", "SHE", 1, 1), cls0)[1, 1], + get_cl(("POS", "SHE", 1, 1), cls0)[0], + ] + ).T + corrs = heracles.cl2corr(cls) + _cls = heracles.corr2cl(corrs) + for cl, _cl in zip(cls.T, _cls.T): + assert np.isclose(cl[2:], _cl[2:]).all() diff --git a/tests/test_twopoint.py b/tests/test_twopoint.py index bc6c337d..20dbf9bd 100644 --- a/tests/test_twopoint.py +++ b/tests/test_twopoint.py @@ -402,3 +402,15 @@ def test_inverting_mixing_matrices(): _mixed_cls = np.sum(mixed_cls[key].array) print(key, _mixed_cls) np.testing.assert_allclose(_mixed_cls, (n + 1) * 1.0) + + # test options + options = {("POS", "POS", 1, 1): {"rtol": 1}} + opt_inv_mms = invert_mixing_matrix(mms, options=options) + opt_mixed_cls = apply_mixing_matrix(cls, opt_inv_mms) + for key in opt_mixed_cls: + if key in list(options.keys()): + assert np.all( + opt_mixed_cls[key].array == np.zeros_like(mixed_cls[key].array) + ) + else: + assert np.all(opt_mixed_cls[key].array == mixed_cls[key].array) From e3f473d07296467954632d51069727d4f60805e7 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Tue, 25 Nov 2025 20:04:14 +0000 Subject: [PATCH 02/12] bug --- heracles/dices/jackknife.py | 13 +++++--- heracles/unmixing.py | 59 ++++++++++++++++++++++++++++++------- 2 files changed, 58 insertions(+), 14 deletions(-) diff --git a/heracles/dices/jackknife.py b/heracles/dices/jackknife.py index 1a657595..815d554a 100644 --- a/heracles/dices/jackknife.py +++ 
b/heracles/dices/jackknife.py @@ -25,7 +25,7 @@ from ..result import Result, get_result_array from ..mapping import transform from ..twopoint import angular_power_spectra -from ..unmixing import _natural_unmixing, logistic +from ..unmixing import _natural_unmixing, correct_correlation from ..transforms import cl2corr try: @@ -205,7 +205,7 @@ def correct_bias(cls, jkmaps, fields, jk=0, jk2=0): return cls -def mask_correction(Mljk, Mls0): +def mask_correction(Mljk, Mls0, options={}, rtol=0.2, smoothing=50): """ Internal method to compute the mask correction. input: @@ -225,9 +225,14 @@ def mask_correction(Mljk, Mls0): wmljk = wmljk.T[0] # Compute alpha alpha = wmljk / wmls0 - alpha *= logistic(np.log10(abs(wmljk))) alphas[key] = alpha - return alphas + corr_alphas = correct_correlation( + alphas, + options=options, + rtol=rtol, + smoothing=smoothing, + ) + return corr_alphas def jackknife_covariance(dict, nd=1): diff --git a/heracles/unmixing.py b/heracles/unmixing.py index 016281aa..172339d3 100644 --- a/heracles/unmixing.py +++ b/heracles/unmixing.py @@ -27,21 +27,61 @@ from dataclasses import replace +def correct_correlation(wm, options={}, rtol=0.2, smoothing=50): + """ + Correct the correlation function of the mask to avoid + dividing by very small numbers during unmixing. 
+ Args: + wm: mask correlation functions + options: dictionary of options for each mask + rtol: relative tolerance to apply + smoothing: smoothing parameter for the logistic function + Returns: + wm: corrected mask correlation functions + """ + wm_keys = list(wm.keys()) + corr_wm = {} + for wm_key in wm_keys: + if wm_key in list(options.keys()): + _rtol = options[wm_key].get("rtol", rtol) + _smoothing = options[wm_key].get("smoothing", smoothing) + else: + _rtol = rtol + _smoothing = smoothing + _wm = wm[wm_key] + _tol = _rtol * np.max(abs(_wm)) + _wm *= logistic( + np.log10(abs(_wm)), + tol=np.log10(_tol), + smoothing=_smoothing, + ) + corr_wm[wm_key] = _wm + return corr_wm + + def natural_unmixing(d, m, options={}, rtol=0.2, smoothing=50): + """ + Natural unmixing of the data Cl. + Args: + d: Data Cl + m: mask cls + patch_hole: If True, apply the patch hole correction + Returns: + corr_d: Corrected Cl + """ wm = {} m_keys = list(m.keys()) for m_key in m_keys: - if m_key in list(options.keys()): - _rtol = options[m_key].get("rtol", rtol) - _smoothing = options[m_key].get("smoothing", smoothing) - else: - _rtol = rtol - _smoothing = smoothing _m = m[m_key].array _wm = cl2corr(_m).T[0] - _tol = _rtol * np.max(abs(_wm)) - _wm *= logistic(np.log10(abs(_wm)), tol=np.log10(_tol), smoothing=_smoothing) wm[m_key] = _wm + + wm = correct_correlation( + wm, + options=options, + rtol=rtol, + smoothing=smoothing, + ) return _natural_unmixing(d, wm) @@ -50,7 +90,7 @@ def _natural_unmixing(d, wm): Natural unmixing of the data Cl. 
Args: d: Data Cl - m: mask cls + m: mask correlation function patch_hole: If True, apply the patch hole correction Returns: corr_d: Corrected Cl @@ -59,7 +99,6 @@ def _natural_unmixing(d, wm): d_keys = list(d.keys()) wm_keys = list(wm.keys()) for d_key, wm_key in zip(d_keys, wm_keys): - a, b, i, j = d_key ell = d[d_key].ell if ell is None: *_, lmax = d[d_key].shape From 8542b966251f25f5dd3751b1abf1fda6198d19f2 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Wed, 26 Nov 2025 09:36:04 +0000 Subject: [PATCH 03/12] more tests --- heracles/__init__.py | 2 ++ tests/test_dices.py | 16 ++++++++++++++++ 2 files changed, 18 insertions(+) diff --git a/heracles/__init__.py b/heracles/__init__.py index f1502ad0..1514fffd 100644 --- a/heracles/__init__.py +++ b/heracles/__init__.py @@ -76,6 +76,7 @@ "cl2corr", "corr2cl", # unmixing + "correct_correlation", "natural_unmixing", ] @@ -157,5 +158,6 @@ ) from .unmixing import ( + correct_correlation, natural_unmixing, ) diff --git a/tests/test_dices.py b/tests/test_dices.py index eb575634..0487fcbf 100644 --- a/tests/test_dices.py +++ b/tests/test_dices.py @@ -92,6 +92,22 @@ def test_get_delete2_fsky(jk_maps, njk): def test_mask_correction(cls0, mls0): + # test logistic correction + wm = {} + m_keys = list(mls0.keys()) + for m_key in m_keys: + _m = mls0[m_key].array + _wm = heracles.transforms.cl2corr(_m).T[0] + _wm = np.abs(_wm) + _wm /= np.max(_wm) + wm[m_key] = _wm + _wm = heracles.correct_correlation(wm, rtol= _wm) + for m_key in m_keys: + _w = wm[m_key] + __w = _wm[m_key] + assert np.isclose(__w, _w).all() + + # test dices mask correction alphas = dices.mask_correction(mls0, mls0) _cls = heracles.unmixing._natural_unmixing(cls0, alphas) for key in list(cls0.keys()): From f86f7101a26fcebff0bcf3a384af14a7c2686a09 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Wed, 26 Nov 2025 09:41:00 +0000 Subject: [PATCH 04/12] more tests --- tests/test_dices.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_dices.py 
b/tests/test_dices.py index 0487fcbf..e08fff01 100644 --- a/tests/test_dices.py +++ b/tests/test_dices.py @@ -101,7 +101,7 @@ def test_mask_correction(cls0, mls0): _wm = np.abs(_wm) _wm /= np.max(_wm) wm[m_key] = _wm - _wm = heracles.correct_correlation(wm, rtol= _wm) + _wm = heracles.correct_correlation(wm, rtol=_wm) for m_key in m_keys: _w = wm[m_key] __w = _wm[m_key] From 9c87969212362db2beb26359f9ab52a7f2fe4c85 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Wed, 26 Nov 2025 09:50:38 +0000 Subject: [PATCH 05/12] bug --- tests/test_dices.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/test_dices.py b/tests/test_dices.py index e08fff01..dbf3af32 100644 --- a/tests/test_dices.py +++ b/tests/test_dices.py @@ -92,6 +92,20 @@ def test_get_delete2_fsky(jk_maps, njk): def test_mask_correction(cls0, mls0): + # test natural umixing + wm = {} + m_keys = list(mls0.keys()) + for m_key in m_keys: + _m = mls0[m_key].array + _wm = heracles.transforms.cl2corr(_m).T[0] + wm[m_key] = _wm + cls = heracles.unmixing.natural_unmixing(cls0, mls0) + _cls = heracles.unmixing._natural_unmixing(cls0, wm) + for key in list(cls0.keys()): + cl = cls[key].array + _cl = _cls[key].array + assert np.isclose(cl[2:], _cl[2:]).all() + # test logistic correction wm = {} m_keys = list(mls0.keys()) From 9c74b6ce7e42bc345e9b2021fbbfc78e04f7ee08 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Wed, 26 Nov 2025 10:45:00 +0000 Subject: [PATCH 06/12] upd --- heracles/unmixing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/heracles/unmixing.py b/heracles/unmixing.py index 172339d3..3fb5bfdb 100644 --- a/heracles/unmixing.py +++ b/heracles/unmixing.py @@ -63,7 +63,7 @@ def natural_unmixing(d, m, options={}, rtol=0.2, smoothing=50): """ Natural unmixing of the data Cl. Args: - d: Data Cl + d: data cls m: mask cls patch_hole: If True, apply the patch hole correction Returns: @@ -89,7 +89,7 @@ def _natural_unmixing(d, wm): """ Natural unmixing of the data Cl. 
Args: - d: Data Cl + d: data cls m: mask correlation function patch_hole: If True, apply the patch hole correction Returns: From 18c5ffbb42362ccc344bf01ce72da6f834c794a7 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Wed, 26 Nov 2025 14:50:41 +0000 Subject: [PATCH 07/12] upd --- heracles/dices/jackknife.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/heracles/dices/jackknife.py b/heracles/dices/jackknife.py index 815d554a..da6083da 100644 --- a/heracles/dices/jackknife.py +++ b/heracles/dices/jackknife.py @@ -209,10 +209,10 @@ def mask_correction(Mljk, Mls0, options={}, rtol=0.2, smoothing=50): """ Internal method to compute the mask correction. input: - Mljk (np.array): mask of delete1 Cls - Mls0 (np.array): mask Cls + Mljk: mask of delete1 Cls + Mls0: mask Cls returns: - alpha (Float64): Mask correction factor + alpha: Mask correction factor """ alphas = {} for key in list(Mljk.keys()): From 63d234d94441caff63c09f3b6f034018723c8965 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Wed, 26 Nov 2025 17:12:56 +0000 Subject: [PATCH 08/12] make utils general --- heracles/dices/__init__.py | 2 +- heracles/dices/jackknife.py | 2 +- heracles/dices/shrinkage.py | 4 +- heracles/dices/utils.py | 215 ------------------------------------ tests/test_dices.py | 6 +- tests/test_transforms.py | 2 +- tests/test_utils.py | 16 +-- 7 files changed, 16 insertions(+), 231 deletions(-) delete mode 100644 heracles/dices/utils.py diff --git a/heracles/dices/__init__.py b/heracles/dices/__init__.py index f3a72604..92cf85ec 100644 --- a/heracles/dices/__init__.py +++ b/heracles/dices/__init__.py @@ -58,7 +58,7 @@ shrinkage_factor, gaussian_covariance, ) -from .utils import ( +from ..utils import ( impose_correlation, get_cl, flatten, diff --git a/heracles/dices/jackknife.py b/heracles/dices/jackknife.py index da6083da..49698038 100644 --- a/heracles/dices/jackknife.py +++ b/heracles/dices/jackknife.py @@ -20,7 +20,7 @@ import itertools from copy import deepcopy from 
itertools import combinations -from .utils import add_to_Cls, sub_to_Cls +from ..utils import add_to_Cls, sub_to_Cls from ..core import update_metadata from ..result import Result, get_result_array from ..mapping import transform diff --git a/heracles/dices/shrinkage.py b/heracles/dices/shrinkage.py index de0f118f..9aea834d 100644 --- a/heracles/dices/shrinkage.py +++ b/heracles/dices/shrinkage.py @@ -18,7 +18,7 @@ # License along with DICES. If not, see . import numpy as np import itertools -from .utils import ( +from ..utils import ( expand_spin0_dims, squeeze_spin0_dims, ) @@ -29,7 +29,7 @@ from .jackknife import ( bias, ) -from .utils import ( +from ..utils import ( add_to_Cls, impose_correlation, get_cl, diff --git a/heracles/dices/utils.py b/heracles/dices/utils.py deleted file mode 100644 index 96a76fc9..00000000 --- a/heracles/dices/utils.py +++ /dev/null @@ -1,215 +0,0 @@ -# DICES: Euclid code for harmonic-space statistics on the sphere -# -# Copyright (C) 2023-2024 Euclid Science Ground Segment -# -# This file is part of DICES. -# -# DICES is free software: you can redistribute it and/or modify it -# under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# DICES is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with DICES. If not, see . -import numpy as np - -try: - from copy import replace -except ImportError: - # Python < 3.13 - from dataclasses import replace - - -def get_cl(key, cls): - """ - Internal method to get a Cl from a dictionary of Cls. - Check if the key exists if not tries to find the symmetric key. 
- input: - key: key of the Cl - cls: dictionary of Cls - returns: - cl: Cl - """ - if key in cls: - return cls[key] - else: - a, b, i, j = key - key_sym = (b, a, j, i) - if key_sym in cls: - arr = cls[key_sym].array - s1, s2 = cls[key_sym].spin - if s1 != 0 and s2 != 0: - arr = np.transpose(arr, axes=(1, 0, 2)) - # always transpose spins - s1, s2 = s2, s1 - else: - raise KeyError(f"Key {key} not found in Cls.") - return replace(cls[key_sym], array=arr, spin=(s1, s2)) - - -def add_to_Cls(cls, x): - """ - Adds a dictionary of Cl values to another. - input: - Cls: dictionary of Cl values - x: dictionary of Cl values - returns: - Cls: updated dictionary of Cl values - """ - _cls = {} - for key in cls.keys(): - arr = cls[key].array + x[key] - _cls[key] = replace(cls[key], array=arr) - return _cls - - -def sub_to_Cls(cls, x): - """ - Substracts a dictionary of Cl values to another. - input: - Cls: dictionary of Cl values - x: dictionary of Cl values - returns: - Cls: updated dictionary of Cl values - """ - _cls = {} - for key in cls.keys(): - arr = cls[key].array - x[key] - _cls[key] = replace(cls[key], array=arr) - return _cls - - -def expand_spin0_dims(result): - """ - Expands spin-0 dimensions into axes of length 1. - """ - offset = 0 - shape = list(result.shape) - for i, s in enumerate(result.spin): - if s == 0: - shape.insert(i, 1) - offset += 1 - arr = result.array.reshape(*shape) - new_axes = tuple(a + offset for a in result.axis) - return replace(result, array=arr, axis=new_axes) - - -def squeeze_spin0_dims(result): - """ - Remove spin-0 dimensions. - """ - offset = 0 - shape = list(result.shape) - for i, s in enumerate(result.spin): - if s == 0: - dim = shape.pop(i - offset) - assert dim == 1, "found spin-0 axis of size != 1" - offset += 1 - arr = result.array.reshape(*shape) - new_axes = tuple(a - offset for a in result.axis) - return replace(result, array=arr, axis=new_axes) - - -def impose_correlation(cov_a, cov_b): - """ - Imposes the correlation of b to a. 
- input: - a: dictionary of covariance matrices - b: dictionary of covariance matrices - returns: - a: updated dictionary of covariance matrices - """ - cov_c = {} - for key in cov_a: - a = cov_a[key] - b = cov_b[key] - a_v = np.diagonal(a, axis1=-2, axis2=-1) - b_v = np.diagonal(b, axis1=-2, axis2=-1) - a_std = np.sqrt(a_v[..., None, :]) - b_std = np.sqrt(b_v[..., None, :]) - c = a * (b_std * np.swapaxes(b_std, -1, -2)) - c /= a_std * np.swapaxes(a_std, -1, -2) - cov_c[key] = replace(a, array=c) - return cov_c - - -def _flatten(result): - a = result.array - axis = len(result.axis) - if axis == 1: - s1, s2 = result.spin - dof1 = 1 if s1 == 0 else 2 - dof2 = 1 if s2 == 0 else 2 - ell = a.shape[-1] - b = a.reshape(dof1 * dof2, ell) - b = b.transpose(0, 1).reshape(dof1 * dof2 * ell) - elif axis == 2: - s1, s2, s3, s4 = result.spin - dof1 = 1 if s1 == 0 else 2 - dof2 = 1 if s2 == 0 else 2 - dof3 = 1 if s3 == 0 else 2 - dof4 = 1 if s4 == 0 else 2 - ell = a.shape[-1] - b = ( - a.reshape(dof1 * dof2, dof3 * dof4, ell, ell) - .transpose(0, 2, 1, 3) - .reshape(dof1 * dof2 * ell, dof3 * dof4 * ell) - ) - else: - raise NotImplementedError("Flattening for >2 axes not implemented yet.") - return b - - -def flatten(results, order=None): - # Flatten each block - blocks_dict = {} - for key, result in results.items(): - blocks_dict[key] = _flatten(result) - - # check that all results have the same length axis - axis = [len(result.axis) for result in results.values()] - axis = np.unique(axis) - if len(axis) != 1: - raise ValueError("All results must have the same length axis to flatten.") - else: - axis = axis[0] - - if axis == 1: - # Stack all blocks vertically - return np.concatenate(list(blocks_dict.values())) - elif axis == 2: - # Infer order if not provided - if order is None: - keys = list(blocks_dict.keys()) - keys = [(key[0], key[1], key[4], key[5]) for key in keys] - order = list(set(keys)) - - # Build row by row - block_rows = [] - for key_i in order: - row_blocks = [] - 
for key_j in order: - a1, b1, i1, j1 = key_i - a2, b2, i2, j2 = key_j - cov_key = (a1, b1, a2, b2, i1, j1, i2, j2) - block = blocks_dict.get((cov_key)) - if block is None: - # If only the transpose block exists, use it transposed - sym_cov_key = (a2, b2, a1, b1, i2, j2, i1, j1) - transpose_block = blocks_dict.get((sym_cov_key)) - if transpose_block is not None: - block = transpose_block.T - else: - raise KeyError(f"Missing block for {cov_key}") - row_blocks.append(block) - block_rows.append(row_blocks) - - # Use np.block to assemble the full covariance matrix - return np.block(block_rows) - else: - raise NotImplementedError("Flattening for axis != 2 not implemented yet.") diff --git a/tests/test_dices.py b/tests/test_dices.py index dbf3af32..7acfc3a7 100644 --- a/tests/test_dices.py +++ b/tests/test_dices.py @@ -267,7 +267,7 @@ def test_shrinkage(cov_jk): def test_flatten_cls(nside, cls0): - from heracles.dices.utils import _flatten, flatten + from heracles.utils import _flatten, flatten # Check that the individual blocks are flattened correctly for key in cls0.keys(): @@ -285,7 +285,7 @@ def test_flatten_cls(nside, cls0): def test_flatten_cov(nside, cov_jk): - from heracles.dices.utils import _flatten, flatten + from heracles.utils import _flatten, flatten # Check that the individual blocks are flattened correctly for key in cov_jk.keys(): @@ -320,7 +320,7 @@ def test_gauss_cov(cls0, cov_jk): # We want to undo the bias that we will add later # for an easy check bias = dices.jackknife.bias(_cls0) - _cls0 = dices.utils.sub_to_Cls(_cls0, bias) + _cls0 = heracles.utils.sub_to_Cls(_cls0, bias) # Compute Gaussian covariance gauss_cov = dices.gaussian_covariance(_cls0) diff --git a/tests/test_transforms.py b/tests/test_transforms.py index e74cf0f3..5e9b9a55 100644 --- a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -3,7 +3,7 @@ def test_cl_transform(cls0): - from heracles.dices.utils import get_cl + from heracles.utils import get_cl cl = np.array( [ diff 
--git a/tests/test_utils.py b/tests/test_utils.py index 20883890..c36f76e9 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -8,8 +8,8 @@ def test_add_to_cls(): cls[("P", "P", 1, 1)] = heracles.Result(np.ones(10)) x = {} x[("P", "P", 1, 1)] = -1.0 - _cls = dices.utils.add_to_Cls(cls, x) - __cls = dices.utils.sub_to_Cls(_cls, x) + _cls = heracles.utils.add_to_Cls(cls, x) + __cls = heracles.utils.sub_to_Cls(_cls, x) for key in list(cls.keys()): assert np.all(_cls[key] == np.zeros(10)) assert np.all(cls[key].__array__() == __cls[key].__array__()) @@ -25,9 +25,9 @@ def test_get_cl(): np.array([[a, ab], [ba, a]]), spin=(2, 2) ) - cl = dices.utils.get_cl(("POS", "SHE", 1, 1), cls) + cl = heracles.utils.get_cl(("POS", "SHE", 1, 1), cls) assert np.all(cl == np.array([a, a])) - cl = dices.utils.get_cl(("SHE", "SHE", 1, 2), cls) + cl = heracles.utils.get_cl(("SHE", "SHE", 1, 2), cls) assert np.all(cl == np.array([[a, ba], [ab, a]])) @@ -37,11 +37,11 @@ def test_expand_squeeze_spin0_dims(cls0, cov_jk): s1, s2 = cl.spin dof1 = 1 if s1 == 0 else 2 dof2 = 1 if s2 == 0 else 2 - _cl = dices.utils.expand_spin0_dims(cl) + _cl = heracles.utils.expand_spin0_dims(cl) (_ax,) = _cl.axis assert _cl.shape == (dof1, dof2, cl.shape[-1]) assert _ax == 2 - __cl = dices.utils.squeeze_spin0_dims(_cl) + __cl = heracles.utils.squeeze_spin0_dims(_cl) assert np.all(cl.__array__() == __cl.__array__()) assert cl.axis == __cl.axis @@ -52,7 +52,7 @@ def test_expand_squeeze_spin0_dims(cls0, cov_jk): dof_b1 = 1 if sb1 == 0 else 2 dof_a2 = 1 if sa2 == 0 else 2 dof_b2 = 1 if sb2 == 0 else 2 - _cov = dices.utils.expand_spin0_dims(cov) + _cov = heracles.utils.expand_spin0_dims(cov) _ax1, _ax2 = _cov.axis assert _cov.shape == ( dof_a1, @@ -63,6 +63,6 @@ def test_expand_squeeze_spin0_dims(cls0, cov_jk): cov.shape[-1], ) assert (_ax1, _ax2) == (4, 5) - __cov = dices.utils.squeeze_spin0_dims(_cov) + __cov = heracles.utils.squeeze_spin0_dims(_cov) assert np.all(cov.__array__() == __cov.__array__()) 
assert cov.axis == __cov.axis From f6ae99661a8d87d7e8f654ac61a9880f35a85ca9 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Wed, 26 Nov 2025 17:13:20 +0000 Subject: [PATCH 09/12] moving utls --- heracles/utils.py | 215 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 heracles/utils.py diff --git a/heracles/utils.py b/heracles/utils.py new file mode 100644 index 00000000..96a76fc9 --- /dev/null +++ b/heracles/utils.py @@ -0,0 +1,215 @@ +# DICES: Euclid code for harmonic-space statistics on the sphere +# +# Copyright (C) 2023-2024 Euclid Science Ground Segment +# +# This file is part of DICES. +# +# DICES is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# DICES is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with DICES. If not, see . +import numpy as np + +try: + from copy import replace +except ImportError: + # Python < 3.13 + from dataclasses import replace + + +def get_cl(key, cls): + """ + Internal method to get a Cl from a dictionary of Cls. + Check if the key exists if not tries to find the symmetric key. 
+ input: + key: key of the Cl + cls: dictionary of Cls + returns: + cl: Cl + """ + if key in cls: + return cls[key] + else: + a, b, i, j = key + key_sym = (b, a, j, i) + if key_sym in cls: + arr = cls[key_sym].array + s1, s2 = cls[key_sym].spin + if s1 != 0 and s2 != 0: + arr = np.transpose(arr, axes=(1, 0, 2)) + # always transpose spins + s1, s2 = s2, s1 + else: + raise KeyError(f"Key {key} not found in Cls.") + return replace(cls[key_sym], array=arr, spin=(s1, s2)) + + +def add_to_Cls(cls, x): + """ + Adds a dictionary of Cl values to another. + input: + Cls: dictionary of Cl values + x: dictionary of Cl values + returns: + Cls: updated dictionary of Cl values + """ + _cls = {} + for key in cls.keys(): + arr = cls[key].array + x[key] + _cls[key] = replace(cls[key], array=arr) + return _cls + + +def sub_to_Cls(cls, x): + """ + Substracts a dictionary of Cl values to another. + input: + Cls: dictionary of Cl values + x: dictionary of Cl values + returns: + Cls: updated dictionary of Cl values + """ + _cls = {} + for key in cls.keys(): + arr = cls[key].array - x[key] + _cls[key] = replace(cls[key], array=arr) + return _cls + + +def expand_spin0_dims(result): + """ + Expands spin-0 dimensions into axes of length 1. + """ + offset = 0 + shape = list(result.shape) + for i, s in enumerate(result.spin): + if s == 0: + shape.insert(i, 1) + offset += 1 + arr = result.array.reshape(*shape) + new_axes = tuple(a + offset for a in result.axis) + return replace(result, array=arr, axis=new_axes) + + +def squeeze_spin0_dims(result): + """ + Remove spin-0 dimensions. + """ + offset = 0 + shape = list(result.shape) + for i, s in enumerate(result.spin): + if s == 0: + dim = shape.pop(i - offset) + assert dim == 1, "found spin-0 axis of size != 1" + offset += 1 + arr = result.array.reshape(*shape) + new_axes = tuple(a - offset for a in result.axis) + return replace(result, array=arr, axis=new_axes) + + +def impose_correlation(cov_a, cov_b): + """ + Imposes the correlation of b to a. 
+ input: + a: dictionary of covariance matrices + b: dictionary of covariance matrices + returns: + a: updated dictionary of covariance matrices + """ + cov_c = {} + for key in cov_a: + a = cov_a[key] + b = cov_b[key] + a_v = np.diagonal(a, axis1=-2, axis2=-1) + b_v = np.diagonal(b, axis1=-2, axis2=-1) + a_std = np.sqrt(a_v[..., None, :]) + b_std = np.sqrt(b_v[..., None, :]) + c = a * (b_std * np.swapaxes(b_std, -1, -2)) + c /= a_std * np.swapaxes(a_std, -1, -2) + cov_c[key] = replace(a, array=c) + return cov_c + + +def _flatten(result): + a = result.array + axis = len(result.axis) + if axis == 1: + s1, s2 = result.spin + dof1 = 1 if s1 == 0 else 2 + dof2 = 1 if s2 == 0 else 2 + ell = a.shape[-1] + b = a.reshape(dof1 * dof2, ell) + b = b.transpose(0, 1).reshape(dof1 * dof2 * ell) + elif axis == 2: + s1, s2, s3, s4 = result.spin + dof1 = 1 if s1 == 0 else 2 + dof2 = 1 if s2 == 0 else 2 + dof3 = 1 if s3 == 0 else 2 + dof4 = 1 if s4 == 0 else 2 + ell = a.shape[-1] + b = ( + a.reshape(dof1 * dof2, dof3 * dof4, ell, ell) + .transpose(0, 2, 1, 3) + .reshape(dof1 * dof2 * ell, dof3 * dof4 * ell) + ) + else: + raise NotImplementedError("Flattening for >2 axes not implemented yet.") + return b + + +def flatten(results, order=None): + # Flatten each block + blocks_dict = {} + for key, result in results.items(): + blocks_dict[key] = _flatten(result) + + # check that all results have the same length axis + axis = [len(result.axis) for result in results.values()] + axis = np.unique(axis) + if len(axis) != 1: + raise ValueError("All results must have the same length axis to flatten.") + else: + axis = axis[0] + + if axis == 1: + # Stack all blocks vertically + return np.concatenate(list(blocks_dict.values())) + elif axis == 2: + # Infer order if not provided + if order is None: + keys = list(blocks_dict.keys()) + keys = [(key[0], key[1], key[4], key[5]) for key in keys] + order = list(set(keys)) + + # Build row by row + block_rows = [] + for key_i in order: + row_blocks = [] + 
for key_j in order: + a1, b1, i1, j1 = key_i + a2, b2, i2, j2 = key_j + cov_key = (a1, b1, a2, b2, i1, j1, i2, j2) + block = blocks_dict.get((cov_key)) + if block is None: + # If only the transpose block exists, use it transposed + sym_cov_key = (a2, b2, a1, b1, i2, j2, i1, j1) + transpose_block = blocks_dict.get((sym_cov_key)) + if transpose_block is not None: + block = transpose_block.T + else: + raise KeyError(f"Missing block for {cov_key}") + row_blocks.append(block) + block_rows.append(row_blocks) + + # Use np.block to assemble the full covariance matrix + return np.block(block_rows) + else: + raise NotImplementedError("Flattening for axis != 2 not implemented yet.") From aebaf697ebc8d5a0ce35214e410c67ea46ecaa72 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Wed, 26 Nov 2025 18:02:04 +0000 Subject: [PATCH 10/12] horrible PR --- heracles/dices/jackknife.py | 17 ++++++++++--- heracles/unmixing.py | 48 +++++++++++++++++++++++++++---------- tests/test_dices.py | 30 ++++++++++++++++------- tests/test_utils.py | 1 - 4 files changed, 71 insertions(+), 25 deletions(-) diff --git a/heracles/dices/jackknife.py b/heracles/dices/jackknife.py index 49698038..fb7db66f 100644 --- a/heracles/dices/jackknife.py +++ b/heracles/dices/jackknife.py @@ -57,7 +57,7 @@ def jackknife_cls(data_maps, vis_maps, jk_maps, fields, nd=1): _cls = get_cls(data_maps, jk_maps, fields, *regions) _cls_mm = get_cls(vis_maps, jk_maps, fields, *regions) # Mask correction - alphas = mask_correction(_cls_mm, mls0) + alphas = mask_correction(_cls_mm, mls0, fields) _cls = _natural_unmixing(_cls, alphas) # Bias correction _cls = correct_bias(_cls, jk_maps, fields, *regions) @@ -205,7 +205,7 @@ def correct_bias(cls, jkmaps, fields, jk=0, jk2=0): return cls -def mask_correction(Mljk, Mls0, options={}, rtol=0.2, smoothing=50): +def mask_correction(Mljk, Mls0, fields, options={}, rtol=0.2, smoothing=50): """ Internal method to compute the mask correction. 
input: @@ -214,8 +214,19 @@ def mask_correction(Mljk, Mls0, options={}, rtol=0.2, smoothing=50): returns: alpha: Mask correction factor """ + # inverse mapping of masks to fields + masks = {} + for key, field in fields.items(): + if field.mask is not None: + masks[field.mask] = key + + alphas = {} for key in list(Mljk.keys()): + a, b, i, j = key + # Get corresponding mask keys + a = masks[a] + b = masks[b] mljk = Mljk[key] mls0 = Mls0[key] # Transform to real space @@ -225,7 +236,7 @@ def mask_correction(Mljk, Mls0, options={}, rtol=0.2, smoothing=50): wmljk = wmljk.T[0] # Compute alpha alpha = wmljk / wmls0 - alphas[key] = alpha + alphas[(a, b, i, j)] = alpha corr_alphas = correct_correlation( alphas, options=options, diff --git a/heracles/unmixing.py b/heracles/unmixing.py index 3fb5bfdb..b5e762aa 100644 --- a/heracles/unmixing.py +++ b/heracles/unmixing.py @@ -19,6 +19,7 @@ import numpy as np from .result import truncated from .transforms import cl2corr, corr2cl +from .utils import get_cl try: from copy import replace @@ -59,22 +60,34 @@ def correct_correlation(wm, options={}, rtol=0.2, smoothing=50): return corr_wm -def natural_unmixing(d, m, options={}, rtol=0.2, smoothing=50): +def natural_unmixing(d, m, fields, options={}, rtol=0.2, smoothing=50): """ Natural unmixing of the data Cl. 
Args: d: data cls m: mask cls + fields: list of fields patch_hole: If True, apply the patch hole correction Returns: corr_d: Corrected Cl """ + # inverse mapping of masks to fields + masks = {} + for key, field in fields.items(): + if field.mask is not None: + masks[field.mask] = key + wm = {} m_keys = list(m.keys()) for m_key in m_keys: + a, b, i, j = m_key + # Get corresponding mask keys + a = masks[a] + b = masks[b] + # Transform to real space _m = m[m_key].array _wm = cl2corr(_m).T[0] - wm[m_key] = _wm + wm[(a, b, i, j)] = _wm wm = correct_correlation( wm, @@ -96,24 +109,33 @@ def _natural_unmixing(d, wm): corr_d: Corrected Cl """ corr_d = {} - d_keys = list(d.keys()) - wm_keys = list(wm.keys()) - for d_key, wm_key in zip(d_keys, wm_keys): - ell = d[d_key].ell + for key in list(d.keys()): + ell = d[key].ell if ell is None: - *_, lmax = d[d_key].shape + *_, lmax = d[key].shape else: lmax = ell[-1] + 1 - s1, s2 = d[d_key].spin - _d = np.atleast_2d(d[d_key]) - _wm = wm[wm_key] - lmax_mask = len(wm[wm_key]) + s1, s2 = d[key].spin + _d = np.atleast_2d(d[key]) + # Get corresponding mask correlation function + if key in wm: + _wm = wm[key] + else: + a, b, i, j = key + if a==b and (a, b, j, i) in wm: + _wm = wm[(a, b, j, i)] + elif (b, a, j, i) in wm: + _wm = wm[(b, a, j, i)] + else: + raise KeyError(f"Key {key} not found in mask correlation functions.") + + lmax_mask = len(_wm) # pad cls pad_width = [(0, 0)] * _d.ndim # no padding for other dims pad_width[-1] = (0, lmax_mask - lmax) # pad only last dim _d = np.pad(_d, pad_width, mode="constant", constant_values=0) # Grab metadata - dtype = d[d_key].array.dtype + dtype = d[key].array.dtype if (s1 != 0) and (s2 != 0): __d = np.array( [ @@ -157,7 +179,7 @@ def _natural_unmixing(d, wm): _corr_d = np.squeeze(_corr_d) # Add metadata back _corr_d = np.array(list(_corr_d), dtype=dtype) - corr_d[d_key] = replace(d[d_key], array=_corr_d) + corr_d[key] = replace(d[key], array=_corr_d) # truncate to lmax corr_d = 
truncated(corr_d, lmax) return corr_d diff --git a/tests/test_dices.py b/tests/test_dices.py index 7acfc3a7..de2892c0 100644 --- a/tests/test_dices.py +++ b/tests/test_dices.py @@ -91,15 +91,24 @@ def test_get_delete2_fsky(jk_maps, njk): assert alpha == pytest.approx(_alpha, rel=1e-1) -def test_mask_correction(cls0, mls0): +def test_mask_correction(cls0, mls0, fields): # test natural umixing + masks = {} + for key, field in fields.items(): + if field.mask is not None: + masks[field.mask] = key wm = {} m_keys = list(mls0.keys()) for m_key in m_keys: + a, b, i, j = m_key + # Get corresponding mask keys + a = masks[a] + b = masks[b] + # Transform to real space _m = mls0[m_key].array _wm = heracles.transforms.cl2corr(_m).T[0] - wm[m_key] = _wm - cls = heracles.unmixing.natural_unmixing(cls0, mls0) + wm[(a, b, i, j)] = _wm + cls = heracles.unmixing.natural_unmixing(cls0, mls0, fields) _cls = heracles.unmixing._natural_unmixing(cls0, wm) for key in list(cls0.keys()): cl = cls[key].array @@ -110,19 +119,24 @@ def test_mask_correction(cls0, mls0): wm = {} m_keys = list(mls0.keys()) for m_key in m_keys: + a, b, i, j = m_key + # Get corresponding mask keys + a = masks[a] + b = masks[b] + # Transform to real space _m = mls0[m_key].array _wm = heracles.transforms.cl2corr(_m).T[0] _wm = np.abs(_wm) _wm /= np.max(_wm) - wm[m_key] = _wm + wm[(a, b, i, j)] = _wm _wm = heracles.correct_correlation(wm, rtol=_wm) - for m_key in m_keys: - _w = wm[m_key] - __w = _wm[m_key] + for key in wm.keys(): + _w = wm[key] + __w = _wm[key] assert np.isclose(__w, _w).all() # test dices mask correction - alphas = dices.mask_correction(mls0, mls0) + alphas = dices.mask_correction(mls0, mls0, fields) _cls = heracles.unmixing._natural_unmixing(cls0, alphas) for key in list(cls0.keys()): cl = cls0[key].array diff --git a/tests/test_utils.py b/tests/test_utils.py index c36f76e9..a66be030 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,6 +1,5 @@ import numpy as np import heracles -import 
heracles.dices as dices def test_add_to_cls(): From 3895cf8f039839b92b8717236b43212e3d74f004 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Wed, 26 Nov 2025 18:18:24 +0000 Subject: [PATCH 11/12] ruff --- heracles/dices/__init__.py | 2 +- heracles/dices/jackknife.py | 3 +- heracles/dices/shrinkage.py | 4 +- heracles/unmixing.py | 3 +- heracles/utils.py | 215 ------------------------------------ tests/test_dices.py | 6 +- tests/test_transforms.py | 2 +- tests/test_utils.py | 17 +-- 8 files changed, 18 insertions(+), 234 deletions(-) delete mode 100644 heracles/utils.py diff --git a/heracles/dices/__init__.py b/heracles/dices/__init__.py index 92cf85ec..f3a72604 100644 --- a/heracles/dices/__init__.py +++ b/heracles/dices/__init__.py @@ -58,7 +58,7 @@ shrinkage_factor, gaussian_covariance, ) -from ..utils import ( +from .utils import ( impose_correlation, get_cl, flatten, diff --git a/heracles/dices/jackknife.py b/heracles/dices/jackknife.py index fb7db66f..83407f3e 100644 --- a/heracles/dices/jackknife.py +++ b/heracles/dices/jackknife.py @@ -20,7 +20,7 @@ import itertools from copy import deepcopy from itertools import combinations -from ..utils import add_to_Cls, sub_to_Cls +from .utils import add_to_Cls, sub_to_Cls from ..core import update_metadata from ..result import Result, get_result_array from ..mapping import transform @@ -220,7 +220,6 @@ def mask_correction(Mljk, Mls0, fields, options={}, rtol=0.2, smoothing=50): if field.mask is not None: masks[field.mask] = key - alphas = {} for key in list(Mljk.keys()): a, b, i, j = key diff --git a/heracles/dices/shrinkage.py b/heracles/dices/shrinkage.py index 9aea834d..de0f118f 100644 --- a/heracles/dices/shrinkage.py +++ b/heracles/dices/shrinkage.py @@ -18,7 +18,7 @@ # License along with DICES. If not, see . 
import numpy as np import itertools -from ..utils import ( +from .utils import ( expand_spin0_dims, squeeze_spin0_dims, ) @@ -29,7 +29,7 @@ from .jackknife import ( bias, ) -from ..utils import ( +from .utils import ( add_to_Cls, impose_correlation, get_cl, diff --git a/heracles/unmixing.py b/heracles/unmixing.py index b5e762aa..7476d182 100644 --- a/heracles/unmixing.py +++ b/heracles/unmixing.py @@ -19,7 +19,6 @@ import numpy as np from .result import truncated from .transforms import cl2corr, corr2cl -from .utils import get_cl try: from copy import replace @@ -122,7 +121,7 @@ def _natural_unmixing(d, wm): _wm = wm[key] else: a, b, i, j = key - if a==b and (a, b, j, i) in wm: + if a == b and (a, b, j, i) in wm: _wm = wm[(a, b, j, i)] elif (b, a, j, i) in wm: _wm = wm[(b, a, j, i)] diff --git a/heracles/utils.py b/heracles/utils.py deleted file mode 100644 index 96a76fc9..00000000 --- a/heracles/utils.py +++ /dev/null @@ -1,215 +0,0 @@ -# DICES: Euclid code for harmonic-space statistics on the sphere -# -# Copyright (C) 2023-2024 Euclid Science Ground Segment -# -# This file is part of DICES. -# -# DICES is free software: you can redistribute it and/or modify it -# under the terms of the GNU Lesser General Public License as published -# by the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# DICES is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -# Lesser General Public License for more details. -# -# You should have received a copy of the GNU Lesser General Public -# License along with DICES. If not, see . -import numpy as np - -try: - from copy import replace -except ImportError: - # Python < 3.13 - from dataclasses import replace - - -def get_cl(key, cls): - """ - Internal method to get a Cl from a dictionary of Cls. 
- Check if the key exists if not tries to find the symmetric key. - input: - key: key of the Cl - cls: dictionary of Cls - returns: - cl: Cl - """ - if key in cls: - return cls[key] - else: - a, b, i, j = key - key_sym = (b, a, j, i) - if key_sym in cls: - arr = cls[key_sym].array - s1, s2 = cls[key_sym].spin - if s1 != 0 and s2 != 0: - arr = np.transpose(arr, axes=(1, 0, 2)) - # always transpose spins - s1, s2 = s2, s1 - else: - raise KeyError(f"Key {key} not found in Cls.") - return replace(cls[key_sym], array=arr, spin=(s1, s2)) - - -def add_to_Cls(cls, x): - """ - Adds a dictionary of Cl values to another. - input: - Cls: dictionary of Cl values - x: dictionary of Cl values - returns: - Cls: updated dictionary of Cl values - """ - _cls = {} - for key in cls.keys(): - arr = cls[key].array + x[key] - _cls[key] = replace(cls[key], array=arr) - return _cls - - -def sub_to_Cls(cls, x): - """ - Substracts a dictionary of Cl values to another. - input: - Cls: dictionary of Cl values - x: dictionary of Cl values - returns: - Cls: updated dictionary of Cl values - """ - _cls = {} - for key in cls.keys(): - arr = cls[key].array - x[key] - _cls[key] = replace(cls[key], array=arr) - return _cls - - -def expand_spin0_dims(result): - """ - Expands spin-0 dimensions into axes of length 1. - """ - offset = 0 - shape = list(result.shape) - for i, s in enumerate(result.spin): - if s == 0: - shape.insert(i, 1) - offset += 1 - arr = result.array.reshape(*shape) - new_axes = tuple(a + offset for a in result.axis) - return replace(result, array=arr, axis=new_axes) - - -def squeeze_spin0_dims(result): - """ - Remove spin-0 dimensions. 
- """ - offset = 0 - shape = list(result.shape) - for i, s in enumerate(result.spin): - if s == 0: - dim = shape.pop(i - offset) - assert dim == 1, "found spin-0 axis of size != 1" - offset += 1 - arr = result.array.reshape(*shape) - new_axes = tuple(a - offset for a in result.axis) - return replace(result, array=arr, axis=new_axes) - - -def impose_correlation(cov_a, cov_b): - """ - Imposes the correlation of b to a. - input: - a: dictionary of covariance matrices - b: dictionary of covariance matrices - returns: - a: updated dictionary of covariance matrices - """ - cov_c = {} - for key in cov_a: - a = cov_a[key] - b = cov_b[key] - a_v = np.diagonal(a, axis1=-2, axis2=-1) - b_v = np.diagonal(b, axis1=-2, axis2=-1) - a_std = np.sqrt(a_v[..., None, :]) - b_std = np.sqrt(b_v[..., None, :]) - c = a * (b_std * np.swapaxes(b_std, -1, -2)) - c /= a_std * np.swapaxes(a_std, -1, -2) - cov_c[key] = replace(a, array=c) - return cov_c - - -def _flatten(result): - a = result.array - axis = len(result.axis) - if axis == 1: - s1, s2 = result.spin - dof1 = 1 if s1 == 0 else 2 - dof2 = 1 if s2 == 0 else 2 - ell = a.shape[-1] - b = a.reshape(dof1 * dof2, ell) - b = b.transpose(0, 1).reshape(dof1 * dof2 * ell) - elif axis == 2: - s1, s2, s3, s4 = result.spin - dof1 = 1 if s1 == 0 else 2 - dof2 = 1 if s2 == 0 else 2 - dof3 = 1 if s3 == 0 else 2 - dof4 = 1 if s4 == 0 else 2 - ell = a.shape[-1] - b = ( - a.reshape(dof1 * dof2, dof3 * dof4, ell, ell) - .transpose(0, 2, 1, 3) - .reshape(dof1 * dof2 * ell, dof3 * dof4 * ell) - ) - else: - raise NotImplementedError("Flattening for >2 axes not implemented yet.") - return b - - -def flatten(results, order=None): - # Flatten each block - blocks_dict = {} - for key, result in results.items(): - blocks_dict[key] = _flatten(result) - - # check that all results have the same length axis - axis = [len(result.axis) for result in results.values()] - axis = np.unique(axis) - if len(axis) != 1: - raise ValueError("All results must have the same length 
axis to flatten.") - else: - axis = axis[0] - - if axis == 1: - # Stack all blocks vertically - return np.concatenate(list(blocks_dict.values())) - elif axis == 2: - # Infer order if not provided - if order is None: - keys = list(blocks_dict.keys()) - keys = [(key[0], key[1], key[4], key[5]) for key in keys] - order = list(set(keys)) - - # Build row by row - block_rows = [] - for key_i in order: - row_blocks = [] - for key_j in order: - a1, b1, i1, j1 = key_i - a2, b2, i2, j2 = key_j - cov_key = (a1, b1, a2, b2, i1, j1, i2, j2) - block = blocks_dict.get((cov_key)) - if block is None: - # If only the transpose block exists, use it transposed - sym_cov_key = (a2, b2, a1, b1, i2, j2, i1, j1) - transpose_block = blocks_dict.get((sym_cov_key)) - if transpose_block is not None: - block = transpose_block.T - else: - raise KeyError(f"Missing block for {cov_key}") - row_blocks.append(block) - block_rows.append(row_blocks) - - # Use np.block to assemble the full covariance matrix - return np.block(block_rows) - else: - raise NotImplementedError("Flattening for axis != 2 not implemented yet.") diff --git a/tests/test_dices.py b/tests/test_dices.py index de2892c0..87989330 100644 --- a/tests/test_dices.py +++ b/tests/test_dices.py @@ -281,7 +281,7 @@ def test_shrinkage(cov_jk): def test_flatten_cls(nside, cls0): - from heracles.utils import _flatten, flatten + from heracles.dices.utils import _flatten, flatten # Check that the individual blocks are flattened correctly for key in cls0.keys(): @@ -299,7 +299,7 @@ def test_flatten_cls(nside, cls0): def test_flatten_cov(nside, cov_jk): - from heracles.utils import _flatten, flatten + from heracles.dices.utils import _flatten, flatten # Check that the individual blocks are flattened correctly for key in cov_jk.keys(): @@ -334,7 +334,7 @@ def test_gauss_cov(cls0, cov_jk): # We want to undo the bias that we will add later # for an easy check bias = dices.jackknife.bias(_cls0) - _cls0 = heracles.utils.sub_to_Cls(_cls0, bias) + _cls0 = 
dices.utils.sub_to_Cls(_cls0, bias) # Compute Gaussian covariance gauss_cov = dices.gaussian_covariance(_cls0) diff --git a/tests/test_transforms.py b/tests/test_transforms.py index 5e9b9a55..e74cf0f3 100644 --- a/tests/test_transforms.py +++ b/tests/test_transforms.py @@ -3,7 +3,7 @@ def test_cl_transform(cls0): - from heracles.utils import get_cl + from heracles.dices.utils import get_cl cl = np.array( [ diff --git a/tests/test_utils.py b/tests/test_utils.py index a66be030..20883890 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -1,5 +1,6 @@ import numpy as np import heracles +import heracles.dices as dices def test_add_to_cls(): @@ -7,8 +8,8 @@ def test_add_to_cls(): cls[("P", "P", 1, 1)] = heracles.Result(np.ones(10)) x = {} x[("P", "P", 1, 1)] = -1.0 - _cls = heracles.utils.add_to_Cls(cls, x) - __cls = heracles.utils.sub_to_Cls(_cls, x) + _cls = dices.utils.add_to_Cls(cls, x) + __cls = dices.utils.sub_to_Cls(_cls, x) for key in list(cls.keys()): assert np.all(_cls[key] == np.zeros(10)) assert np.all(cls[key].__array__() == __cls[key].__array__()) @@ -24,9 +25,9 @@ def test_get_cl(): np.array([[a, ab], [ba, a]]), spin=(2, 2) ) - cl = heracles.utils.get_cl(("POS", "SHE", 1, 1), cls) + cl = dices.utils.get_cl(("POS", "SHE", 1, 1), cls) assert np.all(cl == np.array([a, a])) - cl = heracles.utils.get_cl(("SHE", "SHE", 1, 2), cls) + cl = dices.utils.get_cl(("SHE", "SHE", 1, 2), cls) assert np.all(cl == np.array([[a, ba], [ab, a]])) @@ -36,11 +37,11 @@ def test_expand_squeeze_spin0_dims(cls0, cov_jk): s1, s2 = cl.spin dof1 = 1 if s1 == 0 else 2 dof2 = 1 if s2 == 0 else 2 - _cl = heracles.utils.expand_spin0_dims(cl) + _cl = dices.utils.expand_spin0_dims(cl) (_ax,) = _cl.axis assert _cl.shape == (dof1, dof2, cl.shape[-1]) assert _ax == 2 - __cl = heracles.utils.squeeze_spin0_dims(_cl) + __cl = dices.utils.squeeze_spin0_dims(_cl) assert np.all(cl.__array__() == __cl.__array__()) assert cl.axis == __cl.axis @@ -51,7 +52,7 @@ def 
test_expand_squeeze_spin0_dims(cls0, cov_jk): dof_b1 = 1 if sb1 == 0 else 2 dof_a2 = 1 if sa2 == 0 else 2 dof_b2 = 1 if sb2 == 0 else 2 - _cov = heracles.utils.expand_spin0_dims(cov) + _cov = dices.utils.expand_spin0_dims(cov) _ax1, _ax2 = _cov.axis assert _cov.shape == ( dof_a1, @@ -62,6 +63,6 @@ def test_expand_squeeze_spin0_dims(cls0, cov_jk): cov.shape[-1], ) assert (_ax1, _ax2) == (4, 5) - __cov = heracles.utils.squeeze_spin0_dims(_cov) + __cov = dices.utils.squeeze_spin0_dims(_cov) assert np.all(cov.__array__() == __cov.__array__()) assert cov.axis == __cov.axis From f10c97b535cb16dd1689b3267c4eebe4343b1c92 Mon Sep 17 00:00:00 2001 From: jaimerzp Date: Wed, 26 Nov 2025 18:19:05 +0000 Subject: [PATCH 12/12] missing file somehow --- heracles/dices/utils.py | 215 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 215 insertions(+) create mode 100644 heracles/dices/utils.py diff --git a/heracles/dices/utils.py b/heracles/dices/utils.py new file mode 100644 index 00000000..96a76fc9 --- /dev/null +++ b/heracles/dices/utils.py @@ -0,0 +1,215 @@ +# DICES: Euclid code for harmonic-space statistics on the sphere +# +# Copyright (C) 2023-2024 Euclid Science Ground Segment +# +# This file is part of DICES. +# +# DICES is free software: you can redistribute it and/or modify it +# under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# DICES is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with DICES. If not, see . 
+import numpy as np
+
+try:
+    from copy import replace
+except ImportError:
+    # Python < 3.13
+    from dataclasses import replace
+
+
+def get_cl(key, cls):
+    """
+    Internal method to get a Cl from a dictionary of Cls.
+    Check if the key exists if not tries to find the symmetric key.
+    input:
+        key: key of the Cl
+        cls: dictionary of Cls
+    returns:
+        cl: Cl
+    """
+    if key in cls:
+        return cls[key]
+    else:
+        a, b, i, j = key
+        key_sym = (b, a, j, i)
+        if key_sym in cls:
+            arr = cls[key_sym].array
+            s1, s2 = cls[key_sym].spin
+            if s1 != 0 and s2 != 0:
+                arr = np.transpose(arr, axes=(1, 0, 2))
+            # always transpose spins
+            s1, s2 = s2, s1
+        else:
+            raise KeyError(f"Key {key} not found in Cls.")
+        return replace(cls[key_sym], array=arr, spin=(s1, s2))
+
+
+def add_to_Cls(cls, x):
+    """
+    Adds a dictionary of Cl values to another.
+    input:
+        Cls: dictionary of Cl values
+        x: dictionary of Cl values
+    returns:
+        Cls: updated dictionary of Cl values
+    """
+    _cls = {}
+    for key in cls.keys():
+        arr = cls[key].array + x[key]
+        _cls[key] = replace(cls[key], array=arr)
+    return _cls
+
+
+def sub_to_Cls(cls, x):
+    """
+    Subtracts a dictionary of Cl values from another.
+    input:
+        Cls: dictionary of Cl values
+        x: dictionary of Cl values
+    returns:
+        Cls: updated dictionary of Cl values
+    """
+    _cls = {}
+    for key in cls.keys():
+        arr = cls[key].array - x[key]
+        _cls[key] = replace(cls[key], array=arr)
+    return _cls
+
+
+def expand_spin0_dims(result):
+    """
+    Expands spin-0 dimensions into axes of length 1.
+    """
+    offset = 0
+    shape = list(result.shape)
+    for i, s in enumerate(result.spin):
+        if s == 0:
+            shape.insert(i, 1)
+            offset += 1
+    arr = result.array.reshape(*shape)
+    new_axes = tuple(a + offset for a in result.axis)
+    return replace(result, array=arr, axis=new_axes)
+
+
+def squeeze_spin0_dims(result):
+    """
+    Remove spin-0 dimensions.
+ """ + offset = 0 + shape = list(result.shape) + for i, s in enumerate(result.spin): + if s == 0: + dim = shape.pop(i - offset) + assert dim == 1, "found spin-0 axis of size != 1" + offset += 1 + arr = result.array.reshape(*shape) + new_axes = tuple(a - offset for a in result.axis) + return replace(result, array=arr, axis=new_axes) + + +def impose_correlation(cov_a, cov_b): + """ + Imposes the correlation of b to a. + input: + a: dictionary of covariance matrices + b: dictionary of covariance matrices + returns: + a: updated dictionary of covariance matrices + """ + cov_c = {} + for key in cov_a: + a = cov_a[key] + b = cov_b[key] + a_v = np.diagonal(a, axis1=-2, axis2=-1) + b_v = np.diagonal(b, axis1=-2, axis2=-1) + a_std = np.sqrt(a_v[..., None, :]) + b_std = np.sqrt(b_v[..., None, :]) + c = a * (b_std * np.swapaxes(b_std, -1, -2)) + c /= a_std * np.swapaxes(a_std, -1, -2) + cov_c[key] = replace(a, array=c) + return cov_c + + +def _flatten(result): + a = result.array + axis = len(result.axis) + if axis == 1: + s1, s2 = result.spin + dof1 = 1 if s1 == 0 else 2 + dof2 = 1 if s2 == 0 else 2 + ell = a.shape[-1] + b = a.reshape(dof1 * dof2, ell) + b = b.transpose(0, 1).reshape(dof1 * dof2 * ell) + elif axis == 2: + s1, s2, s3, s4 = result.spin + dof1 = 1 if s1 == 0 else 2 + dof2 = 1 if s2 == 0 else 2 + dof3 = 1 if s3 == 0 else 2 + dof4 = 1 if s4 == 0 else 2 + ell = a.shape[-1] + b = ( + a.reshape(dof1 * dof2, dof3 * dof4, ell, ell) + .transpose(0, 2, 1, 3) + .reshape(dof1 * dof2 * ell, dof3 * dof4 * ell) + ) + else: + raise NotImplementedError("Flattening for >2 axes not implemented yet.") + return b + + +def flatten(results, order=None): + # Flatten each block + blocks_dict = {} + for key, result in results.items(): + blocks_dict[key] = _flatten(result) + + # check that all results have the same length axis + axis = [len(result.axis) for result in results.values()] + axis = np.unique(axis) + if len(axis) != 1: + raise ValueError("All results must have the same length 
axis to flatten.") + else: + axis = axis[0] + + if axis == 1: + # Stack all blocks vertically + return np.concatenate(list(blocks_dict.values())) + elif axis == 2: + # Infer order if not provided + if order is None: + keys = list(blocks_dict.keys()) + keys = [(key[0], key[1], key[4], key[5]) for key in keys] + order = list(set(keys)) + + # Build row by row + block_rows = [] + for key_i in order: + row_blocks = [] + for key_j in order: + a1, b1, i1, j1 = key_i + a2, b2, i2, j2 = key_j + cov_key = (a1, b1, a2, b2, i1, j1, i2, j2) + block = blocks_dict.get((cov_key)) + if block is None: + # If only the transpose block exists, use it transposed + sym_cov_key = (a2, b2, a1, b1, i2, j2, i1, j1) + transpose_block = blocks_dict.get((sym_cov_key)) + if transpose_block is not None: + block = transpose_block.T + else: + raise KeyError(f"Missing block for {cov_key}") + row_blocks.append(block) + block_rows.append(row_blocks) + + # Use np.block to assemble the full covariance matrix + return np.block(block_rows) + else: + raise NotImplementedError("Flattening for axis != 2 not implemented yet.")