From ae7e9b0cc7d6eeef9ea25e109417a95b64f3333a Mon Sep 17 00:00:00 2001 From: Nitin Tony Paul <108007300+nitintonypaul@users.noreply.github.com> Date: Thu, 22 Jan 2026 21:41:23 +0530 Subject: [PATCH 1/2] [ENH] Added `HierarchicalRiskParity` - Added HRP optimizer - fixed minor bug which returns portfolio weights reference instead of copying - Improved documentation with minimal import syntax - Updated README/Documentation --- README.md | 4 +- docs/docs/backtesting.md | 4 +- docs/docs/objectives/dro.md | 10 +- docs/docs/objectives/heuristics.md | 193 ++++++++++++++++- docs/docs/objectives/markowitz.md | 16 +- docs/docs/objectives/online_learning.md | 8 +- docs/docs/objectives/risk_measures.md | 26 +-- docs/docs/objectives/utility_theory.md | 20 +- docs/docs/regularization.md | 7 +- docs/docs/resources/references.md | 1 + opes/__init__.py | 2 +- opes/objectives/__init__.py | 1 + opes/objectives/base_optimizer.py | 2 +- opes/objectives/distributionally_robust.py | 20 +- opes/objectives/heuristics.py | 231 +++++++++++++++++++-- opes/objectives/markowitz.py | 24 +-- opes/objectives/online.py | 14 +- opes/objectives/risk_measures.py | 40 ++-- opes/objectives/utility_theory.py | 30 +-- opes/regularizer.py | 1 + pyproject.toml | 2 +- 21 files changed, 516 insertions(+), 140 deletions(-) diff --git a/README.md b/README.md index 5db42d7..c84b200 100644 --- a/README.md +++ b/README.md @@ -33,6 +33,7 @@ The information provided by OPES is for educational, research and informational | | Maximum Sharpe Ratio | | **Principled Heuristics** | Uniform (1/N) | | | Risk Parity | +| | Hierarchical Risk Parity | | | Inverse Volatility | | | Softmax Mean | | | Maximum Diversification | @@ -201,7 +202,6 @@ Also it eats up RAM like pac-man. These features are still in the works and may or may not appear in later updates: | **Objective Name (Category)** | -| ------------------------------------------------ | -| Hierarchical Risk Parity (Principled Heuristics) | +| ------------------------------------------------ | | Online Newton Step (Online Learning) | | ADA-BARRONS (Online Learning) | diff --git a/docs/docs/backtesting.md b/docs/docs/backtesting.md index 36a34ce..34aa631 100644 --- a/docs/docs/backtesting.md +++ b/docs/docs/backtesting.md @@ -252,8 +252,8 @@ def plot_wealth( OPES ships with a basic plotting utility for visualizing portfolio wealth over time. This method exists for quick inspection and debugging, not for deep performance analysis. -It visualizes cumulative wealth for one or multiple strategies using their periodic -returns. It also provides a breakeven reference line and optional saving of the plot to +It visualizes cumulative wealth for one or multiple strategies using their periodic +returns. It also provides a breakeven reference line and optional saving of the plot to a file. !!! tip "Recommendation:" diff --git a/docs/docs/objectives/dro.md b/docs/docs/objectives/dro.md index 38ac480..3490ade 100644 --- a/docs/docs/objectives/dro.md +++ b/docs/docs/objectives/dro.md @@ -132,7 +132,7 @@ $$ !!! example "Example:" ```python # Importing the dro Kelly module - from opes.objectives.distributionally_robust import KLRobustKelly + from opes.objectives import KLRobustKelly # Let this be your ticker data training_data = some_data() @@ -289,7 +289,7 @@ Uses the log-sum-exp technique to solve for numerical stability. !!! 
example "Example:" ```python # Importing the dro maximum mean module - from opes.objectives.distributionally_robust import KLRobustMaxMean + from opes.objectives import KLRobustMaxMean # Let this be your ticker data training_data = some_data() @@ -449,7 +449,7 @@ $$ !!! example "Example:" ```python # Importing the dro maximum mean module - from opes.objectives.distributionally_robust import WassRobustMaxMean + from opes.objectives import WassRobustMaxMean # Let this be your ticker data training_data = some_data() @@ -623,7 +623,7 @@ $$ !!! example "Example:" ```python # Importing the dro mean-variance module - from opes.objectives.distributionally_robust import WassRobustMeanVariance + from opes.objectives import WassRobustMeanVariance # Let this be your ticker data training_data = some_data() @@ -792,7 +792,7 @@ $$ !!! example "Example:" ```python # Importing the dro minimum variance module - from opes.objectives.distributionally_robust import WassRobustMinVariance + from opes.objectives import WassRobustMinVariance # Let this be your ticker data training_data = some_data() diff --git a/docs/docs/objectives/heuristics.md b/docs/docs/objectives/heuristics.md index edcf287..be77498 100644 --- a/docs/docs/objectives/heuristics.md +++ b/docs/docs/objectives/heuristics.md @@ -15,6 +15,179 @@ to outperform their theoretically optimal cousins outside the textbook. +## `HierarchicalRiskParity` + +```python +class HierarchicalRiskParity(cluster_method='average') +``` + +Hierarchical Risk Parity (HRP) optimization. + +Hierarchical Risk Parity (HRP), introduced by L≤pez de Prado, +is a portfolio construction methodology that allocates capital +through hierarchical clustering and recursive risk balancing +rather than direct optimization of a scalar objective. HRP +addresses several structural weaknesses of traditional +mean-variance and risk parity approaches, including sensitivity +to covariance matrix estimation error, numerical instability +arising from matrix inversion and over-concentration in highly +correlated assets. By organizing assets into a hierarchy based +on correlation structure and allocating weights inversely to +cluster-level variance, HRP achieves diversification across +both individual assets and correlated groups. This procedure +yields stable, fully invested portfolios that are robust +out-of-sample and well-suited for high-dimensional or noisy +return environments, particularly when covariance estimates are +ill-conditioned. + +**Args:** + +- `cluster_method` (*str, optional*): Clustering method to be used for hierarchical clustering. Defaults to `'average'`. Available methods are: + - '`average`': Merges clusters based on the minimum pairwise distance, often producing elongated, chain-like clusters. + - '`single'`: Merges clusters based on the maximum pairwise distance, favoring compact and tightly bound clusters. + - `'complete'`: Merges clusters using the average pairwise distance between all points in each cluster, providing a balanced compromise between single and complete linkage. + - '`ward`': Merges clusters to minimize the increase in within-cluster variance, producing balanced clusters aligned with variance-based portfolio construction. + +### Methods + +#### `clean_weights` + +```python +def clean_weights(threshold=1e-08) +``` + +Cleans the portfolio weights by setting very small positions to zero. + +Any weight whose absolute value is below the specified `threshold` is replaced with zero. 
+This helps remove negligible allocations while keeping the array structure intact. This method +requires portfolio optimization (`optimize()` method) to take place for `self.weights` to be +defined other than `None`. + +!!! warning "Warning:" + This method modifies the existing portfolio weights in place. After cleaning, re-optimization + is required to recover the original weights. + +**Args** + +- `threshold` (*float, optional*): Float specifying the minimum absolute weight to retain. Defaults to `1e-8`. + + +**Returns:** + +- `numpy.ndarray`: Cleaned and re-normalized portfolio weight vector. + +**Raises** + +- `PortfolioError`: If weights have not been calculated via optimization. + + +!!! note "Notes:" + - Weights are cleaned using absolute values, making this method compatible with long-short portfolios. + - Re-normalization ensures the portfolio remains properly scaled after cleaning. + - Increasing threshold promotes sparsity but may materially alter the portfolio composition. + +#### `optimize` + +```python +def optimize(data, custom_cov=None) +``` + +Computes the Hierarchical Risk Parity portfolio: + +$$ +\mathbf{w}_i \propto \frac{1}{\sigma^2_{\text{cluster}}} +$$ + +!!! note "Note" + Asset weight bounds are defaulted to (0,1). + +**Args** + +- `data` (*pd.DataFrame*): Ticker price data in either multi-index or single-index formats. Examples are given below: + ``` + # Single-Index Example + Ticker TSLA NVDA GME PFE AAPL ... + Date + 2015-01-02 14.620667 0.483011 6.288958 18.688917 24.237551 ... + 2015-01-05 14.006000 0.474853 6.460137 18.587513 23.554741 ... + 2015-01-06 14.085333 0.460456 6.268492 18.742599 23.556952 ... + 2015-01-07 14.063333 0.459257 6.195926 18.999102 23.887287 ... + 2015-01-08 14.041333 0.476533 6.268492 19.386841 24.805082 ... + ... + + # Multi-Index Example Structure (OHLCV) + Columns: + + Ticker (e.g. GME, PFE, AAPL, ...) + - Open + - High + - Low + - Close + - Volume + ``` +- `custom_cov` (*None or array-like of shape (n_assets, n_assets), optional*): Custom covariance matrix. Can be used to inject externally generated covariance matrices (eg. Ledoit-Wolf). Defaults to `None`. + + +**Returns:** + +- `np.ndarray`: Vector of optimized portfolio weights. + +**Raises** + +- `DataError`: For any data mismatch during integrity check. +- `PortfolioError`: For any invalid portfolio variable inputs during integrity check. + + +!!! example "Example:" + ```python + # Importing the HRP module + from opes.objectives import HierarchicalRiskParity as HRP + + # Let this be your ticker data + training_data = some_data() + + # Let this be your custom covariance matrix + cov_m = covMatrix() + + # Initialize with custom clustering method + hrp_portfolio = HRP(cluster_method='ward') + + # Optimize portfolio with custom covariance matrix + weights = hrp_portfolio.optimize(data=training_data, custom_cov=cov_m) + ``` + +#### `stats` + +```python +def stats() +``` + +Calculates and returns portfolio concentration and diversification statistics. + +These statistics help users to inspect portfolio's overall concentration in +allocation. For the method to work, the optimizer must have been initialized, i.e. +the `optimize()` method should have been called at least once for `self.weights` +to be defined other than `None`. + +**Returns:** + +- A `dict` containing the following keys: + - `'tickers'` (*list*): A list of tickers used for optimization. + - `'weights'` (*np.ndarry*): Portfolio weights, output from optimization. 
+ - `'portfolio_entropy'` (*float*): Shannon entropy computed on portfolio weights. + - `'herfindahl_index'` (*float*): Herfindahl Index value, computed on portfolio weights. + - `'gini_coefficient'` (*float*): Gini Coefficient value, computed on portfolio weights. + - `'absolute_max_weight'` (*float*): Absolute maximum allocation for an asset. + +**Raises** + +- `PortfolioError`: If weights have not been calculated via optimization. + + +!!! note "Notes:" + - All statistics are computed on absolute normalized weights (within the simplex), ensuring compatibility with long-short portfolios. + - This method is diagnostic only and does not modify portfolio weights. + - For meaningful interpretation, use these metrics in conjunction with risk and performance measures. + ## `InverseVolatility` ```python @@ -125,7 +298,7 @@ $$ !!! example "Example:" ```python # Importing the Inverse Volatility Portfolio (IVP) module - from opes.objectives.heuristics import InverseVolatility as IVP + from opes.objectives import InverseVolatility as IVP # Let this be your ticker data training_data = some_data() @@ -297,7 +470,7 @@ $$ !!! example "Example:" ```python # Importing the maximum diversification module - from opes.objectives.heuristics import MaxDiversification + from opes.objectives import MaxDiversification # Let this be your ticker data training_data = some_data() @@ -333,7 +506,7 @@ initiating a new one. !!! example "Example:" ```python # Import the MaxDiversification class - from opes.objectives.heuristics import MaxDiversification + from opes.objectives import MaxDiversification # Set with 'entropy' regularization optimizer = MaxDiversification(reg='entropy', strength=0.01) @@ -511,12 +684,12 @@ $$ !!! example "Example:" ```python # Importing the REPO module - from opes.objectives.heuristics import REPO + from opes.objectives import REPO # Let this be your ticker data training_data = some_data() - # Let these be your custom mean vector + # Let this be your custom mean vector mean_v = customMean() # Initialize with custom regularization @@ -547,7 +720,7 @@ initiating a new one. !!! example "Example:" ```python # Import the REPO class - from opes.objectives.heuristics import REPO + from opes.objectives import REPO # Set with 'entropy' regularization optimizer = REPO(reg='entropy', strength=0.01) @@ -714,7 +887,7 @@ $$ !!! example "Example:" ```python # Importing the risk parity module - from opes.objectives.heuristics import RiskParity + from opes.objectives import RiskParity # Let this be your ticker data training_data = some_data() @@ -750,7 +923,7 @@ initiating a new one. !!! example "Example:" ```python # Import the RiskParity class - from opes.objectives.heuristics import RiskParity + from opes.objectives import RiskParity # Set with 'entropy' regularization optimizer = RiskParity(reg='entropy', strength=0.01) @@ -911,7 +1084,7 @@ $$ !!! example "Example:" ```python # Importing the softmax mean module - from opes.objectives.heuristics import SoftmaxMean + from opes.objectives import SoftmaxMean # Let this be your ticker data training_data = some_data() @@ -1070,7 +1243,7 @@ $$ !!! 
example "Example:" ```python # Importing the equal-weight module - from opes.objectives.heuristics import Uniform + from opes.objectives import Uniform # Let this be your ticker data training_data = some_data() diff --git a/docs/docs/objectives/markowitz.md b/docs/docs/objectives/markowitz.md index 405e015..1367729 100644 --- a/docs/docs/objectives/markowitz.md +++ b/docs/docs/objectives/markowitz.md @@ -133,7 +133,7 @@ $$ !!! example "Example:" ```python # Importing the maximum mean module - from opes.objectives.markowitz import MaxMean + from opes.objectives import MaxMean # Let this be your ticker data training_data = some_data() @@ -170,7 +170,7 @@ initiating a new one. !!! example "Example:" ```python # Import the MaxMean class - from opes.objectives.markowitz import MaxMean + from opes.objectives import MaxMean # Set with 'entropy' regularization optimizer = MaxMean(reg='entropy', strength=0.01) @@ -346,7 +346,7 @@ $$ !!! example "Example:" ```python # Importing the maximum sharpe module - from opes.objectives.markowitz import MaxSharpe + from opes.objectives import MaxSharpe # Let this be your ticker data training_data = some_data() @@ -384,7 +384,7 @@ initiating a new one. !!! example "Example:" ```python # Import the MaxSharpe class - from opes.objectives.markowitz import MaxSharpe + from opes.objectives import MaxSharpe # Set with 'entropy' regularization optimizer = MaxSharpe(reg='entropy', strength=0.01) @@ -552,7 +552,7 @@ $$ !!! example "Example:" ```python # Importing the mean variance module - from opes.objectives.markowitz import MeanVariance + from opes.objectives import MeanVariance # Let this be your ticker data training_data = some_data() @@ -589,7 +589,7 @@ initiating a new one. !!! example "Example:" ```python # Import the MeanVariance class - from opes.objectives.markowitz import MeanVariance + from opes.objectives import MeanVariance # Set with 'entropy' regularization optimizer = MeanVariance(reg='entropy', strength=0.01) @@ -759,7 +759,7 @@ $$ !!! example "Example:" ```python # Importing the Global Minimum Variance (GMV) module - from opes.objectives.markowitz import MinVariance + from opes.objectives import MinVariance # Let this be your ticker data training_data = some_data() @@ -796,7 +796,7 @@ initiating a new one. !!! example "Example:" ```python # Import the MinVariance class - from opes.objectives.markowitz import MinVariance + from opes.objectives import MinVariance # Set with 'entropy' regularization optimizer = MinVariance(reg='entropy', strength=0.01) diff --git a/docs/docs/objectives/online_learning.md b/docs/docs/objectives/online_learning.md index e833502..e37bce7 100644 --- a/docs/docs/objectives/online_learning.md +++ b/docs/docs/objectives/online_learning.md @@ -134,7 +134,7 @@ $$ !!! example "Example:" ```python # Importing the BCRP module - from opes.objectives.online import BCRP + from opes.objectives import BCRP # Let this be your ticker data training_data = some_data() @@ -170,7 +170,7 @@ Leader (FTRL) or other adaptive optimization procedures. !!! example "Example:" ```python # Import the BCRP class - from opes.objectives.online import BCRP + from opes.objectives import BCRP # Set with 'entropy' regularization ftrl = BCRP(reg='entropy', strength=0.01) @@ -330,7 +330,7 @@ For this implementation, we have taken the reward function $f_{t} = \log \left(1 !!! 
example "Example:" ```python # Importing the exponential gradient module - from opes.objectives.online import ExponentialGradient as EG + from opes.objectives import ExponentialGradient as EG # Let this be your ticker data training_data = some_data() @@ -506,7 +506,7 @@ With $\mathcal P \sim \text{Grid}(k)$ or $\mathcal P \sim \text{Dirichlet}(\alph !!! example "Example:" ```python # Importing the universal portfolios module - from opes.objectives.online import UniversalPortfolios as UP + from opes.objectives import UniversalPortfolios as UP # Let this be your ticker data training_data = some_data() diff --git a/docs/docs/objectives/risk_measures.md b/docs/docs/objectives/risk_measures.md index f3d7ecb..1af09e5 100644 --- a/docs/docs/objectives/risk_measures.md +++ b/docs/docs/objectives/risk_measures.md @@ -142,7 +142,7 @@ $$ !!! example "Example:" ```python # Importing the CVaR module - from opes.objectives.risk_measures import CVaR + from opes.objectives import CVaR # Let this be your ticker data training_data = some_data() @@ -175,7 +175,7 @@ initiating a new one. !!! example "Example:" ```python # Import the CVaR class - from opes.objectives.risk_measures import CVaR + from opes.objectives import CVaR # Set with 'entropy' regularization optimizer = CVaR(reg='entropy', strength=0.01) @@ -343,7 +343,7 @@ $$ !!! example "Example:" ```python # Importing the EVaR module - from opes.objectives.risk_measures import EVaR + from opes.objectives import EVaR # Let this be your ticker data training_data = some_data() @@ -376,7 +376,7 @@ initiating a new one. !!! example "Example:" ```python # Import the EVaR class - from opes.objectives.risk_measures import EVaR + from opes.objectives import EVaR # Set with 'entropy' regularization optimizer = EVaR(reg='entropy', strength=0.01) @@ -546,7 +546,7 @@ $$ !!! example "Example:" ```python # Importing the ERM module - from opes.objectives.risk_measures import EntropicRisk + from opes.objectives import EntropicRisk # Let this be your ticker data training_data = some_data() @@ -579,7 +579,7 @@ initiating a new one. !!! example "Example:" ```python # Import the EntropicRisk class - from opes.objectives.risk_measures import EntropicRisk + from opes.objectives import EntropicRisk # Set with 'entropy' regularization optimizer = EntropicRisk(reg='entropy', strength=0.01) @@ -753,7 +753,7 @@ $$ !!! example "Example:" ```python # Importing the Mean-CVaR module - from opes.objectives.risk_measures import MeanCVaR + from opes.objectives import MeanCVaR # Let this be your ticker data training_data = some_data() @@ -790,7 +790,7 @@ initiating a new one. !!! example "Example:" ```python # Import the MeanCVaR class - from opes.objectives.risk_measures import MeanCVaR + from opes.objectives import MeanCVaR # Set with 'entropy' regularization optimizer = MeanCVaR(reg='entropy', strength=0.01) @@ -966,7 +966,7 @@ $$ !!! example "Example:" ```python # Importing the Mean-EVaR module - from opes.objectives.risk_measures import MeanEVaR + from opes.objectives import MeanEVaR # Let this be your ticker data training_data = some_data() @@ -1003,7 +1003,7 @@ initiating a new one. !!! example "Example:" ```python # Import the MeanEVaR class - from opes.objectives.risk_measures import MeanEVaR + from opes.objectives import MeanEVaR # Set with 'entropy' regularization optimizer = MeanEVaR(reg='entropy', strength=0.01) @@ -1171,7 +1171,7 @@ $$ !!! 
example "Example:" ```python # Importing the VaR module - from opes.objectives.risk_measures import VaR + from opes.objectives import VaR # Let this be your ticker data training_data = some_data() @@ -1374,7 +1374,7 @@ $$ !!! example "Example:" ```python # Importing the worst-case loss module - from opes.objectives.risk_measures import WorstCaseLoss + from opes.objectives import WorstCaseLoss # Let this be your ticker data training_data = some_data() @@ -1407,7 +1407,7 @@ initiating a new one. !!! example "Example:" ```python # Import the WorstCaseLoss class - from opes.objectives.risk_measures import WorstCaseLoss + from opes.objectives import WorstCaseLoss # Set with 'entropy' regularization optimizer = WorstCaseLoss(reg='entropy', strength=0.01) diff --git a/docs/docs/objectives/utility_theory.md b/docs/docs/objectives/utility_theory.md index 674aa0e..8d885f3 100644 --- a/docs/docs/objectives/utility_theory.md +++ b/docs/docs/objectives/utility_theory.md @@ -126,7 +126,7 @@ $$ !!! example "Example:" ```python # Importing the CARA class - from opes.objectives.utility_theory import CARA + from opes.objectives import CARA # Let this be your ticker data training_data = some_data() @@ -159,7 +159,7 @@ initiating a new one. !!! example "Example:" ```python # Import the CARA class - from opes.objectives.utility_theory import CARA + from opes.objectives import CARA # Set with 'entropy' regularization optimizer = CARA(reg='entropy', strength=0.01) @@ -319,7 +319,7 @@ $$ !!! example "Example:" ```python # Importing the CRRA class - from opes.objectives.utility_theory import CRRA + from opes.objectives import CRRA # Let this be your ticker data training_data = some_data() @@ -352,7 +352,7 @@ initiating a new one. !!! example "Example:" ```python # Import the CRRA class - from opes.objectives.utility_theory import CRRA + from opes.objectives import CRRA # Set with 'entropy' regularization optimizer = CRRA(reg='entropy', strength=0.01) @@ -526,7 +526,7 @@ $$ !!! example "Example:" ```python # Importing the HARA class - from opes.objectives.utility_theory import HARA + from opes.objectives import HARA # Let this be your ticker data training_data = some_data() @@ -559,7 +559,7 @@ initiating a new one. !!! example "Example:" ```python # Import the HARA class - from opes.objectives.utility_theory import HARA + from opes.objectives import HARA # Set with 'entropy' regularization optimizer = HARA(reg='entropy', strength=0.01) @@ -720,7 +720,7 @@ $$ !!! example "Example:" ```python # Importing the kelly criterion module - from opes.objectives.utility_theory import Kelly + from opes.objectives import Kelly # Let this be your ticker data training_data = some_data() @@ -753,7 +753,7 @@ initiating a new one. !!! example "Example:" ```python # Import the Kelly Criterion class - from opes.objectives.utility_theory import Kelly + from opes.objectives import Kelly # Set with 'entropy' regularization optimizer = Kelly(reg='entropy', strength=0.01) @@ -912,7 +912,7 @@ $$ !!! example "Example:" ```python # Importing the Quadratic Utility class - from opes.objectives.utility_theory import QuadraticUtility as QU + from opes.objectives import QuadraticUtility as QU # Let this be your ticker data training_data = some_data() @@ -945,7 +945,7 @@ initiating a new one. !!! 
example "Example:" ```python # Import the Quadratic Utility class - from opes.objectives.utility_theory import QuadraticUtility + from opes.objectives import QuadraticUtility # Set with 'entropy' regularization optimizer = QuadraticUtility(reg='entropy', strength=0.01) diff --git a/docs/docs/regularization.md b/docs/docs/regularization.md index 81531a4..97ed296 100644 --- a/docs/docs/regularization.md +++ b/docs/docs/regularization.md @@ -29,10 +29,10 @@ where $R(\mathbf{w})$ encodes structural preferences over the weights $\mathbf{w | Wasserstein-1 Distance from Uniform Weights | `wass-1` | $\text{W}_{1}(\mathbf{w}, \mathbf{u})$ | !!! note "Notes" - - `l1` regularization is mainly used for long-short portfolios to encourage less extreme + - `l1` regularization is mainly used for long-short portfolios to encourage less extreme allocations to meet the net exposure of 1. Using it on long-only portfolios is redundant. - - For long-short portfolios, mathematically grounded regularizers such as `entropy`, `jsd` - and `wass-1` first normalize the weights and constrain them to the simplex before applying + - For long-short portfolios, mathematically grounded regularizers such as `entropy`, `jsd` + and `wass-1` first normalize the weights and constrain them to the simplex before applying the regularization, ensuring mathematical coherence is not violated. --- @@ -46,6 +46,7 @@ The following objectives do not support regularization: - `Uniform` - `InverseVolatility` - `SoftmaxMean` +- `HierarchicalRiskParity` - `UniversalPortfolios` - `ExponentialGradient` - `KLRobustMaxMean` diff --git a/docs/docs/resources/references.md b/docs/docs/resources/references.md index cf62ecc..6615b08 100644 --- a/docs/docs/resources/references.md +++ b/docs/docs/resources/references.md @@ -27,6 +27,7 @@ * **Choueifaty, Y., & Coignard, Y. (2008).** Toward Maximum Diversification. *Journal of Portfolio Management*, 35(1), 40–51. * **Mercurio, P. J., Wu, Y., & Xie, H. (2020).** An Entropy-Based Approach to Portfolio Optimization. *Entropy*, 22(3), 332. https://doi.org/10.3390/e22030332 * **Mercurio, P. J., Wu, Y., & Xie, H. (2020).** Portfolio Optimization for Binary Options Based on Relative Entropy. *Entropy*, 22(7), 752. https://doi.org/10.3390/e22070752 +* **López de Prado, Marcos**, Building Diversified Portfolios that Outperform Out-of-Sample (May 23, 2016). *The Journal of Portfolio Management Summer 2016*, 42 (4) 59 - 69. 
https://doi.org/10.3905/jpm.2016.42.4.059 --- diff --git a/opes/__init__.py b/opes/__init__.py index 1cfce80..14a6b0b 100644 --- a/opes/__init__.py +++ b/opes/__init__.py @@ -1,5 +1,5 @@ # Version Log -__version__ = "0.9.1" +__version__ = "0.10.0" # Backtester easy import from .backtester import Backtester diff --git a/opes/objectives/__init__.py b/opes/objectives/__init__.py index 12dbf18..e64837d 100644 --- a/opes/objectives/__init__.py +++ b/opes/objectives/__init__.py @@ -23,6 +23,7 @@ MaxDiversification, RiskParity, REPO, + HierarchicalRiskParity, ) # Online Portfolios diff --git a/opes/objectives/base_optimizer.py b/opes/objectives/base_optimizer.py index 9dad131..9a0a780 100644 --- a/opes/objectives/base_optimizer.py +++ b/opes/objectives/base_optimizer.py @@ -144,4 +144,4 @@ def clean_weights(self, threshold=1e-8): else: self.weights[np.abs(self.weights) < threshold] = 0 self.weights /= np.abs(self.weights).sum() - return self.weights + return self.weights.copy() diff --git a/opes/objectives/distributionally_robust.py b/opes/objectives/distributionally_robust.py index 1a62326..043a299 100644 --- a/opes/objectives/distributionally_robust.py +++ b/opes/objectives/distributionally_robust.py @@ -111,7 +111,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None): !!! example "Example:" ```python # Importing the dro maximum mean module - from opes.objectives.distributionally_robust import KLRobustMaxMean + from opes.objectives import KLRobustMaxMean # Let this be your ticker data training_data = some_data() @@ -147,7 +147,7 @@ def f(x): ) if result.success: self.weights = result.x[:-1] - return self.weights + return self.weights.copy() else: raise OptimizationError( f"KL robust maximum mean optimization failed: {result.message}" @@ -242,7 +242,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None): !!! example "Example:" ```python # Importing the dro Kelly module - from opes.objectives.distributionally_robust import KLRobustKelly + from opes.objectives import KLRobustKelly # Let this be your ticker data training_data = some_data() @@ -277,7 +277,7 @@ def f(x): ) if result.success: self.weights = result.x[:-1] - return self.weights + return self.weights.copy() else: raise OptimizationError( f"KL robust kelly criterion optimization failed: {result.message}" @@ -396,7 +396,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None, custom_mean=None): !!! example "Example:" ```python # Importing the dro maximum mean module - from opes.objectives.distributionally_robust import WassRobustMaxMean + from opes.objectives import WassRobustMaxMean # Let this be your ticker data training_data = some_data() @@ -431,7 +431,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Wasserstein Robust Maximum Mean optimization failed: {result.message}" @@ -564,7 +564,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None, custom_cov=None): !!! example "Example:" ```python # Importing the dro minimum variance module - from opes.objectives.distributionally_robust import WassRobustMinVariance + from opes.objectives import WassRobustMinVariance # Let this be your ticker data training_data = some_data() @@ -600,7 +600,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Wasserstein Robust Minimum Variance optimization failed: {result.message}" @@ -746,7 +746,7 @@ def optimize( !!! 
example "Example:" ```python # Importing the dro mean-variance module - from opes.objectives.distributionally_robust import WassRobustMeanVariance + from opes.objectives import WassRobustMeanVariance # Let this be your ticker data training_data = some_data() @@ -791,7 +791,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Wasserstein Robust Mean-Variance Optimization failed: {result.message}" diff --git a/opes/objectives/heuristics.py b/opes/objectives/heuristics.py index 421c540..bf85897 100644 --- a/opes/objectives/heuristics.py +++ b/opes/objectives/heuristics.py @@ -16,6 +16,8 @@ import numpy as np import pandas as pd from scipy.optimize import minimize, differential_evolution +from scipy.cluster.hierarchy import linkage, dendrogram +from scipy.spatial.distance import squareform from opes.objectives.base_optimizer import Optimizer from ..utils import extract_trim, test_integrity, find_constraint @@ -104,7 +106,7 @@ def optimize(self, data=None, n_assets=None, **kwargs): !!! example "Example:" ```python # Importing the equal-weight module - from opes.objectives.heuristics import Uniform + from opes.objectives import Uniform # Let this be your ticker data training_data = some_data() @@ -123,7 +125,7 @@ def optimize(self, data=None, n_assets=None, **kwargs): # Assigning weights and returning the same self.weights = np.array(np.ones(len(self.tickers)) / len(self.tickers)) - return self.weights + return self.weights.copy() class InverseVolatility(Optimizer): @@ -203,7 +205,7 @@ def optimize(self, data=None, **kwargs): !!! example "Example:" ```python # Importing the Inverse Volatility Portfolio (IVP) module - from opes.objectives.heuristics import InverseVolatility as IVP + from opes.objectives import InverseVolatility as IVP # Let this be your ticker data training_data = some_data() @@ -218,7 +220,7 @@ def optimize(self, data=None, **kwargs): # Preparing inputs for finding weights self._prepare_inputs(data) self.weights = (1 / self.volarray) / (1 / self.volarray).sum() - return self.weights + return self.weights.copy() class SoftmaxMean(Optimizer): @@ -306,7 +308,7 @@ def optimize(self, data=None, custom_mean=None, **kwargs): !!! example "Example:" ```python # Importing the softmax mean module - from opes.objectives.heuristics import SoftmaxMean + from opes.objectives import SoftmaxMean # Let this be your ticker data training_data = some_data() @@ -330,7 +332,7 @@ def optimize(self, data=None, custom_mean=None, **kwargs): self.mean / self.temperature - np.max(self.mean / self.temperature) ) self.weights /= self.weights.sum() - return self.weights + return self.weights.copy() class MaxDiversification(Optimizer): @@ -443,7 +445,7 @@ def optimize(self, data=None, custom_cov=None, seed=100, **kwargs): !!! example "Example:" ```python # Importing the maximum diversification module - from opes.objectives.heuristics import MaxDiversification + from opes.objectives import MaxDiversification # Let this be your ticker data training_data = some_data() @@ -476,7 +478,7 @@ def f(w): ) if result.success: self.weights = result.x / (result.x.sum() + 1e-12) - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Maximum diversification optimization failed: {result.message}" @@ -497,7 +499,7 @@ def set_regularizer(self, reg=None, strength=1): !!! 
example "Example:" ```python # Import the MaxDiversification class - from opes.objectives.heuristics import MaxDiversification + from opes.objectives import MaxDiversification # Set with 'entropy' regularization optimizer = MaxDiversification(reg='entropy', strength=0.01) @@ -617,7 +619,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None, custom_cov=None): !!! example "Example:" ```python # Importing the risk parity module - from opes.objectives.heuristics import RiskParity + from opes.objectives import RiskParity # Let this be your ticker data training_data = some_data() @@ -655,7 +657,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Risk parity optimization failed: {result.message}" @@ -676,7 +678,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the RiskParity class - from opes.objectives.heuristics import RiskParity + from opes.objectives import RiskParity # Set with 'entropy' regularization optimizer = RiskParity(reg='entropy', strength=0.01) @@ -796,12 +798,12 @@ def optimize(self, data=None, bin=20, custom_mean=None, seed=100, **kwargs): !!! example "Example:" ```python # Importing the REPO module - from opes.objectives.heuristics import REPO + from opes.objectives import REPO # Let this be your ticker data training_data = some_data() - # Let these be your custom mean vector + # Let this be your custom mean vector mean_v = customMean() # Initialize with custom regularization @@ -841,7 +843,7 @@ def f(w): ) if result.success: self.weights = result.x / (result.x.sum() + 1e-12) - return self.weights + return self.weights.copy() else: raise OptimizationError(f"REPO optimization failed: {result.message}") @@ -860,7 +862,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the REPO class - from opes.objectives.heuristics import REPO + from opes.objectives import REPO # Set with 'entropy' regularization optimizer = REPO(reg='entropy', strength=0.01) @@ -877,3 +879,200 @@ def set_regularizer(self, reg=None, strength=1): """ self.reg = _find_regularizer(reg) self.strength = strength + + +class HierarchicalRiskParity(Optimizer): + """ + Hierarchical Risk Parity (HRP) optimization. + + Hierarchical Risk Parity (HRP), introduced by López de Prado, + is a portfolio construction methodology that allocates capital + through hierarchical clustering and recursive risk balancing + rather than direct optimization of a scalar objective. HRP + addresses several structural weaknesses of traditional + mean-variance and risk parity approaches, including sensitivity + to covariance matrix estimation error, numerical instability + arising from matrix inversion and over-concentration in highly + correlated assets. By organizing assets into a hierarchy based + on correlation structure and allocating weights inversely to + cluster-level variance, HRP achieves diversification across + both individual assets and correlated groups. This procedure + yields stable, fully invested portfolios that are robust + out-of-sample and well-suited for high-dimensional or noisy + return environments, particularly when covariance estimates are + ill-conditioned. + """ + + def __init__(self, cluster_method="average"): + """ + **Args:** + + - `cluster_method` (*str, optional*): Clustering method to be used for hierarchical clustering. Defaults to `'average'`. 
Available methods are:
+            - `'average'`: Merges clusters using the average pairwise distance between all points in each cluster, providing a balanced compromise between single and complete linkage.
+            - `'single'`: Merges clusters based on the minimum pairwise distance, often producing elongated, chain-like clusters.
+            - `'complete'`: Merges clusters based on the maximum pairwise distance, favoring compact and tightly bound clusters.
+            - `'ward'`: Merges clusters to minimize the increase in within-cluster variance, producing balanced clusters aligned with variance-based portfolio construction.
+        """
+        self.cluster_method = cluster_method.lower()
+        self.covariance = None
+        self.correlation = None
+
+        self.weights = None
+        self.tickers = None
+
+    # Helper method to prepare inputs for optimization
+    # Also validates parameters and checks for data inconsistency
+    def _prepare_optimization_inputs(self, data, custom_cov):
+
+        # Checking for cluster method validity
+        if self.cluster_method not in ["single", "average", "complete", "ward"]:
+            raise PortfolioError(f"Unknown cluster_method: {self.cluster_method}")
+
+        # Obtaining return and ticker data
+        self.tickers, data = extract_trim(data)
+
+        # Computing correlation matrix
+        self.correlation = np.corrcoef(data, rowvar=False)
+
+        # Assigning the covariance matrix (sample covariance unless a custom one is provided)
+        if custom_cov is None:
+            # Invertibility need not be handled as usual since HRP does not invert the covariance matrix
+            self.covariance = np.cov(data, rowvar=False)
+        else:
+            self.covariance = custom_cov
+        # We start with all assets set to 1
+        # This is different from the usual equal weight since HRP allocates step by step
+        # But is initialized as 1 before allocation
+        self.weights = np.ones(len(self.tickers))
+
+        # Function to test data integrity
+        test_integrity(tickers=self.tickers, weights=self.weights, cov=self.covariance)
+
+    # Function to get the leaf order of the dendrogram
+    def _get_leaf_order(self, condensed_distance_matrix):
+        clustered_linkage = linkage(
+            condensed_distance_matrix, method=self.cluster_method
+        )
+        cluster_dendrogram = dendrogram(clustered_linkage, no_plot=True)
+        return np.array(cluster_dendrogram["leaves"])
+
+    # Function to get cluster variance
+    def _get_cluster_variance(self, cluster):
+        # Slicing the covariance entries required for the cluster
+        cov_slice = self.covariance[np.ix_(cluster, cluster)]
+
+        # Finding variances (diagonal elements)
+        diag = np.diag(cov_slice)
+
+        # Computing cluster variance
+        # NOTE: 1e-12 added for numerical stability
+        inv_diag = 1.0 / (diag + 1e-12)
+        return 1.0 / inv_diag.sum()
+
+    # 'Recursive' bisection function
+    # NOTE: while loop + queue is used for stability
+    def _recursive_bisection(self, leaf_order):
+        clusters = [leaf_order]
+
+        # Instead of recursion, which might hit a stack limit in Python, we use a while loop and a queue
+        # Not a queue exactly, but we simulate one using pop(0) and append()
+        while clusters:
+            cluster = clusters.pop(0)
+
+            # Checking if the cluster is a single element or empty
+            if len(cluster) <= 1:
+                continue
+
+            # Splitting the cluster into left and right halves
+            split_var = len(cluster) // 2
+            left_cluster = cluster[:split_var]
+            right_cluster = cluster[split_var:]
+
+            # Obtaining cluster variance for left and right
+            left_variance = self._get_cluster_variance(left_cluster)
+            right_variance = self._get_cluster_variance(right_cluster)
+
+            # Computing left weights (alpha)
+            alpha = 1 - left_variance / (left_variance + right_variance)
+
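+            # NOTE: alpha is the fraction of the parent cluster's weight assigned to the left half;
+            # it equals right_variance / (left_variance + right_variance), so the lower-variance
+            # half receives the larger share. Splits compound multiplicatively down the hierarchy.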
+ # Assigning weights to left and right clusters respectively + self.weights[left_cluster] *= alpha + self.weights[right_cluster] *= 1 - alpha + + # Appending for recursion + clusters.append(left_cluster) + clusters.append(right_cluster) + + self.weights = self.weights / self.weights.sum() + + def optimize(self, data, custom_cov=None): + """ + Computes the Hierarchical Risk Parity portfolio: + + $$ + \\mathbf{w}_i \\propto \\frac{1}{\\sigma^2_{\\text{cluster}}} + $$ + + !!! note "Note" + Asset weight bounds are defaulted to (0,1). + + Args: + data (*pd.DataFrame*): Ticker price data in either multi-index or single-index formats. Examples are given below: + ``` + # Single-Index Example + Ticker TSLA NVDA GME PFE AAPL ... + Date + 2015-01-02 14.620667 0.483011 6.288958 18.688917 24.237551 ... + 2015-01-05 14.006000 0.474853 6.460137 18.587513 23.554741 ... + 2015-01-06 14.085333 0.460456 6.268492 18.742599 23.556952 ... + 2015-01-07 14.063333 0.459257 6.195926 18.999102 23.887287 ... + 2015-01-08 14.041333 0.476533 6.268492 19.386841 24.805082 ... + ... + + # Multi-Index Example Structure (OHLCV) + Columns: + + Ticker (e.g. GME, PFE, AAPL, ...) + - Open + - High + - Low + - Close + - Volume + ``` + `custom_cov` (*None or array-like of shape (n_assets, n_assets), optional*): Custom covariance matrix. Can be used to inject externally generated covariance matrices (eg. Ledoit-Wolf). Defaults to `None`. + + **Returns:** + + - `np.ndarray`: Vector of optimized portfolio weights. + + Raises: + DataError: For any data mismatch during integrity check. + PortfolioError: For any invalid portfolio variable inputs during integrity check. + + !!! example "Example:" + ```python + # Importing the HRP module + from opes.objectives import HierarchicalRiskParity as HRP + + # Let this be your ticker data + training_data = some_data() + + # Let this be your custom covariance matrix + cov_m = covMatrix() + + # Initialize with custom clustering method + hrp_portfolio = HRP(cluster_method='ward') + + # Optimize portfolio with custom covariance matrix + weights = hrp_portfolio.optimize(data=training_data, custom_cov=cov_m) + ``` + """ + # Preparing inputs for HRP optimization + self._prepare_optimization_inputs(data, custom_cov=custom_cov) + + # Computing distance matrix and condensing it for leaf order + distance_matrix = np.sqrt(0.5 * (1 - self.correlation)) + condensed_distance = squareform(distance_matrix, checks=False) + leaf_order = self._get_leaf_order(condensed_distance) + + self._recursive_bisection(leaf_order) + return self.weights.copy() diff --git a/opes/objectives/markowitz.py b/opes/objectives/markowitz.py index 56785c5..49575a0 100644 --- a/opes/objectives/markowitz.py +++ b/opes/objectives/markowitz.py @@ -112,7 +112,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None, custom_mean=None): !!! example "Example:" ```python # Importing the maximum mean module - from opes.objectives.markowitz import MaxMean + from opes.objectives import MaxMean # Let this be your ticker data training_data = some_data() @@ -148,7 +148,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Maximum mean optimization failed: {result.message}" @@ -169,7 +169,7 @@ def set_regularizer(self, reg=None, strength=1): !!! 
example "Example:" ```python # Import the MaxMean class - from opes.objectives.markowitz import MaxMean + from opes.objectives import MaxMean # Set with 'entropy' regularization optimizer = MaxMean(reg='entropy', strength=0.01) @@ -292,7 +292,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None, custom_cov=None): !!! example "Example:" ```python # Importing the Global Minimum Variance (GMV) module - from opes.objectives.markowitz import MinVariance + from opes.objectives import MinVariance # Let this be your ticker data training_data = some_data() @@ -326,7 +326,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Global minimum optimization failed: {result.message}" @@ -347,7 +347,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the MinVariance class - from opes.objectives.markowitz import MinVariance + from opes.objectives import MinVariance # Set with 'entropy' regularization optimizer = MinVariance(reg='entropy', strength=0.01) @@ -474,7 +474,7 @@ def optimize( !!! example "Example:" ```python # Importing the mean variance module - from opes.objectives.markowitz import MeanVariance + from opes.objectives import MeanVariance # Let this be your ticker data training_data = some_data() @@ -514,7 +514,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Mean variance optimization failed: {result.message}" @@ -535,7 +535,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the MeanVariance class - from opes.objectives.markowitz import MeanVariance + from opes.objectives import MeanVariance # Set with 'entropy' regularization optimizer = MeanVariance(reg='entropy', strength=0.01) @@ -662,7 +662,7 @@ def optimize( !!! example "Example:" ```python # Importing the maximum sharpe module - from opes.objectives.markowitz import MaxSharpe + from opes.objectives import MaxSharpe # Let this be your ticker data training_data = some_data() @@ -701,7 +701,7 @@ def f(w): ) if result.success: self.weights = result.x / (result.x.sum() + 1e-12) - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Maximum sharpe optimization failed: {result.message}" @@ -722,7 +722,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the MaxSharpe class - from opes.objectives.markowitz import MaxSharpe + from opes.objectives import MaxSharpe # Set with 'entropy' regularization optimizer = MaxSharpe(reg='entropy', strength=0.01) diff --git a/opes/objectives/online.py b/opes/objectives/online.py index 3d2c330..1eb237b 100644 --- a/opes/objectives/online.py +++ b/opes/objectives/online.py @@ -159,7 +159,7 @@ def optimize(self, data=None, seed=100, **kwargs): !!! example "Example:" ```python # Importing the universal portfolios module - from opes.objectives.online import UniversalPortfolios as UP + from opes.objectives import UniversalPortfolios as UP # Let this be your ticker data training_data = some_data() @@ -192,7 +192,7 @@ def optimize(self, data=None, seed=100, **kwargs): # Normalization and finding optimal weights self.weights = wealth_weighted_portfolio / portfolio_wealths.sum() - return self.weights + return self.weights.copy() class BCRP(Optimizer): @@ -280,7 +280,7 @@ def optimize(self, data=None, w=None): !!! 
example "Example:" ```python # Importing the BCRP module - from opes.objectives.online import BCRP + from opes.objectives import BCRP # Let this be your ticker data training_data = some_data() @@ -310,7 +310,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError(f"BCRP optimization failed: {result.message}") @@ -330,7 +330,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the BCRP class - from opes.objectives.online import BCRP + from opes.objectives import BCRP # Set with 'entropy' regularization ftrl = BCRP(reg='entropy', strength=0.01) @@ -431,7 +431,7 @@ def optimize(self, data=None, w=None): !!! example "Example:" ```python # Importing the exponential gradient module - from opes.objectives.online import ExponentialGradient as EG + from opes.objectives import ExponentialGradient as EG # Let this be your ticker data training_data = some_data() @@ -464,4 +464,4 @@ def optimize(self, data=None, w=None): new_weights = np.exp(log_w) self.weights = new_weights / new_weights.sum() - return self.weights + return self.weights.copy() diff --git a/opes/objectives/risk_measures.py b/opes/objectives/risk_measures.py index 060c291..dabe8eb 100644 --- a/opes/objectives/risk_measures.py +++ b/opes/objectives/risk_measures.py @@ -123,7 +123,7 @@ def optimize(self, data=None, seed=100, **kwargs): !!! example "Example:" ```python # Importing the VaR module - from opes.objectives.risk_measures import VaR + from opes.objectives import VaR # Let this be your ticker data training_data = some_data() @@ -153,7 +153,7 @@ def f(w): ) if result.success: self.weights = result.x / (result.x.sum() + 1e-12) - return self.weights + return self.weights.copy() else: raise OptimizationError(f"VaR optimization failed: {result.message}") @@ -285,7 +285,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None): !!! example "Example:" ```python # Importing the CVaR module - from opes.objectives.risk_measures import CVaR + from opes.objectives import CVaR # Let this be your ticker data training_data = some_data() @@ -321,7 +321,7 @@ def f(x): ) if result.success: self.weights = result.x[:-1] - return self.weights + return self.weights.copy() else: raise OptimizationError(f"CVaR optimization failed: {result.message}") @@ -340,7 +340,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the CVaR class - from opes.objectives.risk_measures import CVaR + from opes.objectives import CVaR # Set with 'entropy' regularization optimizer = CVaR(reg='entropy', strength=0.01) @@ -455,7 +455,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None, custom_mean=None): !!! example "Example:" ```python # Importing the Mean-CVaR module - from opes.objectives.risk_measures import MeanCVaR + from opes.objectives import MeanCVaR # Let this be your ticker data training_data = some_data() @@ -502,7 +502,7 @@ def f(x): ) if result.success: self.weights = result.x[:-1] - return self.weights + return self.weights.copy() else: raise OptimizationError(f"Mean CVaR optimization failed: {result.message}") @@ -521,7 +521,7 @@ def set_regularizer(self, reg=None, strength=1): !!! 
example "Example:" ```python # Import the MeanCVaR class - from opes.objectives.risk_measures import MeanCVaR + from opes.objectives import MeanCVaR # Set with 'entropy' regularization optimizer = MeanCVaR(reg='entropy', strength=0.01) @@ -636,7 +636,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None): !!! example "Example:" ```python # Importing the EVaR module - from opes.objectives.risk_measures import EVaR + from opes.objectives import EVaR # Let this be your ticker data training_data = some_data() @@ -673,7 +673,7 @@ def f(x): ) if result.success: self.weights = result.x[:-1] - return self.weights + return self.weights.copy() else: raise OptimizationError(f"EVaR optimization failed: {result.message}") @@ -692,7 +692,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the EVaR class - from opes.objectives.risk_measures import EVaR + from opes.objectives import EVaR # Set with 'entropy' regularization optimizer = EVaR(reg='entropy', strength=0.01) @@ -808,7 +808,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None, custom_mean=None): !!! example "Example:" ```python # Importing the Mean-EVaR module - from opes.objectives.risk_measures import MeanEVaR + from opes.objectives import MeanEVaR # Let this be your ticker data training_data = some_data() @@ -855,7 +855,7 @@ def f(x): ) if result.success: self.weights = result.x[:-1] - return self.weights + return self.weights.copy() else: raise OptimizationError(f"Mean EVaR optimization failed: {result.message}") @@ -874,7 +874,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the MeanEVaR class - from opes.objectives.risk_measures import MeanEVaR + from opes.objectives import MeanEVaR # Set with 'entropy' regularization optimizer = MeanEVaR(reg='entropy', strength=0.01) @@ -993,7 +993,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None): !!! example "Example:" ```python # Importing the ERM module - from opes.objectives.risk_measures import EntropicRisk + from opes.objectives import EntropicRisk # Let this be your ticker data training_data = some_data() @@ -1026,7 +1026,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Entropic risk metric optimization failed: {result.message}" @@ -1047,7 +1047,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the EntropicRisk class - from opes.objectives.risk_measures import EntropicRisk + from opes.objectives import EntropicRisk # Set with 'entropy' regularization optimizer = EntropicRisk(reg='entropy', strength=0.01) @@ -1160,7 +1160,7 @@ def optimize(self, data=None, seed=100, **kwargs): !!! example "Example:" ```python # Importing the worst-case loss module - from opes.objectives.risk_measures import WorstCaseLoss + from opes.objectives import WorstCaseLoss # Let this be your ticker data training_data = some_data() @@ -1190,7 +1190,7 @@ def f(w): ) if result.success: self.weights = result.x / (result.x.sum() + 1e-12) - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Worst-Case Loss optimization failed: {result.message}" @@ -1211,7 +1211,7 @@ def set_regularizer(self, reg=None, strength=1): !!! 
example "Example:" ```python # Import the WorstCaseLoss class - from opes.objectives.risk_measures import WorstCaseLoss + from opes.objectives import WorstCaseLoss # Set with 'entropy' regularization optimizer = WorstCaseLoss(reg='entropy', strength=0.01) diff --git a/opes/objectives/utility_theory.py b/opes/objectives/utility_theory.py index 51d7eec..0ee7c51 100644 --- a/opes/objectives/utility_theory.py +++ b/opes/objectives/utility_theory.py @@ -109,7 +109,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None): !!! example "Example:" ```python # Importing the kelly criterion module - from opes.objectives.utility_theory import Kelly + from opes.objectives import Kelly # Let this be your ticker data training_data = some_data() @@ -140,7 +140,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Kelly criterion optimization failed: {result.message}" @@ -161,7 +161,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the Kelly Criterion class - from opes.objectives.utility_theory import Kelly + from opes.objectives import Kelly # Set with 'entropy' regularization optimizer = Kelly(reg='entropy', strength=0.01) @@ -262,7 +262,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None): !!! example "Example:" ```python # Importing the Quadratic Utility class - from opes.objectives.utility_theory import QuadraticUtility as QU + from opes.objectives import QuadraticUtility as QU # Let this be your ticker data training_data = some_data() @@ -295,7 +295,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError( f"Quadratic utility optimization failed: {result.message}" @@ -316,7 +316,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the Quadratic Utility class - from opes.objectives.utility_theory import QuadraticUtility + from opes.objectives import QuadraticUtility # Set with 'entropy' regularization optimizer = QuadraticUtility(reg='entropy', strength=0.01) @@ -425,7 +425,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None): !!! example "Example:" ```python # Importing the CARA class - from opes.objectives.utility_theory import CARA + from opes.objectives import CARA # Let this be your ticker data training_data = some_data() @@ -458,7 +458,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError(f"CARA optimization failed: {result.message}") @@ -477,7 +477,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the CARA class - from opes.objectives.utility_theory import CARA + from opes.objectives import CARA # Set with 'entropy' regularization optimizer = CARA(reg='entropy', strength=0.01) @@ -585,7 +585,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None): !!! example "Example:" ```python # Importing the CRRA class - from opes.objectives.utility_theory import CRRA + from opes.objectives import CRRA # Let this be your ticker data training_data = some_data() @@ -618,7 +618,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError(f"CRRA optimization failed: {result.message}") @@ -637,7 +637,7 @@ def set_regularizer(self, reg=None, strength=1): !!! 
example "Example:" ```python # Import the CRRA class - from opes.objectives.utility_theory import CRRA + from opes.objectives import CRRA # Set with 'entropy' regularization optimizer = CRRA(reg='entropy', strength=0.01) @@ -759,7 +759,7 @@ def optimize(self, data=None, weight_bounds=(0, 1), w=None): !!! example "Example:" ```python # Importing the HARA class - from opes.objectives.utility_theory import HARA + from opes.objectives import HARA # Let this be your ticker data training_data = some_data() @@ -793,7 +793,7 @@ def f(w): ) if result.success: self.weights = result.x - return self.weights + return self.weights.copy() else: raise OptimizationError(f"HARA optimization failed: {result.message}") @@ -812,7 +812,7 @@ def set_regularizer(self, reg=None, strength=1): !!! example "Example:" ```python # Import the HARA class - from opes.objectives.utility_theory import HARA + from opes.objectives import HARA # Set with 'entropy' regularization optimizer = HARA(reg='entropy', strength=0.01) diff --git a/opes/regularizer.py b/opes/regularizer.py index 8560da9..0245a44 100644 --- a/opes/regularizer.py +++ b/opes/regularizer.py @@ -45,6 +45,7 @@ - `Uniform` - `InverseVolatility` - `SoftmaxMean` +- `HierarchicalRiskParity` - `UniversalPortfolios` - `ExponentialGradient` - `KLRobustMaxMean` diff --git a/pyproject.toml b/pyproject.toml index bfed9d6..106cdef 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,7 +10,7 @@ dev = ["pytest", "other-dev-packages"] [project] name = "opes" -version = "0.9.1" +version = "0.10.0" description = "A research-focused portfolio optimization and backtesting engine." readme = "README.md" requires-python = ">=3.10" From 6293a4f2ef1aaebc6f80c8670f6ec183e8445c1c Mon Sep 17 00:00:00 2001 From: Nitin Tony Paul <108007300+nitintonypaul@users.noreply.github.com> Date: Thu, 22 Jan 2026 23:44:41 +0530 Subject: [PATCH 2/2] [MNT] Improving documentation Improving docstrings and markdown documentation website. --- README.md | 6 +-- docs/docs/index.md | 4 +- docs/docs/installation.md | 85 +++++++++++++++++++++++++++------------ 3 files changed, 65 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index c84b200..a5c68d9 100644 --- a/README.md +++ b/README.md @@ -107,7 +107,7 @@ Alternatively, you are also welcome to install directly from the GitHub reposito ```bash git clone https://github.com/opes-core/opes.git -cd opes +cd opes-main pip install . ``` @@ -163,7 +163,7 @@ asset_data = yf.download( kelly_portfolio = Kelly(fraction=0.8, reg="l2", strength=0.001) # Compute portfolio weights with custom weight bounds -kelly_portfolio.optimize(data, weight_bounds=(0.05, 0.8)) +kelly_portfolio.optimize(asset_data, weight_bounds=(0.05, 0.8)) # Clean negligible allocations cleaned_weights = kelly_portfolio.clean_weights(threshold=1e-6) @@ -172,7 +172,7 @@ cleaned_weights = kelly_portfolio.clean_weights(threshold=1e-6) print(cleaned_weights) ``` -This showcases the simplicty of the module. However there are far more diverse features you can still explore. If you're looking for more examples, preferably some of them with *"context"*, I recommend you check out the [examples](https://opes.pages.dev/examples/good_strategy/) page within the documentation. +This showcases the simplicity of the module. However there are far more diverse features you can still explore. If you're looking for more examples, preferably some of them with *"context"*, I recommend you check out the [examples](https://opes.pages.dev/examples/good_strategy/) page within the documentation. 
--- diff --git a/docs/docs/index.md b/docs/docs/index.md index 5e07ebc..ae1ceb8 100644 --- a/docs/docs/index.md +++ b/docs/docs/index.md @@ -23,7 +23,7 @@ OPES is a research-oriented and experimentation-focused Python module for portfo !!! example "Demo" ```python # Demonstration of portfolio optimization using the Kelly Criterion - # 'data' represents OHLCV market data grouped by ticker symbols + # `return_data` represents OHLCV market data grouped by ticker symbols from opes.objectives import Kelly @@ -31,7 +31,7 @@ OPES is a research-oriented and experimentation-focused Python module for portfo kelly_portfolio = Kelly(fraction=0.8, reg="l2", strength=0.01) # Compute portfolio weights with custom bounds and clean negligible allocations - kelly_portfolio.optimize(data, weight_bounds=(0.05, 0.8)) + kelly_portfolio.optimize(return_data, weight_bounds=(0.05, 0.8)) cleaned_weights = kelly_portfolio.clean_weights(threshold=1e-6) # Output the final portfolio weights diff --git a/docs/docs/installation.md b/docs/docs/installation.md index c981f2c..bc69ecd 100644 --- a/docs/docs/installation.md +++ b/docs/docs/installation.md @@ -7,59 +7,94 @@ This page guides you through installing OPES for experimentation & research. !!! warning "Warning:" OPES is currently under development. While it is relatively stable for experimentation, some features may change or break. Use at your own discretion and always verify results when testing. -## Prerequisites - -- Python 3.10+ (tested up to 3.12) -- `pip` package manager - --- ## Procedure -### 1. Install OPES +Python 3.10+ is required for `opes` to run (although it *may* work on some lower versions). `opes` is tested up to Python 3.14. To install a stable release of `opes`, `pip` is recommended for convenience. -You can install OPES easily via PyPI: +### Installation + +You can install OPES easily via PyPI using `pip`. ```bash pip install opes ``` -This will fetch the latest stable release and all required dependencies. - -You are also welcome to install the module directly from GitHub: +This will fetch the latest stable release and all required dependencies. Alternatively, you are also welcome to install the module directly from GitHub. ```bash git clone https://github.com/opes-core/opes.git cd opes-main -pip install -e . +pip install . ``` -!!! note "Note:" - The `-e` flag installs OPES in editable mode, so any changes you make to the source code are reflected immediately without reinstalling. This is great for developers or those tinkering with advanced features. +You can also install in editable mode if you plan on making any changes to the source code. + +```bash +# After cloning and in the root of the project +pip install -e . +``` --- -### 2. Verify the Installation +### Verification -After installation, make sure everything works by opening Python and importing OPES: +After installation, make sure everything works by opening Python and importing `opes`. ```python >>> import opes >>> opes.__version__ -'1.0.0' +'0.10.0' # May not be the current version, but you get the idea ``` -If no errors appear, OPES is ready to use. +You can also verify your installation using `pip`. + +```bash +pip show opes +``` + +If no errors appear, `opes` is ready to use. --- -## Dependencies +## Getting Started -OPES requires the following Python modules: +`opes` is designed to be minimalistic and easy to use and learn for any user. Here is an example script which implements my favorite portfolio, the Kelly Criterion.
+
+```python
+# I recommend you use yfinance for testing.
+# However, for serious research, using an external, faster API would be more fruitful.
+import yfinance as yf
+
+# Importing our Kelly class
+from opes.objectives import Kelly
+
+# ---------- FETCHING DATA ----------
+# Obtaining ticker data
+# Basic yfinance stuff
+TICKERS = ["AAPL", "NVDA", "PFE", "TSLA", "BRK-B", "SHV", "TLT"]
+asset_data = yf.download(
+    tickers=TICKERS,
+    start="2010-01-01",
+    end="2020-01-01",
+    group_by="ticker",
+    auto_adjust=True
+)
+
+# ---------- OPES USAGE ----------
+# Initialize a Kelly portfolio with fractional exposure and L2 regularization
+# Fractional exposure produces less risky weights and L2 regularization helps penalize concentration
+kelly_portfolio = Kelly(fraction=0.8, reg="l2", strength=0.001)
+
+# Compute portfolio weights with custom weight bounds
+kelly_portfolio.optimize(asset_data, weight_bounds=(0.05, 0.8))
+
+# Clean negligible allocations
+cleaned_weights = kelly_portfolio.clean_weights(threshold=1e-6)
+
+# Output the final portfolio weights
+print(cleaned_weights)
+```
+
-| Module name | Minimum version | Maximum version |
-| --------------- | --------------- | --------------- |
-| **NumPy** | 2.2.6 | < 3.0 |
-| **pandas** | 2.3.3 | < 3.0 |
-| **SciPy** | 1.15.2 | < 2.0 |
-| **matplotlib** | 3.10.1 | < 4.0 |
\ No newline at end of file
+This showcases the simplicity of the module. However, there are far more diverse features you can still explore. If you're looking for more examples, preferably some of them with *"context"*, I recommend you check out the [examples](./examples/good_strategy.md) page within the documentation.
\ No newline at end of file
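For completeness, the same getting-started flow can be sketched with the `HierarchicalRiskParity` objective this patch introduces. The snippet below is a minimal, illustrative sketch only: it assumes `HierarchicalRiskParity` is importable from `opes.objectives` (as listed in the updated `regularizer.py` docstring) and exposes the same `optimize()` / `clean_weights()` interface as the other objectives documented above; the no-argument constructor and the `threshold` value are assumptions, not part of the patch.

```python
# Minimal sketch (assumptions noted above): allocating with the new
# HierarchicalRiskParity objective using the same workflow as the Kelly example.
import yfinance as yf

from opes.objectives import HierarchicalRiskParity

# Fetch OHLCV data grouped by ticker, as in the Kelly example
TICKERS = ["AAPL", "NVDA", "PFE", "TSLA", "BRK-B", "SHV", "TLT"]
asset_data = yf.download(
    tickers=TICKERS,
    start="2010-01-01",
    end="2020-01-01",
    group_by="ticker",
    auto_adjust=True,
)

# HRP builds the allocation from the correlation hierarchy of the assets,
# so no return target or risk-aversion parameter is passed here.
hrp_portfolio = HierarchicalRiskParity()

# Assumed to mirror the other objectives: compute, then clean, the weights
hrp_portfolio.optimize(asset_data)
print(hrp_portfolio.clean_weights(threshold=1e-6))
```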