From f5fc8545828e757f998c549db62b752cf282bd53 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Z=C3=A9=20Vin=C3=ADcius?=
Date: Sun, 26 May 2024 19:13:53 +0800
Subject: [PATCH] Revert "Changes: Finer control of the iterative algorithm and dummy variables allowed in the constraints."

---
 .gitignore                     |   3 +-
 setup.py                       |   2 +-
 src/riskparityportfolio/rpp.py |  38 +----
 src/riskparityportfolio/sca.py | 112 ++++----------
 .../tests/test_modern_rpp.py   | 144 +-----------------
 .../tests/test_vanilla_rpp.py  |  22 ---
 6 files changed, 47 insertions(+), 274 deletions(-)

diff --git a/.gitignore b/.gitignore
index f915e57..9d19115 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,8 +5,7 @@ build/
 riskparityportfolio.egg-info/
 var/
 .DS_Store
-.idea/
-.coverage*
+.coverage
 docs/source/tutorials/.ipynb_checkpoints/*
 .eggs/
 .ipynb_checkpoints/*
diff --git a/setup.py b/setup.py
index b1ba198..913326e 100644
--- a/setup.py
+++ b/setup.py
@@ -4,7 +4,7 @@
 import setuptools
 import os
 
-__version__ = "0.6.0"
+__version__ = "0.5.1"
 
 # Prepare and send a new release to PyPI
 if "release" in sys.argv[-1]:
diff --git a/src/riskparityportfolio/rpp.py b/src/riskparityportfolio/rpp.py
index 33a5e9b..60f7780 100644
--- a/src/riskparityportfolio/rpp.py
+++ b/src/riskparityportfolio/rpp.py
@@ -18,22 +18,14 @@ def __init__(self):
 
 
 class RiskParityPortfolio:
-    """Designs risk parity portfolios by solving the following optimization problem:
+    """Designs risk parity portfolios by solving the following optimization problem
 
-        minimize    R(w) - alpha * mu.T w + lambda * w.T Sigma w
-        subject to  Cw = c, Dw <= d
+        minimize    R(w) - alpha * mu.T * w + lambda * w.T Sigma w
+        subject to  Cw = c, Dw <= d
 
-    where R(w) is a risk concentration function, and alpha and lambda are trade-off
+    where R is a risk concentration function, and alpha and lambda are trade-off
     parameters for the expected return and the variance, respectively.
 
-    The risk concentration R(w) is computed as the squared l2-norm of the risk concentration vector,
-    which by default is obtained from RiskContribOverVarianceMinusBudget():
-
-        R(w) = sum_i (MR_i/sum(MR_i) - budget_i)^2
-
-    where MR_i = w_i * (Sigma @ w)_i are the marginal risks (sum(MR_i) = Var(w)), and
-    budget_i are the risk budgets (by default 1/n).
-
     Parameters
     ----------
     covariance : array, shape=(n, n)
@@ -44,20 +36,6 @@ class RiskParityPortfolio:
         weights of the portfolio
     risk_concentration : class
         any valid child class of RiskConcentrationFunction
-
-    Examples:
-    # Set up:
-    >>> import numpy as np
-    >>> import riskparityportfolio as rpp
-    >>> n = 10
-    >>> U = np.random.multivariate_normal(mean=np.zeros(n), cov=0.1 * np.eye(n), size=round(.7 * n)).T
-    >>> Sigma = U @ U.T + np.eye(n)
-    # Basic usage with default constraints sum(w) = 1 and w >= 0:
-    >>> my_portfolio = rpp.RiskParityPortfolio(Sigma)
-    >>> my_portfolio.design()
-    >>> my_portfolio.weights
-    # Basic usage with equality and inequality constraints:
-    >>> my_portfolio.design(Cmat=Cmat, cvec=cvec, Dmat=Dmat, dvec=dvec)
     """
 
     def __init__(
         self,
@@ -194,15 +172,13 @@ def add_variance(self, lmd):
         self.lmd = lmd
         self.has_variance = True
 
-    def design(self, verbose=True, **kwargs):
+    def design(self, **kwargs):
         """Optimize the portfolio.
 
         Parameters
         ----------
-        verbose : boolean
-            Whether to print the optimization process.
         kwargs : dict
-            Dictionary of parameters to be passed to SuccessiveConvexOptimizer().
+            Dictionary of parameters to be passed to SuccessiveConvexOptimizer.
""" self.sca = SuccessiveConvexOptimizer(self, **kwargs) - self.sca.solve(verbose) + self.sca.solve() diff --git a/src/riskparityportfolio/sca.py b/src/riskparityportfolio/sca.py index b8c1a63..454077c 100644 --- a/src/riskparityportfolio/sca.py +++ b/src/riskparityportfolio/sca.py @@ -1,5 +1,4 @@ import numpy as np -import warnings from tqdm import tqdm try: @@ -102,25 +101,9 @@ def maxiter(self, value): class SuccessiveConvexOptimizer: """ - Successive Convex Approximation optimizer tailored for the risk parity problem including the linear constraints: - Cmat @ w = cvec - Dmat @ w <= dvec, - where matrices Cmat and Dmat have n columns (n being the number of assets). Based on the paper: - - Feng, Y., and Palomar, D. P. (2015). SCRIP: Successive convex optimization methods for risk parity portfolios design. - IEEE Trans. Signal Processing, 63(19), 5285–5300. - - By default, the constraints are set to sum(w) = 1 and w >= 0, i.e., - Cmat = np.ones((1, n)) - cvec = np.array([1.0]) - Dmat = -np.eye(n) - dvec = np.zeros(n). - - Notes: - 1) If equality constraints are not needed, set Cmat = np.empty((0, n)) and cvec = []. - 2) If the matrices Cmat and Dmat have more than n columns, it is assumed that the additional columns - (same number for both matrices) correspond to dummy variables (which do not appear in the objective function). + Successive Convex Approximation optimizer tailored for the risk parity problem. """ + def __init__( self, portfolio, @@ -136,7 +119,9 @@ def __init__( dvec=None, ): self.portfolio = portfolio - self.tau = tau or 1e-4 # 0.05 * np.trace(self.portfolio.covariance) / (2 * self.portfolio.number_of_assets) + self.tau = tau or 0.05 * np.sum(np.diag(self.portfolio.covariance)) / ( + 2 * self.portfolio.number_of_assets + ) sca_validator = SuccessiveConvexOptimizerValidator() self.gamma = sca_validator.gamma = gamma self.zeta = sca_validator.zeta = zeta @@ -144,19 +129,18 @@ def __init__( self.wtol = sca_validator.wtol = wtol self.maxiter = sca_validator.maxiter = maxiter self.Cmat = Cmat - self.Dmat = Dmat # Dmat @ w <= dvec + self.Dmat = Dmat self.cvec = cvec self.dvec = dvec - self.number_of_vars = self.Cmat.shape[1] - self.number_of_dummy_vars = self.number_of_vars - self.portfolio.number_of_assets - self.dummy_vars = np.zeros(self.number_of_dummy_vars) - self.CCmat = np.vstack((self.Cmat, -self.Dmat)).T # CCmat.T @ w >= bvec - self.bvec = np.concatenate((self.cvec, -self.dvec)) + self.CCmat = np.vstack((self.Cmat, self.Dmat)).T + self.bvec = np.concatenate((self.cvec, self.dvec)) self.meq = self.Cmat.shape[0] self._funk = self.get_objective_function_value() self.objective_function = [self._funk] self._tauI = self.tau * np.eye(self.portfolio.number_of_assets) - self.Amat = self.portfolio.risk_concentration.jacobian_risk_concentration_vector() + self.Amat = ( + self.portfolio.risk_concentration.jacobian_risk_concentration_vector() + ) self.gvec = self.portfolio.risk_concentration.risk_concentration_vector @property @@ -167,7 +151,7 @@ def Cmat(self): def Cmat(self, value): if value is None: self._Cmat = np.atleast_2d(np.ones(self.portfolio.number_of_assets)) - elif np.atleast_2d(value).shape[1] >= self.portfolio.number_of_assets: + elif np.atleast_2d(value).shape[1] == self.portfolio.number_of_assets: self._Cmat = np.atleast_2d(value) else: raise ValueError( @@ -182,9 +166,9 @@ def Dmat(self): @Dmat.setter def Dmat(self, value): if value is None: - self._Dmat = -np.eye(self.portfolio.number_of_assets) - elif np.atleast_2d(value).shape[1] == self.Cmat.shape[1]: - 
-            self._Dmat = np.atleast_2d(value)
+            self._Dmat = np.eye(self.portfolio.number_of_assets)
+        elif np.atleast_2d(value).shape[1] == self.portfolio.number_of_assets:
+            self._Dmat = -np.atleast_2d(value)
         else:
             raise ValueError(
                 "Dmat shape {} doesnt agree with the number of"
@@ -216,7 +200,7 @@ def dvec(self, value):
         if value is None:
             self._dvec = np.zeros(self.portfolio.number_of_assets)
         elif len(value) == self.Dmat.shape[0]:
-            self._dvec = np.atleast_1d(value)
+            self._dvec = -np.atleast_1d(value)
         else:
             raise ValueError(
                 "dvec shape {} doesnt agree with Dmat shape"
@@ -231,74 +215,42 @@ def get_objective_function_value(self):
             obj += self.portfolio.lmd * self.portfolio.volatility ** 2
         return obj
 
-    def iterate(self, verbose=True):
+    def iterate(self):
         wk = self.portfolio.weights
         g = self.gvec(wk)
         A = np.ascontiguousarray(self.Amat(wk))
         At = np.transpose(A)
         Q = 2 * At @ A + self._tauI
-        q = 2 * np.matmul(At, g) - Q @ wk  # np.matmul() is necessary here since g is not a numpy array
+        q = 2 * np.matmul(At, g) - np.matmul(Q, wk)
         if self.portfolio.has_variance:
             Q += self.portfolio.lmd * self.portfolio.covariance
         if self.portfolio.has_mean_return:
             q -= self.portfolio.alpha * self.portfolio.mean
-        if self.number_of_dummy_vars > 0:
-            Q = np.vstack([np.hstack([Q, np.zeros((self.portfolio.number_of_assets, self.number_of_dummy_vars))]),
-                           np.hstack([np.zeros((self.number_of_dummy_vars, self.portfolio.number_of_assets)),
-                                      self.tau * np.eye(self.portfolio.number_of_assets)])])
-            q = np.concatenate([q, -self.tau * self.dummy_vars])
-        # Call QP solver (min 0.5*x.T G x + a.T x s.t. C.T x >= b) controlling for ill-conditioning:
-        try:
-            w_hat = quadprog.solve_qp(Q, -q, C=self.CCmat, b=self.bvec, meq=self.meq)[0]
-        except ValueError as e:
-            if str(e) == "matrix G is not positive definite":
-                warnings.warn(
-                    "Matrix Q is not positive definite: adding regularization term and then calling QP solver again.")
-                # eigvals = np.linalg.eigvals(Q)
-                # print("    - before regularization: cond. number = {:,.0f}".format(max(eigvals) / min(eigvals)))
-                # print("    - after regularization: cond. number = {:,.0f}".format(max(eigvals + np.trace(Q)/1e7) / min(eigvals + np.trace(Q)/1e7)))
number = {:,.0f}".format(max(eigvals + np.trace(Q)/1e7) / min(eigvals + np.trace(Q)/1e7))) - Q += np.eye(Q.shape[0]) * np.trace(Q)/1e7 - w_hat = quadprog.solve_qp(Q, -q, C=self.CCmat, b=self.bvec, meq=self.meq)[0] - else: - # If the error is different, re-raise it - raise - self.portfolio.weights = wk + self.gamma * (w_hat[:self.portfolio.number_of_assets] - wk) + w_hat = quadprog.solve_qp(Q, -q, C=self.CCmat, b=self.bvec, meq=self.meq)[0] + self.portfolio.weights = wk + self.gamma * (w_hat - wk) fun_next = self.get_objective_function_value() self.objective_function.append(fun_next) has_w_converged = ( - (np.abs(self.portfolio.weights - wk) <= self.wtol * 0.5 * (np.abs(self.portfolio.weights) + np.abs(wk))) - | ((np.abs(self.portfolio.weights) < 1e-6) & (np.abs(wk) < 1e-6)) + np.abs(self.portfolio.weights - wk) + <= 0.5 * self.wtol * (np.abs(self.portfolio.weights) + np.abs(wk)) ).all() has_fun_converged = ( - (np.abs(self._funk - fun_next) <= self.funtol * 0.5 * (np.abs(self._funk) + np.abs(fun_next))) - | ((np.abs(self._funk) <= 1e-10) & (np.abs(fun_next) <= 1e-10)) - ) - if self.number_of_dummy_vars > 0: - have_dummies_converged = ( - (np.abs(w_hat[self.portfolio.number_of_assets:] - self.dummy_vars) <= self.wtol * 0.5 * - (np.abs(w_hat[self.portfolio.number_of_assets:]) + np.abs(self.dummy_vars))) - | ((np.abs(w_hat[self.portfolio.number_of_assets:]) < 1e-6) & (np.abs(self.dummy_vars) < 1e-6)) - ).all() - self.dummy_vars = w_hat[self.portfolio.number_of_assets:] - else: - have_dummies_converged = True - if (has_w_converged and have_dummies_converged) or has_fun_converged: - # if verbose: - # print(f" Has func. converged: {has_fun_converged}; has w converged: {has_w_converged}") + np.abs(self._funk - fun_next) + <= 0.5 * self.funtol * (np.abs(self._funk) + np.abs(fun_next)) + ).all() + if has_w_converged or has_fun_converged: return False self.gamma = self.gamma * (1 - self.zeta * self.gamma) self._funk = fun_next return True - def solve(self, verbose=True): + def solve(self): i = 0 - iterator = range(self.maxiter) - if verbose: - iterator = tqdm(iterator) - for _ in iterator: - if not self.iterate(verbose=verbose): - break - i += 1 + with tqdm(total=self.maxiter) as pbar: + while self.iterate() and i < self.maxiter: + i += 1 + pbar.update() + def project_line_and_box(weights, lower_bound, upper_bound): def objective_function(variable, weights): diff --git a/src/riskparityportfolio/tests/test_modern_rpp.py b/src/riskparityportfolio/tests/test_modern_rpp.py index ad6ffc4..0501d41 100644 --- a/src/riskparityportfolio/tests/test_modern_rpp.py +++ b/src/riskparityportfolio/tests/test_modern_rpp.py @@ -1,7 +1,6 @@ import numpy as np -import riskparityportfolio as rpp -import pytest -import pdb +from ..rpp import RiskParityPortfolio + def test_on_tricky_example(): """This is a test on a known somewhat tricky problem""" @@ -14,138 +13,7 @@ def test_on_tricky_example(): ) b = np.array((0.1594, 0.0126, 0.8280)) ans = np.array([0.2798628, 0.08774909, 0.63238811]) - #pdb.set_trace() - my_portfolio = rpp.RiskParityPortfolio(covariance=S, budget=b) - my_portfolio.design() - np.testing.assert_allclose(my_portfolio.weights, ans, rtol=1e-5) - assert my_portfolio.risk_concentration.evaluate() < 1e-9 - - -def test_random_covmat(): - N = 100 - b = np.ones(N)/N - np.random.seed(42) - U = np.random.multivariate_normal(mean=np.zeros(N), cov=0.1 * np.eye(N), size=round(.7 * N)).T - Sigma = U @ U.T + np.eye(N) - #pdb.set_trace() - my_portfolio = rpp.RiskParityPortfolio(Sigma, budget=b) - my_portfolio.design() 
-    w = my_portfolio.weights
-
-    # assert that the portfolio respect the budget constraint
-    np.testing.assert_almost_equal(np.sum(w), 1.0)
-    # assert that the portfolio respect the no-shortselling constraint
-    np.testing.assert_equal(all(w >= 0), True)
-    # assert that the desired risk contributions are attained
-    rc = w @ (Sigma * w)
-    rc = rc / np.sum(rc)
-    np.testing.assert_allclose(rc, b, atol=1/(10*N))
-
-
-def test_singularity_issues_with_G_matrix():
-    N = 100
-    b = np.ones(N) / N
-    np.random.seed(42)
-    U = np.random.multivariate_normal(mean=np.zeros(N), cov=0.1 * np.eye(N), size=round(.7 * N)).T
-    Sigma = U @ U.T  # singular covariance matrix
-
-    my_portfolio = rpp.RiskParityPortfolio(Sigma, budget=b)
-    with pytest.warns(UserWarning, match="Matrix Q is not positive definite: adding regularization term and then calling QP solver again."):
-        my_portfolio.design(verbose=False, tau=1e-12)  # <-- force ill-conditioned matrix
-    w_ref = my_portfolio.weights
-
-    my_portfolio = rpp.RiskParityPortfolio(Sigma, budget=b)
-    my_portfolio.design(tau=1e-4)  # <-- default tau, should be fine
-    w = my_portfolio.weights
-    np.testing.assert_allclose(w, w_ref, rtol=1e-3)
-
-    # my_portfolio = rpp.RiskParityPortfolio(Sigma, budget=b)
-    # my_portfolio.design(verbose=True, tau=0.05 * np.sum(np.diag(Sigma)) / (2 * N))
-    # w = my_portfolio.weights
-    # np.testing.assert_allclose(w, w_ref, rtol=1e-3)
-    #
-    # my_portfolio = rpp.RiskParityPortfolio(Sigma, budget=b)
-    # my_portfolio.design(verbose=True, tau=2 * 0.1 ** 2 / (2 * N))
-    # w = my_portfolio.weights
-    # np.testing.assert_allclose(w, w_ref, rtol=1e-3)
-
-
-def test_constraints():
-    N = 50
-    np.random.seed(42)
-    U = np.random.multivariate_normal(mean=np.zeros(N), cov=0.1 * np.eye(N), size=round(.7 * N)).T
-    Sigma = U @ U.T + np.eye(N)
-
-    # Default constraints sum(w) = 1 and w >= 0:
-    my_portfolio = rpp.RiskParityPortfolio(Sigma)
-    my_portfolio.design()
-    w1 = my_portfolio.weights
-    np.testing.assert_almost_equal(np.sum(w1), 1)
-    np.testing.assert_equal(all(w1 >= 0), True)
-
-    # Equivalently, specifying explicitly sum(w) = 1:
-    my_portfolio = rpp.RiskParityPortfolio(Sigma)
-    my_portfolio.design(Cmat=np.ones((1, N)), cvec=np.array([1.0]))
-    w2 = my_portfolio.weights
-    np.testing.assert_allclose(w1, w2, rtol=1e-5)
-
-    # Equivalently, specifying explicitly sum(w) = 1 and w >= 0:
-    my_portfolio = rpp.RiskParityPortfolio(Sigma)
-    my_portfolio.design(Cmat=np.ones((1, N)), cvec=np.array([1.0]),
-                        Dmat=-np.eye(N), dvec=np.zeros(N))
-    w3 = my_portfolio.weights
-    np.testing.assert_allclose(w1, w3, rtol=1e-5)
-
-
-    # Additional upper bound: w <= 0.03
-    my_portfolio = rpp.RiskParityPortfolio(Sigma)
-    my_portfolio.design(Cmat=np.ones((1, N)), cvec=np.array([1.0]),
-                        Dmat=np.vstack([np.eye(N), -np.eye(N)]), dvec=np.concatenate([0.03*np.ones(N), np.zeros(N)]))
-    w = my_portfolio.weights
-    np.testing.assert_almost_equal(np.sum(w), 1)
-    np.testing.assert_equal(all(w >= 0), True)
-    print(max(w))
-    np.testing.assert_array_less(w, (0.03 + 1e-3)*np.ones(N))
-
-
-    # Bounds for sum(w): 0.5 <= sum(w) <= 1 (tending to upper bound)
-    my_portfolio = rpp.RiskParityPortfolio(Sigma)
-    my_portfolio.add_mean_return(alpha=1e-6, mean=np.ones(N))  # this adds sum(w) to also maximize sum(w)
-    my_portfolio.design(Cmat=np.empty((0, N)), cvec=[],
-                        Dmat=np.vstack([-np.ones((1,N)), np.ones((1,N))]), dvec=np.array([-0.5, 1]))
-    w = my_portfolio.weights
-    np.testing.assert_almost_equal(np.sum(w), 1.0)
-
-
-    # Bounds for sum(w): 0.5 <= sum(w) <= 1 (tending to lower bound)
-    my_portfolio = rpp.RiskParityPortfolio(Sigma)
-    my_portfolio.add_mean_return(alpha=1e-6, mean=-np.ones(N))  # this adds -sum(w) to also minimize sum(w)
-    my_portfolio.design(Cmat=np.empty((0, N)), cvec=[],
-                        Dmat=np.vstack([-np.ones((1,N)), np.ones((1,N))]), dvec=np.array([-0.5, 1]))
-    w = my_portfolio.weights
-    np.testing.assert_almost_equal(np.sum(w), 0.5, decimal=3)
-
-
-def test_dummy_variables():
-    N = 50
-    np.random.seed(42)
-    U = np.random.multivariate_normal(mean=np.zeros(N), cov=0.1 * np.eye(N), size=round(.7 * N)).T
-    Sigma = U @ U.T + np.eye(N)
-
-    # Upper-bounded: sum(w) = 1 and 0 <= w <= 0.03
-    my_portfolio = rpp.RiskParityPortfolio(Sigma)
-    my_portfolio.design(Cmat=np.ones((1, N)), cvec=np.array([1.0]),
-                        Dmat=np.vstack([np.eye(N), -np.eye(N)]), dvec=np.concatenate([0.03*np.ones(N), np.zeros(N)]))
-    w_ref = my_portfolio.weights
-
-    # Equivalently: sum(w) = 1, 0 <= w <= u, and u <= 0.03 (new dummy variable u, with w_tilde = [w; u])
-    my_portfolio = rpp.RiskParityPortfolio(Sigma)
-    my_portfolio.design(Cmat=np.hstack([np.ones((1, N)), np.zeros((1, N))]), cvec=np.array([1.0]),
-                        Dmat=np.vstack([np.hstack([-np.eye(N), np.zeros((N, N))]),
-                                        np.hstack([np.eye(N), -np.eye(N)]),
-                                        np.hstack([np.zeros((N, N)), np.eye(N)])]),
-                        dvec=np.concatenate([np.zeros(N), np.zeros(N), 0.03*np.ones(N)]))
-    w = my_portfolio.weights
-
-    np.testing.assert_allclose(w, w_ref, rtol=1e-5, atol=1e-5)
+    rpp = RiskParityPortfolio(covariance=S, budget=b)
+    rpp.design()
+    np.testing.assert_allclose(rpp.weights, ans, rtol=1e-5)
+    assert rpp.risk_concentration.evaluate() < 1e-9
diff --git a/src/riskparityportfolio/tests/test_vanilla_rpp.py b/src/riskparityportfolio/tests/test_vanilla_rpp.py
index d0e5cec..ee71b27 100644
--- a/src/riskparityportfolio/tests/test_vanilla_rpp.py
+++ b/src/riskparityportfolio/tests/test_vanilla_rpp.py
@@ -1,8 +1,6 @@
 import riskparityportfolio as rpp
 import numpy as np
 import pytest
-import pdb
-
 
 @pytest.mark.parametrize("method", ["choi", "spinu"])
 def test(method):
@@ -21,23 +19,3 @@ def test(method):
     np.testing.assert_equal(all(w >= 0), True)
     # assert that the desired risk contributions are attained
     np.testing.assert_allclose(rc, b, atol=1/(10*n))
-
-
-def test_random_covmat():
-    N = 100
-    b = np.ones(N)/N
-    np.random.seed(42)
-    #pdb.set_trace()
-    U = np.random.multivariate_normal(mean=np.zeros(N), cov=0.1 * np.eye(N), size=round(.7 * N)).T
-    Sigma = U @ U.T + np.eye(N)
-    w = rpp.vanilla.design(Sigma, b, maxiter=1000, method="spinu")
-    # assert that the portfolio respect the budget constraint
-    np.testing.assert_almost_equal(np.sum(w), 1)
-    # assert that the portfolio respect the no-shortselling constraint
-    np.testing.assert_equal(all(w >= 0), True)
-    # assert that the desired risk contributions are attained
-    rc = w @ (Sigma * w)
-    rc = rc / np.sum(rc)
-    np.testing.assert_allclose(rc, b, atol=1/(10*N))
-
-
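
The workflow that remains supported after this revert is the basic one: build a RiskParityPortfolio from a covariance matrix and a risk budget, call design(), and read weights. The sketch below is an editorial illustration (not part of the patch) that mirrors the removed test_random_covmat example above; it assumes the released riskparityportfolio package is installed and that its dependencies (numpy, quadprog, tqdm) are available.

# Illustrative sketch only, not part of the patch: basic risk parity design
# with the reverted API (design() takes no `verbose` flag and no
# dummy-variable constraints). Data generation mirrors the removed
# test_random_covmat example from the diff above.
import numpy as np
import riskparityportfolio as rpp  # assumes the released package is installed

N = 10
b = np.ones(N) / N                      # uniform risk budget
np.random.seed(42)
U = np.random.multivariate_normal(mean=np.zeros(N), cov=0.1 * np.eye(N),
                                  size=round(0.7 * N)).T
Sigma = U @ U.T + np.eye(N)             # well-conditioned covariance matrix

portfolio = rpp.RiskParityPortfolio(covariance=Sigma, budget=b)
portfolio.design()                      # runs the successive convex approximation loop
w = portfolio.weights

# Relative risk contributions w_i * (Sigma @ w)_i / (w.T @ Sigma @ w)
# should be close to the budget b.
rc = w * (Sigma @ w)
rc /= rc.sum()
print(w.round(4), rc.round(4))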