Since a raw diff of a notebook is not easy to read, below is a diff produced by nbdime. Most of the changes track the API changes in cvxpy 1.0 (see the cvxpy 1.0 migration guide), plus some whitespace cleanup and a few dimensionality fixes — cvxpy apparently became stricter about the distinction between shapes `(n,)` and `(n, 1)`.
Generated with `nbdime diff --ignore-outputs`:
```diff
## modified /cells/2/source:
@@ -1,18 +1,18 @@
# Random LP
-np.random.seed(hash('lp') % 2 ** 31)
+np.random.seed(hash('lp') % 2 ** 30)
# Dimensions
n = 100
m = 70
A = scipy.sparse.random(m, n, density=0.2, data_rvs = np.random.randn)
-b = np.random.randn(m, 1)
-c = np.random.rand(n, 1)
+b = np.random.randn(m)
+c = np.random.rand(n)
# Problem construction
x = cvxpy.Variable(n)
-objective = cvxpy.Minimize(c.T * x)
-constraints = [x >= 0, A * x <= b]
+objective = cvxpy.Minimize(c.T @ x)
+constraints = [x >= 0, A @ x <= b]
prob = cvxpy.Problem(objective, constraints)
prob.solve(solver='SCS', verbose=True)
## modified /cells/3/source:
@@ -6,17 +6,17 @@ T = 10
n, p = (10, 5)
A = np.random.randn(n, n)
B = np.random.randn(n, p)
-x_init = np.random.randn(n, 1)
-x_final = np.random.randn(n, 1)
+x_init = np.random.randn(n)
+x_final = np.random.randn(n)
def step(A, B, x_prev):
- x = cvxpy.Variable(n, 1)
- u = cvxpy.Variable(p, 1)
+ x = cvxpy.Variable(n)
+ u = cvxpy.Variable(p)
cost = sum(cvxpy.square(u)) + sum(cvxpy.abs(x))
- constraint = (x == A * x_prev + B * u)
+ constraint = (x == A @ x_prev + B @ u)
return cost, constraint, x
-x = cvxpy.Variable(n, 1)
+x = cvxpy.Variable(n)
constraints = [(x == x_init)]
total_cost = 0.
for t in range(T):
## modified /cells/4/source:
@@ -7,12 +7,13 @@ m = 50
x_true = scipy.sparse.rand(n, 1, density=0.1)
A = scipy.sparse.random(m, n, density=0.2, data_rvs = np.random.randn)
-b = A * x_true + 0.1 * np.random.randn(m, 1)
+b = A @ x_true + 0.1 * np.random.randn(m, 1)
+b = np.array([x for x in b.flat])
mu = 1
# Problem construction
x = cvxpy.Variable(n)
-objective = cvxpy.Minimize(0.5 * cvxpy.sum_squares(A*x - b) + mu * cvxpy.norm1(x))
+objective = cvxpy.Minimize(0.5 * cvxpy.sum_squares(A @ x - b) + mu * cvxpy.norm1(x))
prob = cvxpy.Problem(objective)
prob.solve(solver='SCS', verbose=True)
## modified /cells/5/source:
@@ -7,12 +7,13 @@ m = 50
x_true = scipy.sparse.rand(n, 1, density=0.1)
A = scipy.sparse.random(m, n, density=0.2, data_rvs = np.random.randn)
-b = A * x_true + 0.1 * np.random.randn(m, 1)
+b = A @ x_true + 0.1 * np.random.randn(m, 1)
+b = np.array([x for x in b.flat])
mu = 1
# Problem construction
x = cvxpy.Variable(n)
-objective = cvxpy.Minimize(0.5 * cvxpy.sum_squares(A*x - b) + mu * cvxpy.norm1(x))
+objective = cvxpy.Minimize(0.5 * cvxpy.sum_squares(A @ x - b) + mu * cvxpy.norm1(x))
constraints = [x >= 0]
prob = cvxpy.Problem(objective, constraints)
## modified /cells/6/source:
@@ -5,7 +5,7 @@ np.random.seed(hash('sdp') % 2 ** 31)
n = 50
P = np.random.randn(n, n)
P = P + P.T
-Z = cvxpy.semidefinite(n)
+Z = cvxpy.Variable((n, n), PSD=True)
objective = cvxpy.Maximize(cvxpy.lambda_min(P - Z))
prob = cvxpy.Problem(objective, [Z >= 0])
## modified /cells/7/source:
@@ -6,8 +6,9 @@ m = 100
x = cvxpy.Variable(n)
A = np.random.rand(m, n)
x0 = scipy.sparse.rand(n, 1, 0.1)
-b = A*x0
+b = A @ x0
+b = np.array([x for x in b.flat])
-prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.norm1(x)), [A*x == b])
+prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.norm1(x)), [A @ x == b])
prob.solve(solver='SCS', verbose=True)
prob.solve(solver='SCS', verbose=True, acceleration_lookback=0)
## modified /cells/8/source:
@@ -16,10 +16,10 @@ c = np.random.rand(k)
x = cvxpy.Variable(n)
t = cvxpy.Variable(k)
-f = cvxpy.max_entries(t + cvxpy.abs(B * x - c))
+f = cvxpy.max(t + cvxpy.abs(B @ x - c))
constraints = []
for i in range(k):
- constraints.append(cvxpy.norm(A[i]*x) <= t[i])
+ constraints.append(cvxpy.norm(A[i] @ x) <= t[i])
prob = cvxpy.Problem(cvxpy.Minimize(f), constraints)
prob.solve(solver='SCS', verbose=True)
## modified /cells/9/source:
@@ -14,6 +14,6 @@ b[idx] += 10 * np.random.randn(k)
x = cvxpy.Variable(n)
-prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.norm1(A*x - b)))
+prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.norm1(A @ x - b)))
prob.solve(solver='SCS', verbose=True)
prob.solve(solver='SCS', verbose=True, acceleration_lookback=0)
## modified /cells/10/source:
@@ -5,11 +5,11 @@ n = 20
m = int(n / 4)
G = np.random.randn(m, n)
-f = np.random.randn(m, 1)
+f = np.random.randn(m)
power = np.pi
x = cvxpy.Variable(n)
-constraints = [G*x == f]
+constraints = [G @ x == f]
prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.norm(x, power)), constraints)
prob.solve(solver='SCS', verbose=True)
## modified /cells/11/source:
@@ -8,17 +8,17 @@ w_true = np.random.randn(p, 1)
X_tmp = np.random.randn(p, q)
ips = -w_true.T.dot(X_tmp)
-ps = (np.exp(ips)/(1 + np.exp(ips))).T
-labels = 2*(np.random.rand(q,1) < ps) - 1
+ps = (np.exp(ips) / (1 + np.exp(ips))).T
+labels = 2 * (np.random.rand(q, 1) < ps) - 1
X_pos = X_tmp[:,np.where(labels==1)[0]]
X_neg = X_tmp[:,np.where(labels==-1)[0]]
X = np.hstack([X_pos, -X_neg]) # include labels with data
lam = 2
-w = cvxpy.Variable(p, 1)
-obj = (cvxpy.sum_entries(cvxpy.log_sum_exp(cvxpy.vstack([np.zeros((1,q)), w.T*X]), axis = 0))
- + lam * cvxpy.norm(w,1))
+w = cvxpy.Variable(p)
+obj = (cvxpy.sum(cvxpy.log_sum_exp(cvxpy.vstack([np.zeros((q,)), w.T @ X]), axis = 0))
+ + lam * cvxpy.norm(w, 1))
prob = cvxpy.Problem(cvxpy.Minimize(obj))
prob.solve(solver='SCS', verbose=True)
## modified /cells/12/source:
@@ -13,7 +13,7 @@ M[missing_idx] = 0.
X = cvxpy.Variable(m * n)
lam = 0.5
-diff = cvxpy.reshape(X, m, n) - np.reshape(M, (m, n))
+diff = cvxpy.reshape(X, (m, n)) - np.reshape(M, (m, n))
obj = cvxpy.norm(diff, "nuc") + lam * cvxpy.sum_squares(X)
constraints = [X[valid_idx] == M[valid_idx]]
## modified /cells/13/source:
@@ -4,12 +4,12 @@ np.random.seed(hash('min-norm') % 2 ** 31)
m = 500
n = int(m / 2)
A = np.random.randn(m, n)
-b = 10 * np.random.randn(m, 1)
+b = 10 * np.random.randn(m)
G = 2 * np.random.randn(2 * n, n)
x = cvxpy.Variable(n)
-obj = cvxpy.norm(A * x - b)
-constraints = [cvxpy.norm(G * x) <= 1]
+obj = cvxpy.norm(A @ x - b)
+constraints = [cvxpy.norm(G @ x) <= 1]
prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints)
prob.solve(solver='SCS', verbose=True)
## modified /cells/14/source:
@@ -6,10 +6,11 @@ A = np.diag(-np.logspace(-0.5, 1, n))
U = scipy.linalg.orth(np.random.randn(n,n))
A = U.T.dot(A.dot(U))
-P = cvxpy.Symmetric(n, n)
+P = cvxpy.Variable((n, n), symmetric=True)
obj = cvxpy.trace(P)
-constraints = [A.T*P + P*A << -np.eye(n), P >> np.eye(n)]
+constraints = [A.T @ P + P @ A << -np.eye(n), P >> np.eye(n)]
prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints)
+# WARNING: this may run for a very long time (~10min)!
prob.solve(solver='SCS', verbose=True)
prob.solve(solver='SCS', verbose=True, acceleration_lookback=0)
## modified /cells/15/source:
@@ -6,17 +6,17 @@ n = 5000
density = 0.1
mu = np.exp(0.01 * np.random.randn(n)) - 1. # returns
-D = np.random.rand(n) / 10. # idiosyncratic risk
-F = scipy.sparse.rand(n, m, density) / 10. # factor model
+D = np.random.rand(n) / 10. # idiosyncratic risk
+F = scipy.sparse.rand(n, m, density) / 10. # factor model
lambda_risk = 1
leverage = 1
x = cvxpy.Variable(n)
-obj = mu.T * x - lambda_risk * (cvxpy.sum_squares(F.T.dot(x)) +
- cvxpy.sum_squares(cvxpy.mul_elemwise(D, x)))
+obj = mu.T @ x - lambda_risk * (cvxpy.sum_squares(F.T @ x) +
+ cvxpy.sum_squares(cvxpy.multiply(D, x)))
-constraints = [cvxpy.sum_entries(x) == leverage, x >= 0]
+constraints = [cvxpy.sum(x) == leverage, x >= 0]
prob = cvxpy.Problem(cvxpy.Maximize(obj), constraints)
prob.solve(solver='SCS', verbose=True)
## modified /cells/16/source:
@@ -8,14 +8,13 @@ lam = 0.1
A = scipy.sparse.rand(n, n, 0.01)
A = A.T.dot(A).todense() + 0.1 * np.eye(n)
L = np.linalg.cholesky(np.linalg.inv(A))
-X = L.dot(np.random.randn(n, num_samples)) # Draw m experiments according to the covariance matrix A^-1
-S = X.dot(X.T) / num_samples # Estimate of covariance matrix
-mask = np.ones((n,n)) - np.eye(n)
+X = L.dot(np.random.randn(n, num_samples)) # Draw m experiments according to the covariance matrix A^-1
+S = X.dot(X.T) / num_samples # Estimate of covariance matrix
+mask = np.ones((n, n)) - np.eye(n)
-theta = cvxpy.Variable(n, n)
+theta = cvxpy.Variable((n, n))
-obj = (lam*cvxpy.norm1(cvxpy.mul_elemwise(mask, theta)) +
- cvxpy.trace(S * theta) - cvxpy.log_det(theta))
+obj = lam * cvxpy.norm1(cvxpy.multiply(mask, theta)) + cvxpy.trace(S @ theta) - cvxpy.log_det(theta)
prob = cvxpy.Problem(cvxpy.Minimize(obj))
prob.solve(solver='SCS', verbose=True)
## modified /cells/17/source:
@@ -4,8 +4,8 @@ np.random.seed(hash('fused-lasso') % 2 ** 31)
m = 1000
ni = 10
k = 1000
-rho=0.05
-sigma=0.05
+rho = 0.05
+sigma = 0.05
A = np.random.randn(m, ni * k)
A /= np.sqrt(np.sum(A ** 2, 0))
@@ -19,8 +19,9 @@ b = A.dot(x0) + sigma * np.random.randn(m)
lam = 0.1 * sigma * np.sqrt(m * np.log(ni * k))
x = cvxpy.Variable(ni * k)
-obj = cvxpy.sum_squares(A * x - b) + lam * cvxpy.norm1(x) + lam * cvxpy.tv(x)
+obj = cvxpy.sum_squares(A @ x - b) + lam * cvxpy.norm1(x) + lam * cvxpy.tv(x)
prob = cvxpy.Problem(cvxpy.Minimize(obj))
+# WARNING: this may run for a long time (~2min)!
prob.solve(solver='SCS', verbose=True)
prob.solve(solver='SCS', verbose=True, acceleration_lookback=0)
## modified /cells/18/source:
@@ -5,12 +5,11 @@ m = 150
n = 500
A = np.random.randn(m, n)
x0 = np.random.rand(n)
-y = np.sign(A.dot(x0) + 0.05*np.random.randn(m))
+y = np.sign(A.dot(x0) + 0.05 * np.random.randn(m))
lam = 1.0
x = cvxpy.Variable(n)
-obj = ((1./m) * cvxpy.sum_entries(cvxpy.pos(1 - cvxpy.mul_elemwise(y, A * x)))
- + lam * cvxpy.norm(x, 1))
+obj = (1./m) * cvxpy.sum(cvxpy.pos(1 - cvxpy.multiply(y, A @ x))) + lam * cvxpy.norm(x, 1)
prob = cvxpy.Problem(cvxpy.Minimize(obj))
prob.solve(solver='SCS', verbose=True)
## modified /cells/19/source:
@@ -5,14 +5,14 @@ n = 100
r = 10 # Rank
density = 0.1
-L0 = np.random.randn(n, r).dot(np.random.randn(r, n)) # Low rank matrix
-S0 = scipy.sparse.rand(n, n, density) # Sparse matrix w/ Normally distributed entries.
+L0 = np.random.randn(n, r).dot(np.random.randn(r, n)) # Low rank matrix
+S0 = scipy.sparse.rand(n, n, density) # Sparse matrix w/ Normally distributed entries.
S0.data = 10 * np.random.randn(len(S0.data))
M = L0 + S0
-L = cvxpy.Variable(n, n)
-S = cvxpy.Variable(n, n)
+L = cvxpy.Variable((n, n))
+S = cvxpy.Variable((n, n))
lam = 0.1
obj = cvxpy.norm(L, "nuc") + lam * cvxpy.norm1(S)
## modified /cells/20/source:
@@ -7,14 +7,14 @@ k = 3 # Needs to be less than m.
A = np.matrix(np.random.rand(m, n))
A -= np.mean(A, axis=0)
-K = np.array([(A[i].T * A[i]).flatten() for i in range(m)])
+K = np.array([list((A[i].T * A[i]).flat) for i in range(m)])
-sigma_inv1 = cvxpy.Variable(n,n) # Inverse covariance matrix
+sigma_inv1 = cvxpy.Variable((n, n)) # Inverse covariance matrix
t = cvxpy.Variable(m)
tdet = cvxpy.Variable(1)
-obj = cvxpy.sum_largest(t+tdet, k)
-z = K*cvxpy.reshape(sigma_inv1, n*n, 1)
+obj = cvxpy.sum_largest(t + tdet, k)
+z = K @ cvxpy.reshape(sigma_inv1, n * n)
constraints = [-cvxpy.log_det(sigma_inv1) <= tdet, t == z]
prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints)
prob.solve(solver='SCS', verbose=True)
## modified /cells/21/source:
@@ -10,9 +10,9 @@ Xp = np.random.randn(m, d)
Xn = np.random.randn(n, d)
lam = 1
-theta = cvxpy.Variable(d)
-Z = cvxpy.max_elemwise(1 - (Xp * theta * np.ones((1,n)) - (Xn * theta * np.ones((1,m))).T), 0)
-obj = cvxpy.max_entries(cvxpy.sum_entries(Z, axis=0)) + lam * cvxpy.sum_squares(theta)
+theta = cvxpy.Variable((d, 1))
+Z = cvxpy.maximum(1 - (Xp @ theta @ np.ones((1, n)) - (Xn @ theta @ np.ones((1, m))).T), 0)
+obj = cvxpy.max(cvxpy.sum(Z, axis=0)) + lam * cvxpy.sum_squares(theta)
prob = cvxpy.Problem(cvxpy.Minimize(obj))
prob.solve(solver='SCS', verbose=True)
## modified /cells/22/source:
@@ -1,36 +1,36 @@
# Quantile regression
np.random.seed(hash('quantile-regression') % 2 ** 31)
-m = 100 # Number of data entries
-n = 5 # Number of weights
-k = 20 # Number of quantiles
+m = 100 # Number of data entries
+n = 5 # Number of weights
+k = 20 # Number of quantiles
p = 1
sigma = 0.1
-x = np.random.rand(m)* 2 * np.pi * p
+x = np.random.rand(m) * 2 * np.pi * p
y = np.sin(x) + sigma * np.sin(x) * np.random.randn(m)
alphas = np.linspace(1. / (k + 1), 1 - 1. / (k + 1), k) # Do a bunch of quantiles at once
# RBF (Radial Basis Function) features
mu_rbf = np.array([np.linspace(-1, 2 * np.pi * p + 1, n)])
mu_sig = (2 * np.pi * p + 2)/n
-X = np.exp(-(mu_rbf.T - x).T**2 / (2 * mu_sig**2)) # Gaussian
+X = np.exp(-(mu_rbf.T - x).T ** 2 / (2 * mu_sig ** 2)) # Gaussian
-theta = cvxpy.Variable(n,k)
+theta = cvxpy.Variable((n, k))
def quantile_loss(alphas, theta, X, y):
m, n = X.shape
k = len(alphas)
Y = np.tile(y.flatten(), (k, 1)).T
A = np.tile(alphas, (m, 1))
- Z = X * theta - Y
- return cvxpy.sum_entries(
- cvxpy.max_elemwise(
- cvxpy.mul_elemwise(-A, Z),
- cvxpy.mul_elemwise(1 - A, Z)))
+ Z = X @ theta - Y
+ return cvxpy.sum(
+ cvxpy.maximum(
+ cvxpy.multiply(-A, Z),
+ cvxpy.multiply(1 - A, Z)))
obj = quantile_loss(alphas, theta, X, y)
-constraints = [X*(theta[:,1:] - theta[:,:-1]) >= 0]
+constraints = [X @ (theta[:, 1:] - theta[:, :-1]) >= 0]
prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints)
prob.solve(solver='SCS', verbose=True)
## modified /cells/23/source:
@@ -13,7 +13,7 @@ idx = np.random.randint(m, size=k)
b[idx] += 10 * np.random.randn(k)
x = cvxpy.Variable(n)
-prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.sum_entries(cvxpy.huber(A*x - b))))
+prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.sum(cvxpy.huber(A @ x - b))))
prob.solve(solver='SCS', verbose=True)
prob.solve(solver='SCS', verbose=True, acceleration_lookback=0)
## modified /metadata/kernelspec/display_name:
- Python 2
+ Python (scs)
## modified /metadata/kernelspec/name:
- python2
+ scs
## replaced /metadata/language_info/codemirror_mode/version:
- 2
+ 3
## modified /metadata/language_info/pygments_lexer:
- ipython2
+ ipython3
## modified /metadata/language_info/version:
- 2.7.11
+ 3.8.10
## replaced /nbformat_minor:
- 2
+ 4
```
This is a follow-up to #37, where I wasn't able to fully update the notebook in the test folder (which hadn't been touched in 4 years 😅).
This was actually a good exercise, because it revealed two bugs in cvxpy that have already gotten fixed since then: https://github.com/cvxpy/cvxpy/issues/1369 & https://github.com/cvxpy/cvxpy/issues/1370.
In order to run this notebook to completion, I needed to use the cvxpy-master (built from https://github.com/cvxpy/cvxpy/commit/86307f271819bb78fcdf64a9c3a424773e8269fa) to catch the fixes since 1.1.12, though hopefully a new version will be released soon.
Since a raw diff of a notebook is not easy to read, below is a diff produced by nbdime. Most of the changes track the API changes in cvxpy 1.0 (see the cvxpy 1.0 migration guide), plus some whitespace cleanup and a few dimensionality fixes — cvxpy apparently became stricter about the distinction between shapes `(n,)` and `(n, 1)`.
```diff ## modified /cells/2/source: @@ -1,18 +1,18 @@ # Random LP -np.random.seed(hash('lp') % 2 ** 31) +np.random.seed(hash('lp') % 2 ** 30) # Dimensions n = 100 m = 70 A = scipy.sparse.random(m, n, density=0.2, data_rvs = np.random.randn) -b = np.random.randn(m, 1) -c = np.random.rand(n, 1) +b = np.random.randn(m) +c = np.random.rand(n) # Problem construction x = cvxpy.Variable(n) -objective = cvxpy.Minimize(c.T * x) -constraints = [x >= 0, A * x <= b] +objective = cvxpy.Minimize(c.T @ x) +constraints = [x >= 0, A @ x <= b] prob = cvxpy.Problem(objective, constraints) prob.solve(solver='SCS', verbose=True) ## modified /cells/3/source: @@ -6,17 +6,17 @@ T = 10 n, p = (10, 5) A = np.random.randn(n, n) B = np.random.randn(n, p) -x_init = np.random.randn(n, 1) -x_final = np.random.randn(n, 1) +x_init = np.random.randn(n) +x_final = np.random.randn(n) def step(A, B, x_prev): - x = cvxpy.Variable(n, 1) - u = cvxpy.Variable(p, 1) + x = cvxpy.Variable(n) + u = cvxpy.Variable(p) cost = sum(cvxpy.square(u)) + sum(cvxpy.abs(x)) - constraint = (x == A * x_prev + B * u) + constraint = (x == A @ x_prev + B @ u) return cost, constraint, x -x = cvxpy.Variable(n, 1) +x = cvxpy.Variable(n) constraints = [(x == x_init)] total_cost = 0. 
for t in range(T): ## modified /cells/4/source: @@ -7,12 +7,13 @@ m = 50 x_true = scipy.sparse.rand(n, 1, density=0.1) A = scipy.sparse.random(m, n, density=0.2, data_rvs = np.random.randn) -b = A * x_true + 0.1 * np.random.randn(m, 1) +b = A @ x_true + 0.1 * np.random.randn(m, 1) +b = np.array([x for x in b.flat]) mu = 1 # Problem construction x = cvxpy.Variable(n) -objective = cvxpy.Minimize(0.5 * cvxpy.sum_squares(A*x - b) + mu * cvxpy.norm1(x)) +objective = cvxpy.Minimize(0.5 * cvxpy.sum_squares(A @ x - b) + mu * cvxpy.norm1(x)) prob = cvxpy.Problem(objective) prob.solve(solver='SCS', verbose=True) ## modified /cells/5/source: @@ -7,12 +7,13 @@ m = 50 x_true = scipy.sparse.rand(n, 1, density=0.1) A = scipy.sparse.random(m, n, density=0.2, data_rvs = np.random.randn) -b = A * x_true + 0.1 * np.random.randn(m, 1) +b = A @ x_true + 0.1 * np.random.randn(m, 1) +b = np.array([x for x in b.flat]) mu = 1 # Problem construction x = cvxpy.Variable(n) -objective = cvxpy.Minimize(0.5 * cvxpy.sum_squares(A*x - b) + mu * cvxpy.norm1(x)) +objective = cvxpy.Minimize(0.5 * cvxpy.sum_squares(A @ x - b) + mu * cvxpy.norm1(x)) constraints = [x >= 0] prob = cvxpy.Problem(objective, constraints) ## modified /cells/6/source: @@ -5,7 +5,7 @@ np.random.seed(hash('sdp') % 2 ** 31) n = 50 P = np.random.randn(n, n) P = P + P.T -Z = cvxpy.semidefinite(n) +Z = cvxpy.Variable((n, n), PSD=True) objective = cvxpy.Maximize(cvxpy.lambda_min(P - Z)) prob = cvxpy.Problem(objective, [Z >= 0]) ## modified /cells/7/source: @@ -6,8 +6,9 @@ m = 100 x = cvxpy.Variable(n) A = np.random.rand(m, n) x0 = scipy.sparse.rand(n, 1, 0.1) -b = A*x0 +b = A @ x0 +b = np.array([x for x in b.flat]) -prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.norm1(x)), [A*x == b]) +prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.norm1(x)), [A @ x == b]) prob.solve(solver='SCS', verbose=True) prob.solve(solver='SCS', verbose=True, acceleration_lookback=0) ## modified /cells/8/source: @@ -16,10 +16,10 @@ c = np.random.rand(k) x = 
cvxpy.Variable(n) t = cvxpy.Variable(k) -f = cvxpy.max_entries(t + cvxpy.abs(B * x - c)) +f = cvxpy.max(t + cvxpy.abs(B @ x - c)) constraints = [] for i in range(k): - constraints.append(cvxpy.norm(A[i]*x) <= t[i]) + constraints.append(cvxpy.norm(A[i] @ x) <= t[i]) prob = cvxpy.Problem(cvxpy.Minimize(f), constraints) prob.solve(solver='SCS', verbose=True) ## modified /cells/9/source: @@ -14,6 +14,6 @@ b[idx] += 10 * np.random.randn(k) x = cvxpy.Variable(n) -prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.norm1(A*x - b))) +prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.norm1(A @ x - b))) prob.solve(solver='SCS', verbose=True) prob.solve(solver='SCS', verbose=True, acceleration_lookback=0) ## modified /cells/10/source: @@ -5,11 +5,11 @@ n = 20 m = int(n / 4) G = np.random.randn(m, n) -f = np.random.randn(m, 1) +f = np.random.randn(m) power = np.pi x = cvxpy.Variable(n) -constraints = [G*x == f] +constraints = [G @ x == f] prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.norm(x, power)), constraints) prob.solve(solver='SCS', verbose=True) ## modified /cells/11/source: @@ -8,17 +8,17 @@ w_true = np.random.randn(p, 1) X_tmp = np.random.randn(p, q) ips = -w_true.T.dot(X_tmp) -ps = (np.exp(ips)/(1 + np.exp(ips))).T -labels = 2*(np.random.rand(q,1) < ps) - 1 +ps = (np.exp(ips) / (1 + np.exp(ips))).T +labels = 2 * (np.random.rand(q, 1) < ps) - 1 X_pos = X_tmp[:,np.where(labels==1)[0]] X_neg = X_tmp[:,np.where(labels==-1)[0]] X = np.hstack([X_pos, -X_neg]) # include labels with data lam = 2 -w = cvxpy.Variable(p, 1) -obj = (cvxpy.sum_entries(cvxpy.log_sum_exp(cvxpy.vstack([np.zeros((1,q)), w.T*X]), axis = 0)) - + lam * cvxpy.norm(w,1)) +w = cvxpy.Variable(p) +obj = (cvxpy.sum(cvxpy.log_sum_exp(cvxpy.vstack([np.zeros((q,)), w.T @ X]), axis = 0)) + + lam * cvxpy.norm(w, 1)) prob = cvxpy.Problem(cvxpy.Minimize(obj)) prob.solve(solver='SCS', verbose=True) ## modified /cells/12/source: @@ -13,7 +13,7 @@ M[missing_idx] = 0. 
X = cvxpy.Variable(m * n) lam = 0.5 -diff = cvxpy.reshape(X, m, n) - np.reshape(M, (m, n)) +diff = cvxpy.reshape(X, (m, n)) - np.reshape(M, (m, n)) obj = cvxpy.norm(diff, "nuc") + lam * cvxpy.sum_squares(X) constraints = [X[valid_idx] == M[valid_idx]] ## modified /cells/13/source: @@ -4,12 +4,12 @@ np.random.seed(hash('min-norm') % 2 ** 31) m = 500 n = int(m / 2) A = np.random.randn(m, n) -b = 10 * np.random.randn(m, 1) +b = 10 * np.random.randn(m) G = 2 * np.random.randn(2 * n, n) x = cvxpy.Variable(n) -obj = cvxpy.norm(A * x - b) -constraints = [cvxpy.norm(G * x) <= 1] +obj = cvxpy.norm(A @ x - b) +constraints = [cvxpy.norm(G @ x) <= 1] prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints) prob.solve(solver='SCS', verbose=True) ## modified /cells/14/source: @@ -6,10 +6,11 @@ A = np.diag(-np.logspace(-0.5, 1, n)) U = scipy.linalg.orth(np.random.randn(n,n)) A = U.T.dot(A.dot(U)) -P = cvxpy.Symmetric(n, n) +P = cvxpy.Variable((n, n), symmetric=True) obj = cvxpy.trace(P) -constraints = [A.T*P + P*A << -np.eye(n), P >> np.eye(n)] +constraints = [A.T @ P + P @ A << -np.eye(n), P >> np.eye(n)] prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints) +# WARNING: this may run for a very long time (~10min)! prob.solve(solver='SCS', verbose=True) prob.solve(solver='SCS', verbose=True, acceleration_lookback=0) ## modified /cells/15/source: @@ -6,17 +6,17 @@ n = 5000 density = 0.1 mu = np.exp(0.01 * np.random.randn(n)) - 1. # returns -D = np.random.rand(n) / 10. # idiosyncratic risk -F = scipy.sparse.rand(n, m, density) / 10. # factor model +D = np.random.rand(n) / 10. # idiosyncratic risk +F = scipy.sparse.rand(n, m, density) / 10. 
# factor model lambda_risk = 1 leverage = 1 x = cvxpy.Variable(n) -obj = mu.T * x - lambda_risk * (cvxpy.sum_squares(F.T.dot(x)) + - cvxpy.sum_squares(cvxpy.mul_elemwise(D, x))) +obj = mu.T @ x - lambda_risk * (cvxpy.sum_squares(F.T @ x) + + cvxpy.sum_squares(cvxpy.multiply(D, x))) -constraints = [cvxpy.sum_entries(x) == leverage, x >= 0] +constraints = [cvxpy.sum(x) == leverage, x >= 0] prob = cvxpy.Problem(cvxpy.Maximize(obj), constraints) prob.solve(solver='SCS', verbose=True) ## modified /cells/16/source: @@ -8,14 +8,13 @@ lam = 0.1 A = scipy.sparse.rand(n, n, 0.01) A = A.T.dot(A).todense() + 0.1 * np.eye(n) L = np.linalg.cholesky(np.linalg.inv(A)) -X = L.dot(np.random.randn(n, num_samples)) # Draw m experiments according to the covariance matrix A^-1 -S = X.dot(X.T) / num_samples # Estimate of covariance matrix -mask = np.ones((n,n)) - np.eye(n) +X = L.dot(np.random.randn(n, num_samples)) # Draw m experiments according to the covariance matrix A^-1 +S = X.dot(X.T) / num_samples # Estimate of covariance matrix +mask = np.ones((n, n)) - np.eye(n) -theta = cvxpy.Variable(n, n) +theta = cvxpy.Variable((n, n)) -obj = (lam*cvxpy.norm1(cvxpy.mul_elemwise(mask, theta)) + - cvxpy.trace(S * theta) - cvxpy.log_det(theta)) +obj = lam * cvxpy.norm1(cvxpy.multiply(mask, theta)) + cvxpy.trace(S @ theta) - cvxpy.log_det(theta) prob = cvxpy.Problem(cvxpy.Minimize(obj)) prob.solve(solver='SCS', verbose=True) ## modified /cells/17/source: @@ -4,8 +4,8 @@ np.random.seed(hash('fused-lasso') % 2 ** 31) m = 1000 ni = 10 k = 1000 -rho=0.05 -sigma=0.05 +rho = 0.05 +sigma = 0.05 A = np.random.randn(m, ni * k) A /= np.sqrt(np.sum(A ** 2, 0)) @@ -19,8 +19,9 @@ b = A.dot(x0) + sigma * np.random.randn(m) lam = 0.1 * sigma * np.sqrt(m * np.log(ni * k)) x = cvxpy.Variable(ni * k) -obj = cvxpy.sum_squares(A * x - b) + lam * cvxpy.norm1(x) + lam * cvxpy.tv(x) +obj = cvxpy.sum_squares(A @ x - b) + lam * cvxpy.norm1(x) + lam * cvxpy.tv(x) prob = cvxpy.Problem(cvxpy.Minimize(obj)) +# WARNING: 
this may run for a long time (~2min)! prob.solve(solver='SCS', verbose=True) prob.solve(solver='SCS', verbose=True, acceleration_lookback=0) ## modified /cells/18/source: @@ -5,12 +5,11 @@ m = 150 n = 500 A = np.random.randn(m, n) x0 = np.random.rand(n) -y = np.sign(A.dot(x0) + 0.05*np.random.randn(m)) +y = np.sign(A.dot(x0) + 0.05 * np.random.randn(m)) lam = 1.0 x = cvxpy.Variable(n) -obj = ((1./m) * cvxpy.sum_entries(cvxpy.pos(1 - cvxpy.mul_elemwise(y, A * x))) - + lam * cvxpy.norm(x, 1)) +obj = (1./m) * cvxpy.sum(cvxpy.pos(1 - cvxpy.multiply(y, A @ x))) + lam * cvxpy.norm(x, 1) prob = cvxpy.Problem(cvxpy.Minimize(obj)) prob.solve(solver='SCS', verbose=True) ## modified /cells/19/source: @@ -5,14 +5,14 @@ n = 100 r = 10 # Rank density = 0.1 -L0 = np.random.randn(n, r).dot(np.random.randn(r, n)) # Low rank matrix -S0 = scipy.sparse.rand(n, n, density) # Sparse matrix w/ Normally distributed entries. +L0 = np.random.randn(n, r).dot(np.random.randn(r, n)) # Low rank matrix +S0 = scipy.sparse.rand(n, n, density) # Sparse matrix w/ Normally distributed entries. S0.data = 10 * np.random.randn(len(S0.data)) M = L0 + S0 -L = cvxpy.Variable(n, n) -S = cvxpy.Variable(n, n) +L = cvxpy.Variable((n, n)) +S = cvxpy.Variable((n, n)) lam = 0.1 obj = cvxpy.norm(L, "nuc") + lam * cvxpy.norm1(S) ## modified /cells/20/source: @@ -7,14 +7,14 @@ k = 3 # Needs to be less than m. 
A = np.matrix(np.random.rand(m, n)) A -= np.mean(A, axis=0) -K = np.array([(A[i].T * A[i]).flatten() for i in range(m)]) +K = np.array([list((A[i].T * A[i]).flat) for i in range(m)]) -sigma_inv1 = cvxpy.Variable(n,n) # Inverse covariance matrix +sigma_inv1 = cvxpy.Variable((n, n)) # Inverse covariance matrix t = cvxpy.Variable(m) tdet = cvxpy.Variable(1) -obj = cvxpy.sum_largest(t+tdet, k) -z = K*cvxpy.reshape(sigma_inv1, n*n, 1) +obj = cvxpy.sum_largest(t + tdet, k) +z = K @ cvxpy.reshape(sigma_inv1, n * n) constraints = [-cvxpy.log_det(sigma_inv1) <= tdet, t == z] prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints) prob.solve(solver='SCS', verbose=True) ## modified /cells/21/source: @@ -10,9 +10,9 @@ Xp = np.random.randn(m, d) Xn = np.random.randn(n, d) lam = 1 -theta = cvxpy.Variable(d) -Z = cvxpy.max_elemwise(1 - (Xp * theta * np.ones((1,n)) - (Xn * theta * np.ones((1,m))).T), 0) -obj = cvxpy.max_entries(cvxpy.sum_entries(Z, axis=0)) + lam * cvxpy.sum_squares(theta) +theta = cvxpy.Variable((d, 1)) +Z = cvxpy.maximum(1 - (Xp @ theta @ np.ones((1, n)) - (Xn @ theta @ np.ones((1, m))).T), 0) +obj = cvxpy.max(cvxpy.sum(Z, axis=0)) + lam * cvxpy.sum_squares(theta) prob = cvxpy.Problem(cvxpy.Minimize(obj)) prob.solve(solver='SCS', verbose=True) ## modified /cells/22/source: @@ -1,36 +1,36 @@ # Quantile regression np.random.seed(hash('quantile-regression') % 2 ** 31) -m = 100 # Number of data entries -n = 5 # Number of weights -k = 20 # Number of quantiles +m = 100 # Number of data entries +n = 5 # Number of weights +k = 20 # Number of quantiles p = 1 sigma = 0.1 -x = np.random.rand(m)* 2 * np.pi * p +x = np.random.rand(m) * 2 * np.pi * p y = np.sin(x) + sigma * np.sin(x) * np.random.randn(m) alphas = np.linspace(1. / (k + 1), 1 - 1. 
/ (k + 1), k) # Do a bunch of quantiles at once # RBF (Radial Basis Function) features mu_rbf = np.array([np.linspace(-1, 2 * np.pi * p + 1, n)]) mu_sig = (2 * np.pi * p + 2)/n -X = np.exp(-(mu_rbf.T - x).T**2 / (2 * mu_sig**2)) # Gaussian +X = np.exp(-(mu_rbf.T - x).T ** 2 / (2 * mu_sig ** 2)) # Gaussian -theta = cvxpy.Variable(n,k) +theta = cvxpy.Variable((n, k)) def quantile_loss(alphas, theta, X, y): m, n = X.shape k = len(alphas) Y = np.tile(y.flatten(), (k, 1)).T A = np.tile(alphas, (m, 1)) - Z = X * theta - Y - return cvxpy.sum_entries( - cvxpy.max_elemwise( - cvxpy.mul_elemwise(-A, Z), - cvxpy.mul_elemwise(1 - A, Z))) + Z = X @ theta - Y + return cvxpy.sum( + cvxpy.maximum( + cvxpy.multiply(-A, Z), + cvxpy.multiply(1 - A, Z))) obj = quantile_loss(alphas, theta, X, y) -constraints = [X*(theta[:,1:] - theta[:,:-1]) >= 0] +constraints = [X @ (theta[:, 1:] - theta[:, :-1]) >= 0] prob = cvxpy.Problem(cvxpy.Minimize(obj), constraints) prob.solve(solver='SCS', verbose=True) ## modified /cells/23/source: @@ -13,7 +13,7 @@ idx = np.random.randint(m, size=k) b[idx] += 10 * np.random.randn(k) x = cvxpy.Variable(n) -prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.sum_entries(cvxpy.huber(A*x - b)))) +prob = cvxpy.Problem(cvxpy.Minimize(cvxpy.sum(cvxpy.huber(A @ x - b)))) prob.solve(solver='SCS', verbose=True) prob.solve(solver='SCS', verbose=True, acceleration_lookback=0) ## modified /metadata/kernelspec/display_name: - Python 2 + Python (scs) ## modified /metadata/kernelspec/name: - python2 + scs ## replaced /metadata/language_info/codemirror_mode/version: - 2 + 3 ## modified /metadata/language_info/pygments_lexer: - ipython2 + ipython3 ## modified /metadata/language_info/version: - 2.7.11 + 3.8.10 ## replaced /nbformat_minor: - 2 + 4 ```nbdime diff --ignore-outputs