Open iapuiu opened 4 years ago
I managed to solve it by doing a vertical concatenation of the type X = np.concatenate((X,numpy_CG(M, GSK[:,z], X_old[:,z], tol, max_iter)), axis=1) but this is slower. Is there any way to assign to pre-allocated memory?
I am trying to get the gradient of a function that has inside a conjugate gradient solver defined by me. This gives me an error and I can't understand why.
`#!/usr/bin/env python3
-- coding: utf-8 --
""" Created on Mon Jun 29 21:18:24 2020
@author: i.a.puiu cg test """
import numpy as np import autograd.numpy as np from autograd import grad, jacobian
def numpy_CG(A,b, x0, tol, max_iter): r = b - A@x0 p = r + 0.0 count = 0 x = x0+0.0 while count<= min(max_iter, A.shape[0]): alpha_k = np.dot(p,r)/ np.dot(p, A@p) x = x + alphak*p r = r - A @ x if np.linalg.norm(r_)<tol: return x betak = -np.dot(A@p,r)/np.dot(p,A@p) p = r_ + betak * p count+=1 r = r+0.0 print('iter lim reached') return x
def get_inverse_part(b, A, GSK, X_old, tol, max_iter): Nz = GSK.shape[1] X = np.zeros((GSK.shape[0],GSK.shape[1]))
print(X.shape)
def get_PbsGSK_prod(b, A, GSK, X_old, tol, max_iter): X = get_inverse_part(b, A, GSK, X_old, tol, max_iter) return np.diag(b) @ A @ X, X
def f(b_s, A_s, GSK, X_old, Omega_lhs_st, PTDFst,tol, max_iter): PbsGSK = get_PbsGSK_prod(b_s, A_s, GSK, X_old, tol, max_iter) return np.linalg.norm(PTDFst - Omega_lhs_st @ PbsGSK)
nn, nl, nz = 3, 5, 2 A_s = np.random.randn(nl,nn) b = np.random.randn(nl) GSK = np.random.randn(nn,nz) Omega_lhs_st = np.eye(nl) PTDFst = np.random.randn(nl,nz) X_old = np.zeros(GSK.shape)
grf = grad(f)
print(grf(b,A_s, GSK, X_old, Omega_lhs_st, PTDFst, 10**(-8), 100)) ` the error I get is : ValueError: setting an array element with a sequence and the problem seems to be in this line : X[:,z] = numpy_CG(M, GSK[:,z], X_old[:,z], tol, max_iter) due to the grad function returning ArrayBox object
Could you please help me with this? Not sure how to keep indents in here so I attached the code as text file code.txt
Many thanks