Hi, here is a minimal example of how you can use KAN to solve a PDE. Hope this helps!
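For context, the example referred to above follows the PDE tutorial that ships with pykan. Below is a minimal sketch of that approach, assuming the 2D Poisson equation on [-1, 1]^2 with exact solution sin(πx)sin(πy); the network width, collocation-point counts, step count, and loss weight `alpha` are illustrative choices, not necessarily the tutorial's exact values:

```python
import numpy as np
import torch
from torch import autograd
from tqdm import tqdm
from kan import KAN
from kan.LBFGS import LBFGS

# Solve u_xx + u_yy = f on [-1, 1]^2 with exact solution sin(pi x) sin(pi y).
sol_fun = lambda x: torch.sin(np.pi * x[:, [0]]) * torch.sin(np.pi * x[:, [1]])
source_fun = lambda x: -2 * np.pi**2 * torch.sin(np.pi * x[:, [0]]) * torch.sin(np.pi * x[:, [1]])

def batch_jacobian(func, x, create_graph=False):
    # Jacobian of func at every point in the batch, shape (batch, out_dim, in_dim).
    def _func_sum(x):
        return func(x).sum(dim=0)
    return autograd.functional.jacobian(_func_sum, x, create_graph=create_graph).permute(1, 0, 2)

# Interior collocation points on a regular mesh.
np_i = 21
x_mesh = torch.linspace(-1, 1, steps=np_i)
y_mesh = torch.linspace(-1, 1, steps=np_i)
X, Y = torch.meshgrid(x_mesh, y_mesh, indexing='ij')
x_i = torch.stack([X.reshape(-1), Y.reshape(-1)]).permute(1, 0)

# Boundary points on the four edges of the square.
helper = lambda X, Y: torch.stack([X.reshape(-1), Y.reshape(-1)]).permute(1, 0)
x_b = torch.cat([helper(X[0], Y[0]), helper(X[-1], Y[-1]),
                 helper(X[:, 0], Y[:, 0]), helper(X[:, -1], Y[:, -1])], dim=0)

model = KAN(width=[2, 2, 1], grid=5, k=3)

def train(steps=20, alpha=0.01):
    optimizer = LBFGS(model.parameters(), lr=1, history_size=10,
                      line_search_fn="strong_wolfe",
                      tolerance_grad=1e-32, tolerance_change=1e-32, tolerance_ys=1e-32)
    pbar = tqdm(range(steps), desc='description')
    for _ in pbar:
        def closure():
            global pde_loss, bc_loss
            optimizer.zero_grad()
            # Interior residual: Laplacian(u) - f, via two batched Jacobians.
            sol_D1_fun = lambda x: batch_jacobian(model, x, create_graph=True)[:, 0, :]
            sol_D2 = batch_jacobian(sol_D1_fun, x_i, create_graph=True)
            lap = torch.sum(torch.diagonal(sol_D2, dim1=1, dim2=2), dim=1, keepdim=True)
            pde_loss = torch.mean((lap - source_fun(x_i)) ** 2)
            # Boundary condition: match the exact solution on the boundary.
            bc_loss = torch.mean((model(x_b) - sol_fun(x_b)) ** 2)
            loss = alpha * pde_loss + bc_loss
            loss.backward()
            return loss
        optimizer.step(closure)
        pbar.set_description("pde loss: %.2e | bc loss: %.2e" %
                             (pde_loss.item(), bc_loss.item()))

train()
```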
Thank you for your assistance. Following your example, I replaced the MLP in my PINN with a KAN for solving the PDE. Below are the modified code and the error message. I've been working on this for quite some time and am unsure how to resolve it.
```python
import numpy as np
import scipy.io
import torch
from torch import autograd
from tqdm import tqdm
from kan import KAN
from kan.LBFGS import LBFGS

data = scipy.io.loadmat('./data/Homo_4Hz_singlesource_ps.mat')
'''
Contents of the .mat file:
A        40401x1   646416   double complex
B        40401x1   646416   double complex
C        40401x1   646416   double complex
Ps       40401x1   646416   double complex
U_imag   40401x1   323208   double
U_real   40401x1   323208   double
m        40401x1   323208   double
x_star   40401x1   323208   double
z_star   40401x1   323208   double
'''
x = data['x_star']
z = data['z_star']
ps = data['Ps']
m = data['m']
A = data['A']
B = data['B']
C = data['C']

# Use all samples for training.
N = x.shape[0]
N_train = N
idx = np.random.choice(N, N_train, replace=False)
x_train = x[idx, :]
z_train = z[idx, :]

# The coordinates need requires_grad=True so that autograd can differentiate
# the network output with respect to them.
x = torch.tensor(x_train, dtype=torch.float32, requires_grad=True)
z = torch.tensor(z_train, dtype=torch.float32, requires_grad=True)

# The coefficient fields enter the residual as constants; convert them to
# torch tensors so they combine with the (complex) network output.
ps_train = torch.tensor(ps[idx, :], dtype=torch.complex64)
m_train = torch.tensor(m[idx, :], dtype=torch.float32)
A_train = torch.tensor(A[idx, :], dtype=torch.complex64)
B_train = torch.tensor(B[idx, :], dtype=torch.complex64)
C_train = torch.tensor(C[idx, :], dtype=torch.complex64)

# Assumed: angular frequency of the 4 Hz source (omega was not defined in the posted snippet).
omega = 2.0 * np.pi * 4.0

model = KAN(width=[2, 10, 10, 10, 10, 2], grid=5, k=3, grid_eps=1.0, noise_scale_base=0.25)
# Batched Jacobian helper (kept from the original post; unused in the loss below).
def batch_jacobian(func, x, create_graph=False):
    def _func_sum(x):
        return func(x).sum(dim=0)
    return autograd.functional.jacobian(_func_sum, x, create_graph=create_graph).permute(1, 0, 2)
def fwd_gradients(Y, x):
    # Forward-gradient (double-backward) trick: the dummy must require grad
    # and the graphs must be created so higher-order derivatives are possible.
    dummy = torch.ones_like(Y, requires_grad=True)
    G = torch.autograd.grad(Y, x, grad_outputs=dummy, create_graph=True)[0]
    Y_x = torch.autograd.grad(G, dummy, grad_outputs=torch.ones_like(G), create_graph=True)[0]
    return Y_x
steps = 25000
loss_values = []
def train():
    optimizer = LBFGS(model.parameters(), lr=1, history_size=10,
                      line_search_fn="strong_wolfe",
                      tolerance_grad=1e-32, tolerance_change=1e-32, tolerance_ys=1e-32)
    pbar = tqdm(range(steps), desc='description')

    for it in pbar:
        def closure():
            # 'global' must be declared inside the closure for the assignment
            # below to update the module-level loss read by the progress bar.
            global loss
            optimizer.zero_grad()
            # PDE residual loss for the Helmholtz equation.
            ureal_and_uimag = model(torch.cat((x, z), dim=1))
            u_real = ureal_and_uimag[:, 0:1]
            u_imag = ureal_and_uimag[:, 1:2]
            u = torch.complex(u_real, u_imag)
            dudx = fwd_gradients(u, x)
            dudz = fwd_gradients(u, z)
            dudxx = fwd_gradients(A_train * dudx, x)
            dudzz = fwd_gradients(B_train * dudz, z)
            f_loss = C_train * omega * omega * m_train * u + dudxx + dudzz - ps_train
            loss = torch.sum(torch.square(torch.abs(f_loss)))
            loss.backward()
            loss_values.append(loss.item())
            return loss

        if it % 10 == 0 and it < 30000:
            model.update_grid_from_samples(torch.cat((x, z), dim=1))
        optimizer.step(closure)
        if it % 10 == 0:
            pbar.set_description("loss: %.2e" % loss.cpu().detach().numpy())

    scipy.io.savemat('loss_KAN.mat', {'loss': loss_values})
    torch.save(model.state_dict(), 'trained_KAN_Helm1.pt')

train()
```
```
description:   0%|          | 0/25000 [00:12<?, ?it/s]
Traceback (most recent call last):
  File "D:/0_Suda_Py/pykan-master/test_KAN_Helm1.py", line 127, in <module>
    class PhysicsInformedNN:
```