maziarraissi / PINNs

Physics Informed Deep Learning: Data-driven Solutions and Discovery of Nonlinear Partial Differential Equations
https://maziarraissi.github.io/PINNs
MIT License

solving pinn #59

Open AAB28 opened 1 month ago

AAB28 commented 1 month ago

In the code below, which is a PINN for the second-order wave equation, every training run gives me a different solution instead of one particular solution, and the model does not follow the IC and BC. I took the code from GitHub and tried to incorporate it with mine.

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
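One thing I suspect but am not sure about: nothing in the script is seeded, so the weight initialization and the random collocation points change on every run. A minimal sketch of what I think fixing the seeds would look like (the seed value 0 is arbitrary):

import random

import numpy as np
import torch

# Fix the RNGs that affect this script: Python's random module, NumPy
# (collocation sampling) and PyTorch (weight initialization).
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(seed)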

Check device

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

Define the neural network

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.hidden_layer1 = nn.Linear(2, 50)  # Increased number of neurons
        self.hidden_layer2 = nn.Linear(50, 50)
        self.hidden_layer3 = nn.Linear(50, 50)
        self.hidden_layer4 = nn.Linear(50, 50)
        self.hidden_layer5 = nn.Linear(50, 50)
        self.output_layer = nn.Linear(50, 1)

def forward(self, x, t):
    inputs = torch.cat([x, t], axis=1)
    layer1_out = torch.tanh(self.hidden_layer1(inputs))  # Changed activation function
    layer2_out = torch.tanh(self.hidden_layer2(layer1_out))
    layer3_out = torch.tanh(self.hidden_layer3(layer2_out))
    layer4_out = torch.tanh(self.hidden_layer4(layer3_out))
    layer5_out = torch.tanh(self.hidden_layer5(layer4_out))
    output = self.output_layer(layer5_out)
    return output

Initialize the neural network

net = Net().to(device)
mse_cost_function = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)

Parameters for the wave equation

Nx = 100  # Number of spatial points
Nt = 100  # Number of time points
L = 1.0   # Length of spatial domain
T = 1.0   # Total time
c = 1.0   # Speed of wave propagation

Calculate spatial and time steps

hx = L / (Nx - 1)
ht = T / (Nt - 1)

Generate training data

x = np.linspace(0, L, Nx)
t = np.linspace(0, T, Nt)
x, t = np.meshgrid(x, t)
x = x.reshape(-1, 1)
t = t.reshape(-1, 1)

Initial Conditions

u_init = np.zeros((Nx, Nt))
u_init[10:30, 0] = 0.5
u_init[10:30, 1] = 0.5

Boundary Conditions

def boundary_conditions(t):
    return np.zeros_like(t)

Collocation Points for PDE Residual

x_collocation = np.random.uniform(low=0.0, high=L, size=(10000, 1))
t_collocation = np.random.uniform(low=0.0, high=T, size=(10000, 1))
all_zeros = np.zeros((10000, 1))

PDE loss function

def f(x, t, net):
    # Residual of the wave equation: u_tt - c^2 * u_xx = 0
    x.requires_grad_(True)
    t.requires_grad_(True)

    u = net(x, t)
    u_x = torch.autograd.grad(u.sum(), x, create_graph=True)[0]
    u_xx = torch.autograd.grad(u_x.sum(), x, create_graph=True)[0]
    u_t = torch.autograd.grad(u.sum(), t, create_graph=True)[0]
    u_tt = torch.autograd.grad(u_t.sum(), t, create_graph=True)[0]
    pde = u_tt - c**2 * u_xx
    return pde
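Since the wave equation is second order in time, it needs both u(x, 0) and u_t(x, 0). In the code I approximate the velocity condition by also fitting u at t = ht; as I understand it, the derivative could instead be enforced directly with autograd, the same way the residual above is built. A minimal sketch of that idea, assuming zero initial velocity (the function name is just illustrative, it is not in my code):

import torch

def ic_velocity_residual(x0, t0, net):
    # u_t(x, 0): differentiate the network output w.r.t. time at t = 0
    t0 = t0.clone().requires_grad_(True)
    u0 = net(x0, t0)
    u_t0 = torch.autograd.grad(u0.sum(), t0, create_graph=True)[0]
    return u_t0  # penalize with MSE against zeros, like the PDE residual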

Training parameters

iterations = 5000

Training the PINN

for epoch in range(iterations):
    optimizer.zero_grad()

    # Initial Conditions Loss
    x_ic = np.linspace(0, L, Nx).reshape(-1, 1)
    t_ic = np.zeros((Nx, 1))
    t_ic_sec = np.ones((Nx, 1)) * ht
    u_ic_first = u_init[:, 0].reshape(-1, 1)
    u_ic_sec = u_init[:, 1].reshape(-1, 1)

    pt_x_ic = torch.from_numpy(x_ic).float().to(device)
    pt_t_ic_first = torch.from_numpy(t_ic).float().to(device)
    pt_t_ic_sec = torch.from_numpy(t_ic_sec).float().to(device)
    pt_u_ic_first = torch.from_numpy(u_ic_first).float().to(device)
    pt_u_ic_sec = torch.from_numpy(u_ic_sec).float().to(device)

    net_ic_out_first = net(pt_x_ic, pt_t_ic_first)
    net_ic_out_sec = net(pt_x_ic, pt_t_ic_sec)
    mse_ic_first = mse_cost_function(net_ic_out_first, pt_u_ic_first)
    mse_ic_sec = mse_cost_function(net_ic_out_sec, pt_u_ic_sec)

    # Boundary Conditions Loss
    t_bc = np.linspace(0, T, Nt).reshape(-1, 1)
    x_bc_first = np.zeros((Nt, 1))
    x_bc_sec = np.ones((Nt, 1)) * L

    pt_x_bc_first = torch.from_numpy(x_bc_first).float().to(device)
    pt_x_bc_sec = torch.from_numpy(x_bc_sec).float().to(device)
    pt_t_bc = torch.from_numpy(t_bc).float().to(device)

    pt_u_bc_first = torch.from_numpy(boundary_conditions(t_bc)).float().to(device)
    pt_u_bc_sec = torch.from_numpy(boundary_conditions(t_bc)).float().to(device)

    net_bc_out_first = net(pt_x_bc_first, pt_t_bc)
    net_bc_out_sec = net(pt_x_bc_sec, pt_t_bc)

    mse_bc_first = mse_cost_function(net_bc_out_first, pt_u_bc_first)
    mse_bc_sec = mse_cost_function(net_bc_out_sec, pt_u_bc_sec)

    # PDE Loss
    pt_x_collocation = torch.from_numpy(x_collocation).float().to(device)
    pt_t_collocation = torch.from_numpy(t_collocation).float().to(device)
    pt_all_zeros = torch.from_numpy(all_zeros).float().to(device)

    f_out = f(pt_x_collocation, pt_t_collocation, net)
    mse_f = mse_cost_function(f_out, pt_all_zeros)

    # Combine the loss functions
    loss = mse_ic_first + mse_ic_sec + mse_bc_first + mse_bc_sec + mse_f

    loss.backward()
    optimizer.step()

    # Print and monitor training progress
    if epoch % 100 == 0:
        with torch.autograd.no_grad():
            print(f'Epoch {epoch}, Training Loss: {loss.item()}')
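One more thing I was unsure about: all five loss terms are summed with equal weight, and with 10000 collocation points against only 100 IC/BC points I wonder whether the PDE residual dominates the loss and that is why the IC and BC are not respected. A minimal sketch of a weighted variant of the combined loss (the weight values are made up, just to illustrate):

def weighted_loss(mse_ic_first, mse_ic_sec, mse_bc_first, mse_bc_sec, mse_f,
                  w_ic=10.0, w_bc=10.0, w_f=1.0):
    # Hypothetical weights: emphasize the IC/BC terms over the PDE residual
    return (w_ic * (mse_ic_first + mse_ic_sec)
            + w_bc * (mse_bc_first + mse_bc_sec)
            + w_f * mse_f)

This would replace the line loss = mse_ic_first + mse_ic_sec + mse_bc_first + mse_bc_sec + mse_f inside the training loop.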

Visualization

with torch.no_grad():
    x_vis = np.linspace(0, L, Nx)
    t_vis = np.linspace(0, T, Nt)
    ms_x, ms_t = np.meshgrid(x_vis, t_vis)
    x_flat = ms_x.ravel().reshape(-1, 1)
    t_flat = ms_t.ravel().reshape(-1, 1)

    pt_x_vis = torch.from_numpy(x_flat).float().to(device)
    pt_t_vis = torch.from_numpy(t_flat).float().to(device)
    pt_u_vis = net(pt_x_vis, pt_t_vis).cpu().numpy().reshape(ms_x.shape)

# Plot 3D surface plot
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(ms_x, ms_t, pt_u_vis, cmap='viridis')

ax.set_xlabel('x')
ax.set_ylabel('t')
ax.set_zlabel('u')
ax.set_title('3D Surface Plot of Wave Equation Solution')
plt.show()

Save Model

torch.save(net.state_dict(), "model_wave.pt")

I am unable to find the error. Could someone please suggest where it is going wrong? This is all very new to me and I am a beginner at it.