PaddlePaddle / PaddleScience

PaddleScience is an SDK and library for developing AI-driven scientific computing applications based on PaddlePaddle.
http://paddlescience-docs.rtfd.io/
Apache License 2.0

Reproduce the PINNs paper #27

Closed wuxinwang1997 closed 1 year ago

wuxinwang1997 commented 2 years ago

Reproduce with PaddleScience all the code from Raissi, Maziar, Paris Perdikaris, and George E. Karniadakis. "Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations." Journal of Computational Physics 378 (2019): 686-707. The original repository is https://github.com/maziarraissi/PINNs

xingfeng01 commented 2 years ago

Hi, did you run into any problems during the reproduction?

wuxinwang1997 commented 2 years ago

> Hi, did you run into any problems during the reproduction?

I'm currently reproducing it directly with Paddle, following the original paper. When computing gradients I found that for the second-order derivative I can only set retain_graph=True, create_graph=False, yet the API docs say that setting create_graph=True enables higher-order derivatives. I'm not sure whether the computation is correct with this setting, or whether the tanh function does not support second-order derivatives. With the same hyperparameters the training loss is about the same as in the original paper, but the test error is an order of magnitude larger.
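One way to isolate the tanh question is a minimal double-differentiation check, separate from the PINN code. The sketch below assumes Paddle 2.x dynamic-graph mode; the analytic formula in the last line is included only for comparison. If this runs, tanh itself supports second-order derivatives and the failure must come from another operator in the network.

```python
import paddle

# Minimal second-derivative check for tanh, independent of the PINN network.
x = paddle.linspace(-1.0, 1.0, 5)
x.stop_gradient = False

y = paddle.tanh(x)

# First derivative: create_graph=True keeps the grad graph so it can be differentiated again.
dy_dx = paddle.grad(y, x, retain_graph=True, create_graph=True)[0]

# Second derivative (create_graph=False: nothing is differentiated beyond this point here).
d2y_dx2 = paddle.grad(dy_dx, x, retain_graph=True, create_graph=False)[0]

print(d2y_dx2.numpy())
# Analytic second derivative of tanh for comparison: -2*tanh(x)*(1 - tanh(x)^2)
print((-2.0 * paddle.tanh(x) * (1.0 - paddle.tanh(x) ** 2)).numpy())
```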

tongxin commented 2 years ago

Could you paste your code so we can take a look?

wuxinwang1997 commented 2 years ago

> Could you paste your code so we can take a look?

Here is the main body of the network and training code, built following the original paper.
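For reference, `net_f_uv` below implements the residual of the nonlinear Schrödinger benchmark from the paper: writing the complex solution as $h(t,x) = u(t,x) + i\,v(t,x)$, the PDE

$$ i\,h_t + 0.5\,h_{xx} + |h|^2 h = 0 $$

splits into the two real-valued residuals used in the code (f_u is the imaginary part, f_v the negated real part):

$$ f_u = u_t + 0.5\,v_{xx} + (u^2 + v^2)\,v, \qquad f_v = v_t - 0.5\,u_{xx} - (u^2 + v^2)\,u. $$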

```python
import time

import numpy as np
import paddle
import paddle.nn as nn


class PhysicsInformedNN(nn.Layer):
    def __init__(self, x0, u0, v0, tb, X_f, layers, lb, ub):
        super(PhysicsInformedNN, self).__init__()
        self.layers = layers
        X0 = np.concatenate((x0, 0 * x0), 1)            # (x0, 0)
        X_lb = np.concatenate((0 * tb + lb[0], tb), 1)  # (lb[0], tb)
        X_ub = np.concatenate((0 * tb + ub[0], tb), 1)  # (ub[0], tb)

        self.lb = lb
        self.ub = ub

        self.x0 = X0[:, 0:1]
        self.t0 = X0[:, 1:2]

        self.x_lb = X_lb[:, 0:1]
        self.t_lb = X_lb[:, 1:2]

        self.x_ub = X_ub[:, 0:1]
        self.t_ub = X_ub[:, 1:2]

        self.x_f = X_f[:, 0:1]
        self.t_f = X_f[:, 1:2]

        self.u0 = u0
        self.v0 = v0

        self.fc_list = nn.LayerList()
        for i in range(0, len(layers) - 1):
            self.fc_list.append(nn.Linear(in_features=layers[i],
                                          out_features=layers[i + 1],
                                          weight_attr=self.init_NN(layers[i], layers[i + 1]),
                                          bias_attr=nn.initializer.Constant(value=0.0),
                                          name='fc' + str(i)))

        self.optimizer = paddle.optimizer.Adam(parameters=self.fc_list.parameters())
        # self.scheduler = paddle.optimizer.lr.CosineAnnealingDecay(learning_rate=3e-4, T_max=50000, eta_min=3e-6, verbose=False)
        # self.optimizer = paddle.optimizer.Adam(learning_rate=self.scheduler, parameters=self.fc_list.parameters())

    def forward(self):
        self.optimizer.clear_grad()
        # Initial-condition, periodic-boundary, and collocation (residual) predictions.
        u0_pred, v0_pred, _, _ = self.net_uv(paddle.to_tensor(self.x0, dtype='float32'), paddle.to_tensor(self.t0, dtype='float32'))
        u_lb_pred, v_lb_pred, u_x_lb_pred, v_x_lb_pred = self.net_uv(paddle.to_tensor(self.x_lb, dtype='float32'), paddle.to_tensor(self.t_lb, dtype='float32'))
        u_ub_pred, v_ub_pred, u_x_ub_pred, v_x_ub_pred = self.net_uv(paddle.to_tensor(self.x_ub, dtype='float32'), paddle.to_tensor(self.t_ub, dtype='float32'))
        f_u_pred, f_v_pred = self.net_f_uv(self.x_f, self.t_f)

        loss = paddle.mean(paddle.square(paddle.to_tensor(self.u0, dtype='float32') - u0_pred)) + \
            paddle.mean(paddle.square(paddle.to_tensor(self.v0, dtype='float32') - v0_pred)) + \
            paddle.mean(paddle.square(u_lb_pred - u_ub_pred)) + \
            paddle.mean(paddle.square(v_lb_pred - v_ub_pred)) + \
            paddle.mean(paddle.square(u_x_lb_pred - u_x_ub_pred)) + \
            paddle.mean(paddle.square(v_x_lb_pred - v_x_ub_pred)) + \
            paddle.mean(paddle.square(f_u_pred)) + \
            paddle.mean(paddle.square(f_v_pred))

        return loss, u0_pred, v0_pred

    def init_NN(self, in_dim, out_dim):
        # Xavier/Glorot-style initialization via a truncated normal.
        xavier_stddev = np.sqrt(2 / (in_dim + out_dim))
        weight_attr = paddle.framework.ParamAttr(
            initializer=paddle.nn.initializer.TruncatedNormal(mean=0.0, std=xavier_stddev))
        return weight_attr

    def neural_net(self, X):
        num_layers = len(self.layers)

        # Normalize inputs to [-1, 1] before the fully connected stack.
        H = 2.0 * (X - paddle.to_tensor(self.lb, dtype='float32')) / paddle.to_tensor(self.ub - self.lb, dtype='float32') - 1.0
        for i in range(0, num_layers - 2):
            H = paddle.tanh(self.fc_list[i](H))
        Y = self.fc_list[-1](H)
        return Y

    def net_uv(self, x, t):
        x.stop_gradient = False
        t.stop_gradient = False
        X = paddle.concat([x, t], axis=1)

        uv = self.neural_net(X)
        u = uv[:, 0:1]
        v = uv[:, 1:2]

        u_x = paddle.grad(u, x, retain_graph=True, create_graph=True)[0]
        v_x = paddle.grad(v, x, retain_graph=True, create_graph=True)[0]

        return u, v, u_x, v_x

    def net_f_uv(self, x, t):
        x = paddle.to_tensor(x, dtype='float32')
        t = paddle.to_tensor(t, dtype='float32')
        x.stop_gradient = False
        t.stop_gradient = False
        u, v, u_x, v_x = self.net_uv(x, t)

        u_t = paddle.grad(u, t, retain_graph=True, create_graph=True)[0]
        u_xx = paddle.grad(u_x, x, retain_graph=True, create_graph=False)[0]
        v_t = paddle.grad(v, t, retain_graph=True, create_graph=True)[0]
        v_xx = paddle.grad(v_x, x, retain_graph=True, create_graph=False)[0]

        # Schrödinger residual split into its two real-valued components.
        f_u = u_t + 0.5 * v_xx + (u ** 2 + v ** 2) * v
        f_v = v_t - 0.5 * u_xx - (u ** 2 + v ** 2) * u

        return f_u, f_v

    def train(self, nIter):
        start_time = time.time()

        for it in range(nIter):
            # tf Graphs
            loss_value, u0_pred, v0_pred = self.forward()
            loss_value.backward()
            self.optimizer.minimize(loss_value)
            self.optimizer.clear_grad()
            # self.scheduler.step()
            # loss_value, u0_pred, v0_pred = forward(model, x0_p, t0_p, u0_p, v0_p, x_lb_p, t_lb_p, x_ub_p, t_ub_p, x_f_p, t_f_p)
            # Print
            if it % 10 == 0:
                elapsed = time.time() - start_time
                print('It: %d, Loss: %.3e, Time: %.2f' %
                      (it, loss_value, elapsed))
                start_time = time.time()

    def predict(self, X_star):
        self.eval()
        # paddle.no_grad()
        x0_p = paddle.to_tensor(X_star[:, 0:1], dtype='float32')
        t0_p = paddle.to_tensor(X_star[:, 1:2], dtype='float32')

        u_star, v_star, _, _ = self.net_uv(x0_p, t0_p)
        print('u_star, v_star has been predicted!')

        x_f_p = paddle.to_tensor(X_star[:, 0:1], dtype='float32')
        t_f_p = paddle.to_tensor(X_star[:, 1:2], dtype='float32')

        f_u_star, f_v_star = self.net_f_uv(x_f_p, t_f_p)
        print('f_u_star, f_v_star has been predicted!')

        return np.array(u_star), np.array(v_star), np.array(f_u_star), np.array(f_v_star)
```
tongxin commented 2 years ago

Not all of our operators support second-order derivatives, so we need to look at the error message. Also, which version of Paddle are you using?

wuxinwang1997 commented 2 years ago

> Not all of our operators support second-order derivatives, so we need to look at the error message. Also, which version of Paddle are you using?

The error is shown below; both 2.2.2 and the dev version give the same error.

```
RuntimeError                              Traceback (most recent call last)
/tmp/ipykernel_186/1003520142.py in <module>
     42
     43 start_time = time.time()
---> 44 model.train(nIter)
     45 elapsed = time.time() - start_time
     46 print('Training time: %.4f' % (elapsed))

/tmp/ipykernel_186/1766200072.py in train(self, nIter)
    108         for it in range(nIter):
    109             # tf Graphs
--> 110             loss_value, u0_pred, v0_pred = self.forward()
    111             loss_value.backward()
    112             self.optimizer.minimize(loss_value)

/tmp/ipykernel_186/1766200072.py in forward(self)
     44         u_lb_pred, v_lb_pred, u_x_lb_pred, v_x_lb_pred = self.net_uv(paddle.to_tensor(self.x_lb, dtype='float32'), paddle.to_tensor(self.t_lb, dtype='float32'))
     45         u_ub_pred, v_ub_pred, u_x_ub_pred, v_x_ub_pred = self.net_uv(paddle.to_tensor(self.x_ub, dtype='float32'), paddle.to_tensor(self.t_ub, dtype='float32'))
---> 46         f_u_pred, f_v_pred = self.net_f_uv(self.x_f, self.t_f)
     47
     48         loss = paddle.mean(paddle.square(paddle.to_tensor(self.u0, dtype='float32') - u0_pred)) + \

/tmp/ipykernel_186/1766200072.py in net_f_uv(self, x, t)
     94
     95         u_t = paddle.grad(u, t, retain_graph=True, create_graph=True)[0]
---> 96         u_xx = paddle.grad(u_x, x, retain_graph=True, create_graph=True)[0]
     97         v_t = paddle.grad(v, t, retain_graph=True, create_graph=True)[0]
     98         v_xx = paddle.grad(v_x, x, retain_graph=True, create_graph=True)[0]

in grad(outputs, inputs, grad_outputs, retain_graph, create_graph, only_inputs, allow_unused, no_grad_vars)

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/wrapped_decorator.py in __impl__(func, *args, **kwargs)
     23     def __impl__(func, *args, **kwargs):
     24         wrapped_func = decorator_func(func)
---> 25         return wrapped_func(*args, **kwargs)
     26
     27     return __impl__

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/framework.py in __impl__(*args, **kwargs)
    227         assert in_dygraph_mode(
    228         ), "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." % func.__name__
--> 229         return func(*args, **kwargs)
    230
    231     return __impl__

/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in grad(outputs, inputs, grad_outputs, retain_graph, create_graph, only_inputs, allow_unused, no_grad_vars)
    626         return core.dygraph_partial_grad(inputs, outputs, grad_outputs,
    627                                          no_grad_vars, place, create_graph,
--> 628                                          retain_graph, allow_unused, only_inputs)
    629
    630

RuntimeError: (NotFound) The Op elementwise_sub_grad_grad doesn't have any grad op. If you don't intend calculating higher order derivatives, please set `create_graph` to False.
  [Hint: double_grad_node should not be null.] (at /paddle/paddle/fluid/imperative/partial_grad_engine.cc:908)
```
tongxin commented 2 years ago

sub is currently not supported in the second-order setting: since backward also needs to be called, a triple_grad appears in the constructed computation graph, and sub's triple grad has not been implemented yet. Also, for u_xx = paddle.grad(u_x, x, retain_graph=True, create_graph=True)[0], create_graph needs to be set to True.
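If the subtraction on the differentiated path is the input normalization in `neural_net`, one possible direction is sketched below. It is a hypothetical, untested rewrite, not a confirmed fix, and it assumes the remaining ops do have the required higher-order gradients in the installed Paddle version: lb and ub are folded into NumPy constants so that no elementwise_sub is recorded on the gradient-requiring tensor.

```python
import numpy as np
import paddle

# Hypothetical rewrite of H = 2.0 * (X - lb) / (ub - lb) - 1.0 as a constant
# scale and shift. lb and ub are illustrative bounds; the constants are computed
# in NumPy, so the normalization adds no elementwise_sub to the Paddle graph.
lb = np.array([-5.0, 0.0], dtype=np.float32)
ub = np.array([5.0, np.pi / 2], dtype=np.float32)

scale = paddle.to_tensor(2.0 / (ub - lb), dtype='float32')
shift = paddle.to_tensor(-2.0 * lb / (ub - lb) - 1.0, dtype='float32')

X = paddle.randn([16, 2])
X.stop_gradient = False

# Same mapping of X onto [-1, 1], written without subtracting from X.
H = X * scale + shift
```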

wuxinwang1997 commented 2 years ago

> sub is currently not supported in the second-order setting: since backward also needs to be called, a triple_grad appears in the constructed computation graph, and sub's triple grad has not been implemented yet. Also, for u_xx = paddle.grad(u_x, x, retain_graph=True, create_graph=True)[0], create_graph needs to be set to True.

Hi, so with create_graph set to False, what is computed for u is still the second-order derivative, right? (u_x is du/dx, and u_xx is du_x/dx.)

tongxin commented 2 years ago

With create_graph=False you can compute a second-order backward derivative once, but you cannot run another backward pass afterwards; in training mode the framework itself will by default create one more backward-derivative computation graph.
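A small sketch of this distinction, assuming Paddle 2.x dynamic-graph behaviour: with create_graph=False the second-order result is computed once but comes back detached, so a loss built from it (such as a PINN residual loss) cannot be backpropagated to the network parameters; keeping create_graph=True on every grad call leaves the result connected for the training backward pass.

```python
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])
x.stop_gradient = False

y = paddle.tanh(x)
y_x = paddle.grad(y, x, retain_graph=True, create_graph=True)[0]

# create_graph=False: the second derivative is computed, but its grad graph is discarded.
y_xx = paddle.grad(y_x, x, retain_graph=True, create_graph=False)[0]

# The result is detached, so another backward pass cannot go through it.
print(y_xx.stop_gradient)  # expected: True
```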

wuxinwang1997 commented 2 years ago

OK, thanks.

tongxin commented 2 years ago

We will add third-order derivative support for sub soon and will notify you when it lands.