Closed geriatricvibes closed 3 months ago
I am unable to reproduce this:
# Sweep of tension magnitudes to try (not referenced in the rest of this snippet).
tension = LinRange(0.1, 10, 100)

# Optimizer progress callback: print the current loss each iteration.
# Returning `false` tells Optimization.solve to keep iterating.
callback = (p, l) -> begin
    println("Current loss is: $l")
    return false
end
# Problem constants.
# NOTE(review): this looks like an elastica/rod bending problem — L is presumably
# the rod length and `ei` the bending stiffness EI; confirm with the author.
L = 1
ei= 1.0          # stiffness coefficient multiplying theta''
T = 0.0          # tension magnitude (base case: zero tension)
Q = [0, -0.5, 0] # fixed point the force F is directed towards
# Symbolic setup (ModelingToolkit): independent variable s and unknown field theta(s).
@parameters s
@variables theta(..)
Ds = Differential(s)     # d/ds
Dss = Differential(s)^2  # d^2/ds^2
# Integral operators: over the whole interval [0, L] and the running interval [0, s].
I_0_l = Integral(s in DomainSets.ClosedInterval(0, L))
I_0_s = Integral(s in DomainSets.ClosedInterval(0, s))
# Endpoint position P (integral over [0, L]) and running position A (integral over [0, s])
# of the tangent direction (cos theta, sin theta, 0). A is defined but unused below.
P = [I_0_l(cos(theta(s))), I_0_l(sin(theta(s))), 0]
A = [I_0_s(cos(theta(s))), I_0_s(sin(theta(s))), 0]
F = ((Q-P)/(norm(Q-P)))*T  # force: unit vector from P towards Q, scaled by T (zero here since T = 0.0)
t = [cos(theta(s)), sin(theta(s)), 0]  # unit tangent vector
# Governing equation: ei * theta'' + z-component of (t x F) = 0.
eq = ei * Dss(theta(s)) + dot(cross(t, F), [0, 0, 1]) ~ 0
# Boundary conditions: theta fixed to 0 at s = 0, derivative of theta zero at s = L.
bcs = [theta(0.0)~0.0, Ds(theta(L))~0.0]
domains = [s ∈ Interval(0.0, 1.0)]
strategy_ = QuadratureTraining()  # NeuralPDE training/sampling strategy
af = Lux.relu
# Surrogate network for theta: 1 input -> 10 hidden (relu) -> 1 output, in Float64.
chain1 = Chain(Dense(1, 10, af), Dense(10, 1)) |> f64
# Loss history recorded by the callback below.
# Use a typed vector: a bare `[]` is Vector{Any}, which defeats specialization
# and makes later numeric use of the history slower.
loss = Float64[]

# Optimizer progress callback: print and record the scalar loss each iteration.
# Returning `false` tells Optimization.solve to keep iterating.
callback = function (p, l)
    println("loss: $l")
    # push! is the idiom for appending a single scalar (append! is for collections).
    push!(loss, l)
    return false
end
# Initialize network parameters explicitly (as a Float64 ComponentArray) so the
# same initialization object can be inspected/reused; [1] takes the params,
# dropping the Lux state returned by setup.
init_params = Lux.setup(Random.default_rng(), chain1)[1] |> ComponentArray .|> Float64
discretization = NeuralPDE.PhysicsInformedNN(chain1, strategy_; init_params = init_params)
@named pde_system = PDESystem(eq, bcs, domains, [s], [theta(s)])
prob = NeuralPDE.discretize(pde_system, discretization)
# Base case (T = 0.0): reported to finish in ~1.69 s.
@time res = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); callback = callback, maxiters = 200) # 1.69s
# Turn on tension and rebuild the force and governing equation with the new T.
T = 0.1
# NOTE(review): broadcast `.*` here vs plain `*` at the first F definition —
# same result for a scalar T, but inconsistent style.
F = ((Q-P)/(norm(Q-P))).*T
eq = ei * Dss(theta(s)) + dot(cross(t, F), [0, 0, 1]) ~ 0
# Using old weights: warm start from the parameters optimized in the base case.
discretization = NeuralPDE.PhysicsInformedNN(chain1,strategy_; init_params = res.u)
@named pde_system = PDESystem(eq, bcs, domains, [s], [theta(s)])
prob = NeuralPDE.discretize(pde_system, discretization)
# Reported timing with warm start: ~63.9 s.
@time res2 = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); callback = callback, maxiters = 200) # 63.9s
# Using random weights: cold start with a fresh random initialization (no
# init_params passed) for comparison against the warm start above.
# NOTE(review): this overwrites the warm-start result, since `res2` is reused.
discretization = NeuralPDE.PhysicsInformedNN(chain1,strategy_)
@named pde_system = PDESystem(eq, bcs, domains, [s], [theta(s)])
prob = NeuralPDE.discretize(pde_system, discretization)
# Reported timing with random init: ~114.58 s (slower than warm start in this run).
@time res2 = Optimization.solve(prob, OptimizationOptimisers.Adam(5e-3); callback = callback, maxiters = 200) # 114.58s
I am getting the opposite.
@geriatricvibes which package versions are you on? Please run `]st` (Pkg status) in the Julia REPL and paste the output.
Hi, I forgot about this issue — my apologies.
It turned out to be my fault: there was an issue with the Jupyter Julia kernel that made everything run slowly. After switching to the Julia REPL it works fine, so I'm closing the question.
Question❓
The base case finishes in about 2 seconds, but inside the loop, when I change the `T` value in the equation, it becomes very slow — it takes a second or two just to complete one iteration. Starting from random weights and biases turns out to be much faster than reusing the old ones.
So I think I'm doing something wrong and there may be a better way to go about it.
Below is the code