Nonlinear elliptic system of PDEs

We can also solve nonlinear systems, such as the system of nonlinear elliptic PDEs

\[\begin{aligned} \frac{\partial^2u}{\partial x^2} + \frac{\partial^2u}{\partial y^2} = uf(\frac{u}{w}) + \frac{u}{w}h(\frac{u}{w}) \\ \frac{\partial^2w}{\partial x^2} + \frac{\partial^2w}{\partial y^2} = wg(\frac{u}{w}) + h(\frac{u}{w}) \\ \end{aligned}\]

where f, g, h are arbitrary functions. Since the problem is a steady-state (elliptic) one, it is closed by boundary conditions only:

\[\begin{aligned}
u(0, y) = k \left[ (y + 1) - \frac{h(k)}{f(k)} \right] \\
u(1, y) = k \left[ \left( \cosh\sqrt{f(k)} + \sinh\sqrt{f(k)} \right)(y + 1) - \frac{h(k)}{f(k)} \right] \\
u(x, 0) = k \left[ \cosh\left(\sqrt{f(k)}\,x\right) + \sinh\left(\sqrt{f(k)}\,x\right) - \frac{h(k)}{f(k)} \right] \\
w(0, y) = (y + 1) - \frac{h(k)}{f(k)} \\
w(1, y) = \left( \cosh\sqrt{f(k)} + \sinh\sqrt{f(k)} \right)(y + 1) - \frac{h(k)}{f(k)} \\
w(x, 0) = \cosh\left(\sqrt{f(k)}\,x\right) + \sinh\left(\sqrt{f(k)}\,x\right) - \frac{h(k)}{f(k)}
\end{aligned}\]

where k is a root of the algebraic (transcendental) equation f(k) = g(k).
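For the particular choices f(x) = sin x and g(x) = cos x used in the code below, k = π/4 ≈ 0.7854.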

This is done using a derivative neural network approximation: each first derivative of u and w is represented by its own network, which turns the second-order system into a first-order one plus consistency constraints.
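
Written out for the first equation (the second is handled the same way), the system actually passed to the solver is

\[\begin{aligned} \frac{\partial (D_x u)}{\partial x} + \frac{\partial (D_y u)}{\partial y} = u f\left(\frac{u}{w}\right) + \frac{u}{w} h\left(\frac{u}{w}\right), \qquad \frac{\partial u}{\partial x} = D_x u, \quad \frac{\partial u}{\partial y} = D_y u, \end{aligned}\]

where the auxiliary functions (named Dxu, Dyu, Dxw, Dyw in the code) are additional network outputs constrained to match the first derivatives of u and w.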

using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, Roots
using Plots
using ModelingToolkit: Interval, infimum, supremum

@parameters x, y
Dx = Differential(x)
Dy = Differential(y)
@variables Dxu(..), Dyu(..), Dxw(..), Dyw(..)
@variables u(..), w(..)

# Arbitrary functions
f(x) = sin(x)
g(x) = cos(x)
h(x) = x
root(x) = f(x) - g(x)

# Analytic solution
k = find_zero(root, (0, 1), Bisection())                            # k is a root of the algebraic (transcendental) equation f(x) = g(x)
θ(x, y) = (cosh(sqrt(f(k)) * x) + sinh(sqrt(f(k)) * x)) * (y + 1)   # homogeneous part: satisfies Δθ = f(k)θ (a modified Helmholtz equation)
w_analytic(x, y) = θ(x, y) - h(k) / f(k)
u_analytic(x, y) = k * w_analytic(x, y)
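
# Optional sanity check (a sketch, assuming ForwardDiff.jl is installed): since u/w ≡ k for
# the analytic pair, the first equation reduces to Δu = u f(k) + k h(k); verify it at a point.
# `laplacian` is a small helper defined here, not part of the original example.
using ForwardDiff
laplacian(fn, x0, y0) =
    ForwardDiff.derivative(a -> ForwardDiff.derivative(b -> fn(b, y0), a), x0) +
    ForwardDiff.derivative(a -> ForwardDiff.derivative(b -> fn(x0, b), a), y0)
@assert isapprox(laplacian(u_analytic, 0.3, 0.7),
    u_analytic(0.3, 0.7) * f(k) + k * h(k); atol = 1e-8)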

# Nonlinear steady-state system of two reaction-diffusion equations with three arbitrary functions f, g, h
eqs_ = [
    Dx(Dxu(x, y)) + Dy(Dyu(x, y)) ~ u(x, y) * f(u(x, y) / w(x, y)) +
                                    u(x, y) / w(x, y) * h(u(x, y) / w(x, y)),
    Dx(Dxw(x, y)) + Dy(Dyw(x, y)) ~ w(x, y) * g(u(x, y) / w(x, y)) + h(u(x, y) / w(x, y))]

# Boundary conditions
bcs_ = [u(0, y) ~ u_analytic(0, y),
    u(1, y) ~ u_analytic(1, y),
    u(x, 0) ~ u_analytic(x, 0),
    w(0, y) ~ w_analytic(0, y),
    w(1, y) ~ w_analytic(1, y),
    w(x, 0) ~ w_analytic(x, 0)]

# Constraints tying the auxiliary derivative networks to the derivatives of u and w
der_ = [Dy(u(x, y)) ~ Dyu(x, y),
    Dy(w(x, y)) ~ Dyw(x, y),
    Dx(u(x, y)) ~ Dxu(x, y),
    Dx(w(x, y)) ~ Dxw(x, y)]

bcs__ = [bcs_; der_]

# Spatial domain
domains = [x ∈ Interval(0.0, 1.0), y ∈ Interval(0.0, 1.0)]

# Neural network
input_ = length(domains)
n = 15
chain = [Chain(Dense(input_, n, σ), Dense(n, n, σ), Dense(n, 1)) for _ in 1:6] # one chain per dependent variable: u, w, Dxu, Dyu, Dxw, Dyw

strategy = GridTraining(0.01)
discretization = PhysicsInformedNN(chain, strategy)
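
# GridTraining(0.01) evaluates the residuals on a fixed 0.01-spaced grid. A quasi-random
# sampling strategy could be swapped in before constructing the PhysicsInformedNN
# (assuming NeuralPDE's QuasiRandomTraining), e.g.:
# strategy = QuasiRandomTraining(256)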

vars = [u(x, y), w(x, y), Dxu(x, y), Dyu(x, y), Dxw(x, y), Dyw(x, y)]
@named pdesystem = PDESystem(eqs_, bcs__, domains, [x, y], vars)
prob = NeuralPDE.discretize(pdesystem, discretization)
sym_prob = NeuralPDE.symbolic_discretize(pdesystem, discretization)

pde_inner_loss_functions = sym_prob.loss_functions.pde_loss_functions
bcs_inner_loss_functions = sym_prob.loss_functions.bc_loss_functions[1:6]
approx_derivative_loss_functions = sym_prob.loss_functions.bc_loss_functions[7:end]

callback = function (p, l)
    if p.iter % 10 == 0
        println("loss: ", l)
        println("pde_losses: ", map(l_ -> l_(p.u), pde_inner_loss_functions))
        println("bcs_losses: ", map(l_ -> l_(p.u), bcs_inner_loss_functions))
        println("der_losses: ", map(l_ -> l_(p.u), approx_derivative_loss_functions))
    end
    return false
end

res = solve(prob, BFGS(); maxiters = 100, callback)
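
# Optionally continue training from the returned parameters if the loss is still decreasing
# (a sketch; `remake` comes from SciMLBase):
# prob = remake(prob, u0 = res.u)
# res = solve(prob, BFGS(); maxiters = 100, callback)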

phi = discretization.phi

# Analysis
xs, ys = [infimum(d.domain):0.01:supremum(d.domain) for d in domains]
depvars = [:u, :w]
minimizers_ = [res.u.depvar[depvars[i]] for i in 1:2]

analytic_sol_func(x, y) = [u_analytic(x, y), w_analytic(x, y)]
u_real = [[analytic_sol_func(x, y)[i] for x in xs for y in ys] for i in 1:2]
u_predict = [[phi[i]([x, y], minimizers_[i])[1] for x in xs for y in ys] for i in 1:2]
diff_u = [abs.(u_real[i] .- u_predict[i]) for i in 1:2]
ps = []
for i in 1:2
    p1 = plot(xs, ys, u_real[i], linetype = :contourf, title = "$(depvars[i]), analytic")
    p2 = plot(xs, ys, u_predict[i], linetype = :contourf, title = "$(depvars[i]), predicted")
    p3 = plot(xs, ys, diff_u[i], linetype = :contourf, title = "$(depvars[i]), error")
    push!(ps, plot(p1, p2, p3))
end
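
The callback reports the total loss together with the individual PDE, boundary-condition, and derivative-constraint losses every 10 iterations:
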
loss: 7.964455546599065
pde_losses: [0.09456092773810654, 1.5278662964839276]
bcs_losses: [0.053590253344409325, 2.6205956859419475, 0.09419525111625969, 0.31169646529892847, 2.6587479428245655, 0.3361955625740608]
der_losses: [0.022326615772320736, 0.031961477841070944, 0.005541688605176259, 0.20717737905829156]
loss: 2.0918365758138076
pde_losses: [0.16388107408317265, 0.2641205167981561]
bcs_losses: [0.06406084530785108, 0.7735739391768419, 0.09860390744833769, 0.046391020829693534, 0.23769429537951228, 0.1539025886683835]
der_losses: [0.00823152688865449, 0.11350832992359058, 0.0265225862054522, 0.14134594510416174]
loss: 1.033118515187256
pde_losses: [0.2154094339983379, 0.2705645011485058]
bcs_losses: [0.020458447868296856, 0.12352019439933315, 0.02206550286307987, 0.02139663734845508, 0.0465942771530311, 0.0428488589984824]
der_losses: [0.03357936554081804, 0.07931127269284038, 0.09547945201171389, 0.06189057116436144]
loss: 0.6861309306779912
pde_losses: [0.09108548466464678, 0.15140252308871463]
bcs_losses: [0.01348918838935601, 0.08931653318767571, 0.028261984074557486, 0.009896927606467902, 0.05876276175679301, 0.01866329779432452]
der_losses: [0.031855432178832854, 0.052675365867472276, 0.09129783847535068, 0.04942359359379946]
loss: 0.4377541032255472
pde_losses: [0.10811155196506117, 0.05144162277702473]
bcs_losses: [0.005969631389722684, 0.061946752727388164, 0.03326129393027355, 0.005262654011418557, 0.035608568879674715, 0.007180902570080027]
der_losses: [0.04238868775757121, 0.0184785664242419, 0.053495733510817677, 0.014608137282272825]
loss: 0.26056086405678053
pde_losses: [0.03907444752877687, 0.02232514099991089]
bcs_losses: [0.005282269273820356, 0.06030951842871602, 0.019787320837177498, 0.004434417771092251, 0.0033576677738608148, 0.008360576610215076]
der_losses: [0.04601268914859409, 0.023387387821421322, 0.020258748580062914, 0.00797067928313244]
loss: 0.18553612928283586
pde_losses: [0.06768912827829418, 0.010303269332647537]
bcs_losses: [0.003776004775033352, 0.0371471266986153, 0.008657591026221661, 0.003941906873592612, 0.0027065963993660896, 0.006711704212636146]
der_losses: [0.018251480275424458, 0.012056767064059953, 0.005691325530498652, 0.008603228816445936]
loss: 0.11809321940334075
pde_losses: [0.024166735801855473, 0.0052848260519647436]
bcs_losses: [0.002483596640342322, 0.01546856276907252, 0.012607791235355156, 0.0024572147419907324, 0.0020752626148572884, 0.002428897666966379]
der_losses: [0.007387424652282897, 0.01865422179560543, 0.012120395135990622, 0.012958290297057161]
loss: 0.07084468345774037
pde_losses: [0.006814271103036366, 0.004748018817677722]
bcs_losses: [0.0019554086446022514, 0.009518368010332581, 0.013020823339281479, 0.0016247985722549318, 0.004633405791332967, 0.0035469175788277484]
der_losses: [0.002881418761818275, 0.01047146118964521, 0.00513055511969303, 0.006499236529237803]
loss: 0.052297419928712605
pde_losses: [0.0026744587451543174, 0.0012476418273926185]
bcs_losses: [0.00308322714865252, 0.008186602467899907, 0.009436295253149294, 0.0047098009045736825, 0.0033368269776272784, 0.004136647189230145]
der_losses: [0.0018684032722154852, 0.008403595098790101, 0.0020550313386161955, 0.0031588897054110644]
loss: 0.038840570347191405
pde_losses: [0.0021141541131729806, 0.0011961298483566724]
bcs_losses: [0.003789398802024242, 0.0030991698716349536, 0.006106015063747031, 0.003060080807012735, 0.002430466124912393, 0.0023936429257477116]
der_losses: [0.0017068745074623425, 0.006209739535651947, 0.0023039216985677805, 0.004430977048900615]
ps[1]

[Contour plots for u: analytic solution, NN prediction, and absolute error]

ps[2]

[Contour plots for w: analytic solution, NN prediction, and absolute error]
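
As a compact accuracy summary, a relative L2 error per field can be computed from the arrays above (a small sketch, not part of the original example):

rel_l2 = [sqrt(sum(abs2, u_predict[i] .- u_real[i]) / sum(abs2, u_real[i])) for i in 1:2]
println("relative L2 errors (u, w): ", rel_l2)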