Imposing Constraints on Physics-Informed Neural Network (PINN) Solutions

Let's use Physics-Informed Neural Networks to solve the steady-state Fokker-Planck equation:

\[- \frac{\partial}{\partial x} \left[ \left( \alpha x - \beta x^3 \right) p(x) \right] + \frac{\sigma^2}{2} \frac{\partial^2}{\partial x^2} p(x) = 0 \, ,\]

whose solution must satisfy the normalization condition:

\[\int_{-\infty}^{\infty} p(x) \, dx = 1\]

and the boundary conditions on the truncated computational domain:

\[p(-2.2) = p(2.2) = 0\]

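For later comparison, note that this stationary equation can be solved in closed form: integrating once in x and requiring the probability flux to vanish at the boundaries gives

\[\frac{\sigma^2}{2} \frac{dp}{dx} = \left( \alpha x - \beta x^3 \right) p(x) \quad \Rightarrow \quad p(x) = C \exp\left( \frac{1}{2 \sigma^2} \left( 2 \alpha x^2 - \beta x^4 \right) \right) \, ,\]

which is exactly the analytic_sol_func used in the analysis at the end.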

using NeuralPDE, Lux, ModelingToolkit, Optimization, OptimizationOptimJL, LineSearches
using Integrals, Cubature
using ModelingToolkit: Interval, infimum, supremum
# this example is taken from https://arxiv.org/abs/1910.10503
@parameters x
@variables p(..)
Dx = Differential(x)
Dxx = Differential(x)^2

α = 0.3
β = 0.5
_σ = 0.5
x_0 = -2.2
x_end = 2.2

eq = Dx((α * x - β * x^3) * p(x)) ~ (_σ^2 / 2) * Dxx(p(x)) # the PDE above, rearranged as lhs ~ rhs

# Initial and boundary conditions
bcs = [p(x_0) ~ 0.0, p(x_end) ~ 0.0]

# Space and time domains
domains = [x ∈ Interval(x_0, x_end)]

# Neural network
inn = 18
chain = Lux.Chain(Dense(1, inn, Lux.σ),
                  Dense(inn, inn, Lux.σ),
                  Dense(inn, inn, Lux.σ),
                  Dense(inn, 1))

lb = [x_0]
ub = [x_end]
# Additional loss enforcing the normalization condition: integrate the
# constraint residual over the domain and penalize its absolute value.
function norm_loss_function(phi, θ, p)
    function inner_f(x, θ)
        0.01 * phi(x, θ) .- 1 # residual of the (scaled) normalization constraint
    end
    prob = IntegralProblem(inner_f, lb, ub, θ)
    norm2 = solve(prob, HCubatureJL(), reltol = 1e-8, abstol = 1e-8, maxiters = 10)
    abs(norm2[1])
end

discretization = PhysicsInformedNN(chain,
                                   QuadratureTraining(),
                                   additional_loss = norm_loss_function)
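Note the signature required here, which the code above already follows: additional_loss receives the network evaluator phi, the current parameters θ, and a (here unused) hyperparameter argument p, and must return a scalar that is added to the PDE and boundary losses.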

@named pdesystem = PDESystem(eq, bcs, domains, [x], [p(x)])
prob = discretize(pdesystem, discretization)
phi = discretization.phi
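
Before training, it can be useful to confirm the additional loss is wired up correctly. A minimal check, not part of the original example, evaluates it at the initial parameters stored in the optimization problem:

# sanity check: the normalization loss at the untrained
# parameters is typically far from zero
norm_loss_function(phi, prob.u0, nothing)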

sym_prob = NeuralPDE.symbolic_discretize(pdesystem, discretization)

pde_inner_loss_functions = sym_prob.loss_functions.pde_loss_functions
bcs_inner_loss_functions = sym_prob.loss_functions.bc_loss_functions

cb_ = function (p, l)
    println("loss: ", l)
    println("pde_losses: ", map(l_ -> l_(p.u), pde_inner_loss_functions))
    println("bcs_losses: ", map(l_ -> l_(p.u), bcs_inner_loss_functions))
    println("additional_loss: ", norm_loss_function(phi, p.u, nothing))
    return false
end

res = Optimization.solve(prob, BFGS(linesearch = BackTracking()), callback = cb_, maxiters = 600)
retcode: Failure
u: ComponentVector{Float64}(layer_1 = (weight = …, bias = …), layer_2 = (weight = …, bias = …), layer_3 = (weight = …, bias = …), layer_4 = (weight = …, bias = …))
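
Since the additional loss only penalizes the integral residual, it is worth checking the constraint at the trained parameters directly. A minimal sketch, reusing phi, res, and norm_loss_function from above:

# residual of the normalization constraint after training;
# a value near zero means the constraint is satisfied
norm_loss_function(phi, res.u, nothing)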

Now let's compare the result against the analytic solution:

using Plots
C = 142.88418699042 # fitted normalization constant
analytic_sol_func(x) = C * exp((1 / (2 * _σ^2)) * (2 * α * x^2 - β * x^4))

xs = [infimum(d.domain):0.01:supremum(d.domain) for d in domains][1]
u_real = [analytic_sol_func(x) for x in xs]
u_predict = [first(phi(x, res.u)) for x in xs]

plot(xs, u_real, label = "analytic")
plot!(xs, u_predict, label = "predict")
(Plot: the analytic solution and the PINN prediction over the domain.)
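
A simple quantitative check, using the arrays computed above, is the largest pointwise discrepancy between the two curves:

# maximum absolute deviation between the PINN prediction and the
# fitted analytic solution on the plotting grid
println("max abs error: ", maximum(abs.(u_predict .- u_real)))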