
Training a PINN on a 2D PDE

In this tutorial we will cover how to use a PINN to solve a 2D PDE. We will use the system from the NeuralPDE tutorials, but with a custom loss function that exploits the nested AD capabilities of Lux.jl.

This is a demonstration of Lux.jl. For serious use cases of PINNs, please refer to the package NeuralPDE.jl.

Package Imports

julia
using Lux,
    Optimisers,
    Random,
    Printf,
    Statistics,
    MLUtils,
    OnlineStats,
    CairoMakie,
    Reactant,
    Enzyme

const xdev = reactant_device(; force=true)
const cdev = cpu_device()
(::MLDataDevices.CPUDevice) (generic function with 1 method)

Problem Definition

Since Lux supports efficient nested AD up to 2nd order, we will rewrite the problem in terms of first order derivatives, so that we can compute the gradients of the loss using 2nd order AD.
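
Concretely (reconstructed here from the physics residual and the analytical solution used later in this tutorial), the system being solved is

$$\frac{\partial u}{\partial t} = \frac{\partial^2 u}{\partial x^2} + \frac{\partial^2 u}{\partial y^2}, \qquad x, y, t \in [0, 2],$$

with the analytical solution $u(x, y, t) = e^{x + y} \cos(x + y + 4t)$ supplying the data and boundary targets.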

Define the Neural Networks

The network takes 3 input variables (x, y, and t) and outputs a scalar value. Here, we define a wrapper over the network so that we can train it using Training.TrainState.

julia
struct PINN{M} <: AbstractLuxWrapperLayer{:model}
    model::M
end

function PINN(; hidden_dims::Int=32)
    return PINN(
        Chain(
            Dense(3 => hidden_dims, tanh),
            Dense(hidden_dims => hidden_dims, tanh),
            Dense(hidden_dims => hidden_dims, tanh),
            Dense(hidden_dims => 1),
        ),
    )
end
Main.var"##230".PINN
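
For instance (an illustrative check, not part of the tutorial), we can instantiate the wrapper and count its trainable parameters:

julia
pinn_demo = PINN(; hidden_dims=32)
# 3×32 + 32  +  2 × (32×32 + 32)  +  32 + 1  =  2273 trainable parameters
Lux.parameterlength(pinn_demo)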

Define the Loss Functions

We will define a custom loss function to compute the loss using 2nd order AD. The helpers below differentiate `sum ∘ model` rather than `model` itself: since each scalar output of the network depends only on its own input column, the gradient of the summed output with respect to the inputs recovers all per-sample derivatives in a single reverse pass. We will use the following physics-informed loss:
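
$$\mathcal{L}_{\text{physics}} = \frac{1}{N} \sum_{i=1}^{N} \left( \frac{\partial u}{\partial t}(x_i, y_i, t_i) - \frac{\partial^2 u}{\partial x^2}(x_i, y_i, t_i) - \frac{\partial^2 u}{\partial y^2}(x_i, y_i, t_i) \right)^2,$$

where the $(x_i, y_i, t_i)$ are the $N$ collocation points in a batch, matching `physics_informed_loss_function` below.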

julia
# First-order derivatives: differentiate sum ∘ model w.r.t. the inputs and
# slice out the row corresponding to t, x, or y.
function ∂u_∂t(model::StatefulLuxLayer, xyt::AbstractArray)
    return Enzyme.gradient(Enzyme.Reverse, sum ∘ model, xyt)[1][3, :]
end

function ∂u_∂x(model::StatefulLuxLayer, xyt::AbstractArray)
    return Enzyme.gradient(Enzyme.Reverse, sum ∘ model, xyt)[1][1, :]
end

function ∂u_∂y(model::StatefulLuxLayer, xyt::AbstractArray)
    return Enzyme.gradient(Enzyme.Reverse, sum ∘ model, xyt)[1][2, :]
end

# Second-order derivatives: nested reverse-mode AD. The model is marked
# Enzyme.Const since we differentiate w.r.t. the inputs (the second argument).
function ∂²u_∂x²(model::StatefulLuxLayer, xyt::AbstractArray)
    return Enzyme.gradient(Enzyme.Reverse, sum ∘ ∂u_∂x, Enzyme.Const(model), xyt)[2][1, :]
end

function ∂²u_∂y²(model::StatefulLuxLayer, xyt::AbstractArray)
    return Enzyme.gradient(Enzyme.Reverse, sum ∘ ∂u_∂y, Enzyme.Const(model), xyt)[2][2, :]
end

function physics_informed_loss_function(model::StatefulLuxLayer, xyt::AbstractArray)
    return mean(abs2, ∂u_∂t(model, xyt) .- ∂²u_∂x²(model, xyt) .- ∂²u_∂y²(model, xyt))
end
physics_informed_loss_function (generic function with 1 method)
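
To see why the `[1][3, :]` indexing extracts ∂u/∂t: `Enzyme.gradient` returns one gradient per argument, and the gradient with respect to the `3 × N` input has the same `3 × N` shape, with rows corresponding to x, y, and t. Below is a minimal sketch, assuming Enzyme can differentiate the model directly on the CPU (the tutorial itself compiles these calls through Reactant); all `_demo` names are illustrative:

julia
pinn_demo = PINN(; hidden_dims=8)
ps_demo, st_demo = Lux.setup(Random.default_rng(), pinn_demo)
smodel_demo = StatefulLuxLayer{true}(pinn_demo, ps_demo, st_demo)

xyt_demo = rand(Float32, 3, 5)
∇u_demo = Enzyme.gradient(Enzyme.Reverse, sum ∘ smodel_demo, xyt_demo)[1]
@assert size(∇u_demo) == (3, 5)  # row 1 ↦ ∂u/∂x, row 2 ↦ ∂u/∂y, row 3 ↦ ∂u/∂t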

Additionally, we need a supervised MSE loss with respect to the data and the boundary conditions.

julia
function mse_loss_function(
    model::StatefulLuxLayer, target::AbstractArray, xyt::AbstractArray
)
    return MSELoss()(model(xyt), target)
end

function loss_function(model, ps, st, (xyt, target_data, xyt_bc, target_bc))
    smodel = StatefulLuxLayer{true}(model, ps, st)
    physics_loss = physics_informed_loss_function(smodel, xyt)
    data_loss = mse_loss_function(smodel, target_data, xyt)
    bc_loss = mse_loss_function(smodel, target_bc, xyt_bc)
    loss = physics_loss + data_loss + bc_loss
    return loss, smodel.st, (; physics_loss, data_loss, bc_loss)
end
loss_function (generic function with 1 method)
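
Reusing the `_demo` objects from the sketch above (again purely illustrative), the supervised MSE term can be evaluated without any AD to check shapes:

julia
target_demo = rand(Float32, 1, 5)
mse_loss_function(smodel_demo, target_demo, xyt_demo)  # a non-negative scalar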

Generate the Data

We will generate training data on a regular grid over the square spatio-temporal domain x ∈ [0, 2], y ∈ [0, 2], and t ∈ [0, 2]. Typically, you want to be smarter about the sampling process, but for the sake of simplicity, we will skip that.

julia
analytical_solution(x, y, t) = @. exp(x + y) * cos(x + y + 4t)
analytical_solution(xyt) = analytical_solution(xyt[1, :], xyt[2, :], xyt[3, :])

begin
    grid_len = 16

    grid = range(0.0f0, 2.0f0; length=grid_len)
    xyt = stack([[elem...] for elem in vec(collect(Iterators.product(grid, grid, grid)))])

    target_data = reshape(analytical_solution(xyt), 1, :)

    bc_len = 512

    x = collect(range(0.0f0, 2.0f0; length=bc_len))
    y = collect(range(0.0f0, 2.0f0; length=bc_len))
    t = collect(range(0.0f0, 2.0f0; length=bc_len))

    # Five boundary faces of the domain: t = 0 (initial condition), x = 0,
    # x = 2, y = 0, and y = 2.
    xyt_bc = hcat(
        stack((x, y, zeros(Float32, bc_len)); dims=1),
        stack((zeros(Float32, bc_len), y, t); dims=1),
        stack((ones(Float32, bc_len) .* 2, y, t); dims=1),
        stack((x, zeros(Float32, bc_len), t); dims=1),
        stack((x, ones(Float32, bc_len) .* 2, t); dims=1),
    )
    target_bc = reshape(analytical_solution(xyt_bc), 1, :)

    min_target_bc, max_target_bc = extrema(target_bc)
    min_data, max_data = extrema(target_data)
    min_pde_val, max_pde_val = min(min_data, min_target_bc), max(max_data, max_target_bc)

    # Min-max normalize the inputs to [0, 1] and the targets using the
    # combined extrema of the data and boundary values.
    xyt = (xyt .- minimum(xyt)) ./ (maximum(xyt) .- minimum(xyt))
    xyt_bc = (xyt_bc .- minimum(xyt_bc)) ./ (maximum(xyt_bc) .- minimum(xyt_bc))
    target_bc = (target_bc .- min_pde_val) ./ (max_pde_val - min_pde_val)
    target_data = (target_data .- min_pde_val) ./ (max_pde_val - min_pde_val)
end
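
As a sanity check (an illustrative addition, not part of the original tutorial), central finite differences confirm that the analytical solution satisfies the PDE stated above:

julia
let h = 1e-3, x0 = 0.7, y0 = 0.3, t0 = 0.5
    u(x, y, t) = exp(x + y) * cos(x + y + 4t)
    u_t  = (u(x0, y0, t0 + h) - u(x0, y0, t0 - h)) / (2h)
    u_xx = (u(x0 + h, y0, t0) - 2u(x0, y0, t0) + u(x0 - h, y0, t0)) / h^2
    u_yy = (u(x0, y0 + h, t0) - 2u(x0, y0, t0) + u(x0, y0 - h, t0)) / h^2
    @assert isapprox(u_t, u_xx + u_yy; rtol=1e-4)  # u_t = u_xx + u_yy holds
end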

Training

julia
function train_model(
    xyt,
    target_data,
    xyt_bc,
    target_bc;
    seed::Int=0,
    maxiters::Int=50000,
    hidden_dims::Int=128,
)
    rng = Random.default_rng()
    Random.seed!(rng, seed)

    pinn = PINN(; hidden_dims)
    ps, st = Lux.setup(rng, pinn) |> xdev

    bc_dataloader =
        DataLoader((xyt_bc, target_bc); batchsize=128, shuffle=true, partial=false) |> xdev
    pde_dataloader =
        DataLoader((xyt, target_data); batchsize=128, shuffle=true, partial=false) |> xdev

    train_state = Training.TrainState(pinn, ps, st, Adam(0.005f0))

    # Piecewise-constant learning-rate schedule: 5e-3 for the first 5k steps,
    # then 5e-4 until 10k, and 5e-5 afterwards.
    lr = i -> i < 5000 ? 0.005f0 : (i < 10000 ? 0.0005f0 : 0.00005f0)

    # Rolling buffers of the last 32 values of each loss component, used to
    # report a smoothed mean alongside the instantaneous loss.
    total_loss_tracker, physics_loss_tracker, data_loss_tracker, bc_loss_tracker = ntuple(
        _ -> OnlineStats.CircBuff(Float32, 32; rev=true), 4
    )

    iter = 1
    # Cycle both dataloaders indefinitely; each iteration draws one interior
    # (PDE) batch and one boundary-condition batch.
    for ((xyt_batch, target_data_batch), (xyt_bc_batch, target_bc_batch)) in
        zip(Iterators.cycle(pde_dataloader), Iterators.cycle(bc_dataloader))
        Optimisers.adjust!(train_state, lr(iter))

        _, loss, stats, train_state = Training.single_train_step!(
            AutoEnzyme(),
            loss_function,
            (xyt_batch, target_data_batch, xyt_bc_batch, target_bc_batch),
            train_state;
            return_gradients=Val(false),
        )

        fit!(total_loss_tracker, Float32(loss))
        fit!(physics_loss_tracker, Float32(stats.physics_loss))
        fit!(data_loss_tracker, Float32(stats.data_loss))
        fit!(bc_loss_tracker, Float32(stats.bc_loss))

        mean_loss = mean(OnlineStats.value(total_loss_tracker))
        mean_physics_loss = mean(OnlineStats.value(physics_loss_tracker))
        mean_data_loss = mean(OnlineStats.value(data_loss_tracker))
        mean_bc_loss = mean(OnlineStats.value(bc_loss_tracker))

        isnan(loss) && throw(ArgumentError("NaN Loss Detected"))

        if iter % 1000 == 1 || iter == maxiters
            @printf "Iteration: [%6d/%6d] \t Loss: %.9f (%.9f) \t Physics Loss: %.9f \
                     (%.9f) \t Data Loss: %.9f (%.9f) \t BC \
                     Loss: %.9f (%.9f)\n" iter maxiters loss mean_loss stats.physics_loss mean_physics_loss stats.data_loss mean_data_loss stats.bc_loss mean_bc_loss
        end

        iter += 1
        iter ≥ maxiters && break
    end

    return StatefulLuxLayer{true}(
        pinn, cdev(train_state.parameters), cdev(train_state.states)
    )
end

trained_model = train_model(xyt, target_data, xyt_bc, target_bc)
2025-05-23 23:05:02.453595: I external/xla/xla/service/service.cc:152] XLA service 0x2bda3000 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
2025-05-23 23:05:02.453659: I external/xla/xla/service/service.cc:160]   StreamExecutor device (0): Quadro RTX 5000, Compute Capability 7.5
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1748041502.454341  837841 se_gpu_pjrt_client.cc:1026] Using BFC allocator.
I0000 00:00:1748041502.454404  837841 gpu_helpers.cc:136] XLA backend allocating 12527321088 bytes on device 0 for BFCAllocator.
I0000 00:00:1748041502.454451  837841 gpu_helpers.cc:177] XLA backend will use up to 4175773696 bytes on device 0 for CollectiveBFCAllocator.
I0000 00:00:1748041502.465570  837841 cuda_dnn.cc:529] Loaded cuDNN version 90400
Iteration: [     1/ 50000] 	 Loss: 20.523933411 (20.523933411) 	 Physics Loss: 16.931318283 (16.931318283) 	 Data Loss: 2.007483006 (2.007483006) 	 BC Loss: 1.585133314 (1.585133314)
Iteration: [  1001/ 50000] 	 Loss: 0.017368617 (0.019241164) 	 Physics Loss: 0.000384363 (0.000523646) 	 Data Loss: 0.005318490 (0.007538576) 	 BC Loss: 0.011665763 (0.011178942)
Iteration: [  2001/ 50000] 	 Loss: 0.015431679 (0.018665746) 	 Physics Loss: 0.001248590 (0.001662111) 	 Data Loss: 0.004322518 (0.006408248) 	 BC Loss: 0.009860571 (0.010595388)
Iteration: [  3001/ 50000] 	 Loss: 0.015749719 (0.015215931) 	 Physics Loss: 0.000569923 (0.001279026) 	 Data Loss: 0.004014860 (0.004232407) 	 BC Loss: 0.011164935 (0.009704499)
Iteration: [  4001/ 50000] 	 Loss: 0.009721351 (0.008721044) 	 Physics Loss: 0.002388531 (0.003386593) 	 Data Loss: 0.003175570 (0.002105072) 	 BC Loss: 0.004157250 (0.003229378)
Iteration: [  5001/ 50000] 	 Loss: 0.007812903 (0.007147927) 	 Physics Loss: 0.004434738 (0.003748615) 	 Data Loss: 0.001722062 (0.001521818) 	 BC Loss: 0.001656103 (0.001877495)
Iteration: [  6001/ 50000] 	 Loss: 0.000966392 (0.001188839) 	 Physics Loss: 0.000228821 (0.000281602) 	 Data Loss: 0.000566887 (0.000706535) 	 BC Loss: 0.000170684 (0.000200702)
Iteration: [  7001/ 50000] 	 Loss: 0.001327108 (0.000992831) 	 Physics Loss: 0.000275251 (0.000351548) 	 Data Loss: 0.000947732 (0.000502443) 	 BC Loss: 0.000104125 (0.000138840)
Iteration: [  8001/ 50000] 	 Loss: 0.001122999 (0.000848716) 	 Physics Loss: 0.000743390 (0.000349752) 	 Data Loss: 0.000310917 (0.000403828) 	 BC Loss: 0.000068692 (0.000095136)
Iteration: [  9001/ 50000] 	 Loss: 0.004153907 (0.003223831) 	 Physics Loss: 0.003048246 (0.002250909) 	 Data Loss: 0.000753449 (0.000563253) 	 BC Loss: 0.000352211 (0.000409668)
Iteration: [ 10001/ 50000] 	 Loss: 0.000605130 (0.000697488) 	 Physics Loss: 0.000217061 (0.000325370) 	 Data Loss: 0.000306616 (0.000311040) 	 BC Loss: 0.000081453 (0.000061078)
Iteration: [ 11001/ 50000] 	 Loss: 0.000361692 (0.000377859) 	 Physics Loss: 0.000133546 (0.000064761) 	 Data Loss: 0.000184457 (0.000277116) 	 BC Loss: 0.000043690 (0.000035982)
Iteration: [ 12001/ 50000] 	 Loss: 0.000261113 (0.000354993) 	 Physics Loss: 0.000047183 (0.000065501) 	 Data Loss: 0.000172159 (0.000253253) 	 BC Loss: 0.000041772 (0.000036238)
Iteration: [ 13001/ 50000] 	 Loss: 0.000312921 (0.000330453) 	 Physics Loss: 0.000066998 (0.000066961) 	 Data Loss: 0.000210090 (0.000230421) 	 BC Loss: 0.000035833 (0.000033070)
Iteration: [ 14001/ 50000] 	 Loss: 0.000396499 (0.000335560) 	 Physics Loss: 0.000073465 (0.000069135) 	 Data Loss: 0.000291113 (0.000237840) 	 BC Loss: 0.000031920 (0.000028585)
Iteration: [ 15001/ 50000] 	 Loss: 0.000241214 (0.000284670) 	 Physics Loss: 0.000044117 (0.000053078) 	 Data Loss: 0.000165352 (0.000201082) 	 BC Loss: 0.000031746 (0.000030510)
Iteration: [ 16001/ 50000] 	 Loss: 0.000218280 (0.000291941) 	 Physics Loss: 0.000043733 (0.000061258) 	 Data Loss: 0.000142434 (0.000202081) 	 BC Loss: 0.000032114 (0.000028602)
Iteration: [ 17001/ 50000] 	 Loss: 0.000411429 (0.000302260) 	 Physics Loss: 0.000088574 (0.000071290) 	 Data Loss: 0.000299822 (0.000203119) 	 BC Loss: 0.000023032 (0.000027851)
Iteration: [ 18001/ 50000] 	 Loss: 0.000210372 (0.000276954) 	 Physics Loss: 0.000041067 (0.000055854) 	 Data Loss: 0.000139373 (0.000195172) 	 BC Loss: 0.000029932 (0.000025927)
Iteration: [ 19001/ 50000] 	 Loss: 0.000221974 (0.000272862) 	 Physics Loss: 0.000065688 (0.000052131) 	 Data Loss: 0.000137388 (0.000196559) 	 BC Loss: 0.000018898 (0.000024172)
Iteration: [ 20001/ 50000] 	 Loss: 0.000298736 (0.000262362) 	 Physics Loss: 0.000050584 (0.000058507) 	 Data Loss: 0.000227205 (0.000180636) 	 BC Loss: 0.000020947 (0.000023219)
Iteration: [ 21001/ 50000] 	 Loss: 0.000303169 (0.000240393) 	 Physics Loss: 0.000058669 (0.000047547) 	 Data Loss: 0.000222400 (0.000170185) 	 BC Loss: 0.000022100 (0.000022660)
Iteration: [ 22001/ 50000] 	 Loss: 0.000163029 (0.000233151) 	 Physics Loss: 0.000028403 (0.000044451) 	 Data Loss: 0.000106589 (0.000167016) 	 BC Loss: 0.000028037 (0.000021684)
Iteration: [ 23001/ 50000] 	 Loss: 0.000237907 (0.000248595) 	 Physics Loss: 0.000045338 (0.000050847) 	 Data Loss: 0.000169656 (0.000176680) 	 BC Loss: 0.000022913 (0.000021067)
Iteration: [ 24001/ 50000] 	 Loss: 0.000289768 (0.000238424) 	 Physics Loss: 0.000048903 (0.000044078) 	 Data Loss: 0.000222246 (0.000170180) 	 BC Loss: 0.000018620 (0.000024165)
Iteration: [ 25001/ 50000] 	 Loss: 0.000199920 (0.000221828) 	 Physics Loss: 0.000037046 (0.000033586) 	 Data Loss: 0.000144489 (0.000167642) 	 BC Loss: 0.000018385 (0.000020600)
Iteration: [ 26001/ 50000] 	 Loss: 0.000239511 (0.000247295) 	 Physics Loss: 0.000060653 (0.000060418) 	 Data Loss: 0.000157945 (0.000163779) 	 BC Loss: 0.000020914 (0.000023098)
Iteration: [ 27001/ 50000] 	 Loss: 0.000226333 (0.000222564) 	 Physics Loss: 0.000044221 (0.000036721) 	 Data Loss: 0.000157731 (0.000164702) 	 BC Loss: 0.000024381 (0.000021140)
Iteration: [ 28001/ 50000] 	 Loss: 0.000203030 (0.000214076) 	 Physics Loss: 0.000033543 (0.000035788) 	 Data Loss: 0.000153231 (0.000157642) 	 BC Loss: 0.000016256 (0.000020646)
Iteration: [ 29001/ 50000] 	 Loss: 0.000207842 (0.000221459) 	 Physics Loss: 0.000034102 (0.000043224) 	 Data Loss: 0.000142929 (0.000157899) 	 BC Loss: 0.000030811 (0.000020336)
Iteration: [ 30001/ 50000] 	 Loss: 0.000199394 (0.000213459) 	 Physics Loss: 0.000024793 (0.000034220) 	 Data Loss: 0.000153788 (0.000159613) 	 BC Loss: 0.000020814 (0.000019626)
Iteration: [ 31001/ 50000] 	 Loss: 0.000259331 (0.000209178) 	 Physics Loss: 0.000027255 (0.000033038) 	 Data Loss: 0.000212833 (0.000157258) 	 BC Loss: 0.000019244 (0.000018882)
Iteration: [ 32001/ 50000] 	 Loss: 0.000209516 (0.000200706) 	 Physics Loss: 0.000037465 (0.000029447) 	 Data Loss: 0.000150773 (0.000151957) 	 BC Loss: 0.000021279 (0.000019301)
Iteration: [ 33001/ 50000] 	 Loss: 0.000175537 (0.000198191) 	 Physics Loss: 0.000020622 (0.000030190) 	 Data Loss: 0.000132517 (0.000147978) 	 BC Loss: 0.000022399 (0.000020023)
Iteration: [ 34001/ 50000] 	 Loss: 0.000198288 (0.000197085) 	 Physics Loss: 0.000038477 (0.000034653) 	 Data Loss: 0.000139594 (0.000143636) 	 BC Loss: 0.000020217 (0.000018796)
Iteration: [ 35001/ 50000] 	 Loss: 0.000133405 (0.000207845) 	 Physics Loss: 0.000016809 (0.000042805) 	 Data Loss: 0.000096007 (0.000146670) 	 BC Loss: 0.000020589 (0.000018370)
Iteration: [ 36001/ 50000] 	 Loss: 0.000164951 (0.000201747) 	 Physics Loss: 0.000030809 (0.000037578) 	 Data Loss: 0.000116482 (0.000146033) 	 BC Loss: 0.000017660 (0.000018135)
Iteration: [ 37001/ 50000] 	 Loss: 0.000289196 (0.000189187) 	 Physics Loss: 0.000054318 (0.000025703) 	 Data Loss: 0.000216863 (0.000144960) 	 BC Loss: 0.000018014 (0.000018524)
Iteration: [ 38001/ 50000] 	 Loss: 0.000237108 (0.000194672) 	 Physics Loss: 0.000021421 (0.000027486) 	 Data Loss: 0.000192690 (0.000150418) 	 BC Loss: 0.000022997 (0.000016767)
Iteration: [ 39001/ 50000] 	 Loss: 0.000171158 (0.000192285) 	 Physics Loss: 0.000033970 (0.000028719) 	 Data Loss: 0.000122918 (0.000145318) 	 BC Loss: 0.000014269 (0.000018248)
Iteration: [ 40001/ 50000] 	 Loss: 0.000164264 (0.000187916) 	 Physics Loss: 0.000017215 (0.000025523) 	 Data Loss: 0.000127765 (0.000143793) 	 BC Loss: 0.000019284 (0.000018600)
Iteration: [ 41001/ 50000] 	 Loss: 0.000155966 (0.000184873) 	 Physics Loss: 0.000018395 (0.000020868) 	 Data Loss: 0.000121398 (0.000146531) 	 BC Loss: 0.000016173 (0.000017474)
Iteration: [ 42001/ 50000] 	 Loss: 0.000182299 (0.000191650) 	 Physics Loss: 0.000033198 (0.000027508) 	 Data Loss: 0.000134649 (0.000145640) 	 BC Loss: 0.000014452 (0.000018501)
Iteration: [ 43001/ 50000] 	 Loss: 0.000181653 (0.000190743) 	 Physics Loss: 0.000017883 (0.000026300) 	 Data Loss: 0.000149802 (0.000144423) 	 BC Loss: 0.000013968 (0.000020021)
Iteration: [ 44001/ 50000] 	 Loss: 0.000175671 (0.000194985) 	 Physics Loss: 0.000019150 (0.000031185) 	 Data Loss: 0.000140690 (0.000142598) 	 BC Loss: 0.000015831 (0.000021203)
Iteration: [ 45001/ 50000] 	 Loss: 0.000271014 (0.000208449) 	 Physics Loss: 0.000040799 (0.000034540) 	 Data Loss: 0.000202894 (0.000151865) 	 BC Loss: 0.000027322 (0.000022044)
Iteration: [ 46001/ 50000] 	 Loss: 0.000218525 (0.000193858) 	 Physics Loss: 0.000042998 (0.000037865) 	 Data Loss: 0.000158263 (0.000138431) 	 BC Loss: 0.000017263 (0.000017562)
Iteration: [ 47001/ 50000] 	 Loss: 0.000168431 (0.000178893) 	 Physics Loss: 0.000014457 (0.000022937) 	 Data Loss: 0.000136733 (0.000138819) 	 BC Loss: 0.000017240 (0.000017136)
Iteration: [ 48001/ 50000] 	 Loss: 0.000180434 (0.000176540) 	 Physics Loss: 0.000021836 (0.000020793) 	 Data Loss: 0.000141036 (0.000137630) 	 BC Loss: 0.000017562 (0.000018118)
Iteration: [ 49001/ 50000] 	 Loss: 0.000159058 (0.000194611) 	 Physics Loss: 0.000016074 (0.000034834) 	 Data Loss: 0.000121367 (0.000141970) 	 BC Loss: 0.000021617 (0.000017808)

Visualizing the Results

julia
ts, xs, ys = 0.0f0:0.05f0:2.0f0, 0.0f0:0.02f0:2.0f0, 0.0f0:0.02f0:2.0f0
grid = stack([[elem...] for elem in vec(collect(Iterators.product(xs, ys, ts)))])

u_real = reshape(analytical_solution(grid), length(xs), length(ys), length(ts))

grid_normalized = (grid .- minimum(grid)) ./ (maximum(grid) .- minimum(grid))
u_pred = reshape(trained_model(grid_normalized), length(xs), length(ys), length(ts))
u_pred = u_pred .* (max_pde_val - min_pde_val) .+ min_pde_val
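
# (Illustrative addition, not part of the original tutorial) a one-number
# summary of the fit before animating the error field:
@printf "Mean absolute error over the evaluation grid: %.6f\n" mean(abs, u_pred .- u_real)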

begin
    fig = Figure()
    ax = CairoMakie.Axis(fig[1, 1]; xlabel="x", ylabel="y")
    errs = [abs.(u_pred[:, :, i] .- u_real[:, :, i]) for i in 1:length(ts)]
    Colorbar(fig[1, 2]; limits=extrema(stack(errs)))

    CairoMakie.record(fig, "pinn_nested_ad.gif", 1:length(ts); framerate=10) do i
        ax.title = "Abs. Predictor Error | Time: $(ts[i])"
        err = errs[i]
        contour!(ax, xs, ys, err; levels=10, linewidth=2)
        heatmap!(ax, xs, ys, err)
        return fig
    end

    fig
end

Appendix

julia
using InteractiveUtils
InteractiveUtils.versioninfo()

if @isdefined(MLDataDevices)
    if @isdefined(CUDA) && MLDataDevices.functional(CUDADevice)
        println()
        CUDA.versioninfo()
    end

    if @isdefined(AMDGPU) && MLDataDevices.functional(AMDGPUDevice)
        println()
        AMDGPU.versioninfo()
    end
end
Julia Version 1.11.5
Commit 760b2e5b739 (2025-04-14 06:53 UTC)
Build Info:
  Official https://julialang.org/ release
Platform Info:
  OS: Linux (x86_64-linux-gnu)
  CPU: 48 × AMD EPYC 7402 24-Core Processor
  WORD_SIZE: 64
  LLVM: libLLVM-16.0.6 (ORCJIT, znver2)
Threads: 48 default, 0 interactive, 24 GC (on 2 virtual cores)
Environment:
  JULIA_CPU_THREADS = 2
  LD_LIBRARY_PATH = /usr/local/nvidia/lib:/usr/local/nvidia/lib64
  JULIA_PKG_SERVER = 
  JULIA_NUM_THREADS = 48
  JULIA_CUDA_HARD_MEMORY_LIMIT = 100%
  JULIA_PKG_PRECOMPILE_AUTO = 0
  JULIA_DEBUG = Literate
  JULIA_DEPOT_PATH = /root/.cache/julia-buildkite-plugin/depots/01872db4-8c79-43af-ab7d-12abac4f24f6

This page was generated using Literate.jl.