
Convolutional VAE for MNIST

Convolutional variational autoencoder (CVAE) implementation in Lux using MNIST. This is based on the CVAE implementation in MLX.

julia
using Lux,
    Reactant,
    MLDatasets,
    Random,
    Statistics,
    Enzyme,
    MLUtils,
    DataAugmentation,
    ConcreteStructs,
    OneHotArrays,
    ImageShow,
    Images,
    Printf,
    Optimisers

const xdev = reactant_device(; force=true)
const cdev = cpu_device()

const IN_VSCODE = isdefined(Main, :VSCodeServer)
false

Model Definition

First we will define the encoder. It maps the input to a normal distribution in latent space and samples a latent vector from that distribution.
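
Concretely, given the encoder outputs $\mu$ and $\log\sigma^2$, the sampling step below uses the standard reparameterization

$$\sigma = \exp\!\left(\tfrac{1}{2}\log\sigma^{2}\right), \qquad z = \mu + \sigma \odot \epsilon, \qquad \epsilon \sim \mathcal{N}(0, I),$$

so gradients can flow through $\mu$ and $\log\sigma^2$ while the randomness is isolated in $\epsilon$.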

julia
function cvae_encoder(
    rng=Random.default_rng();
    num_latent_dims::Int,
    image_shape::Dims{3},
    max_num_filters::Int,
)
    flattened_dim = prod(image_shape[1:2] .÷ 8) * max_num_filters
    return @compact(;
        embed=Chain(
            Chain(
                Conv((3, 3), image_shape[3] => max_num_filters ÷ 4; stride=2, pad=1),
                BatchNorm(max_num_filters ÷ 4, leakyrelu),
            ),
            Chain(
                Conv((3, 3), max_num_filters ÷ 4 => max_num_filters ÷ 2; stride=2, pad=1),
                BatchNorm(max_num_filters ÷ 2, leakyrelu),
            ),
            Chain(
                Conv((3, 3), max_num_filters ÷ 2 => max_num_filters; stride=2, pad=1),
                BatchNorm(max_num_filters, leakyrelu),
            ),
            FlattenLayer(),
        ),
        proj_mu=Dense(flattened_dim, num_latent_dims; init_bias=zeros32),
        proj_log_var=Dense(flattened_dim, num_latent_dims; init_bias=zeros32),
        rng
    ) do x
        y = embed(x)

        μ = proj_mu(y)
        logσ² = proj_log_var(y)

        T = eltype(logσ²)
        logσ² = clamp.(logσ², -T(20.0f0), T(10.0f0))
        σ = exp.(logσ² .* T(0.5))

        # Generate a tensor of random values from a normal distribution
        ϵ = randn_like(Lux.replicate(rng), σ)

        # Reparameterization trick to backpropagate through sampling
        z = ϵ .* σ .+ μ

        @return z, μ, logσ²
    end
end
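
As a quick sanity check (a hypothetical sketch, not part of the original script), the encoder can be instantiated and run on a dummy batch; with 64×64×1 inputs and 8 latent dimensions, each of z, μ, and logσ² is an 8×batchsize matrix.

julia
# Hypothetical shape check for the encoder (random weights, CPU).
enc = cvae_encoder(; num_latent_dims=8, image_shape=(64, 64, 1), max_num_filters=64)
ps_enc, st_enc = Lux.setup(Random.default_rng(), enc)
x_dummy = randn(Float32, 64, 64, 1, 4)            # a batch of 4 random "images"
(z, μ, logσ²), _ = enc(x_dummy, ps_enc, st_enc)
size(z)                                           # (8, 4): one latent code per image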

Similarly, we define the decoder, which maps a latent vector back to an image.

julia
function cvae_decoder(; num_latent_dims::Int, image_shape::Dims{3}, max_num_filters::Int)
    flattened_dim = prod(image_shape[1:2] .÷ 8) * max_num_filters
    return @compact(;
        linear=Dense(num_latent_dims, flattened_dim),
        upchain=Chain(
            Chain(
                Upsample(2),
                Conv((3, 3), max_num_filters => max_num_filters ÷ 2; stride=1, pad=1),
                BatchNorm(max_num_filters ÷ 2, leakyrelu),
            ),
            Chain(
                Upsample(2),
                Conv((3, 3), max_num_filters ÷ 2 => max_num_filters ÷ 4; stride=1, pad=1),
                BatchNorm(max_num_filters ÷ 4, leakyrelu),
            ),
            Chain(
                Upsample(2),
                Conv(
                    (3, 3), max_num_filters ÷ 4 => image_shape[3], sigmoid; stride=1, pad=1
                ),
            ),
        ),
        max_num_filters
    ) do x
        y = linear(x)
        img = reshape(y, image_shape[1] ÷ 8, image_shape[2] ÷ 8, max_num_filters, :)
        @return upchain(img)
    end
end

@concrete struct CVAE <: AbstractLuxContainerLayer{(:encoder, :decoder)}
    encoder <: AbstractLuxLayer
    decoder <: AbstractLuxLayer
end

function CVAE(
    rng=Random.default_rng();
    num_latent_dims::Int,
    image_shape::Dims{3},
    max_num_filters::Int,
)
    decoder = cvae_decoder(; num_latent_dims, image_shape, max_num_filters)
    encoder = cvae_encoder(rng; num_latent_dims, image_shape, max_num_filters)
    return CVAE(encoder, decoder)
end

function (cvae::CVAE)(x, ps, st)
    (z, μ, logσ²), st_enc = cvae.encoder(x, ps.encoder, st.encoder)
    x_rec, st_dec = cvae.decoder(z, ps.decoder, st.decoder)
    return (x_rec, μ, logσ²), (; encoder=st_enc, decoder=st_dec)
end

function encode(cvae::CVAE, x, ps, st)
    (z, _, _), st_enc = cvae.encoder(x, ps.encoder, st.encoder)
    return z, (; encoder=st_enc, st.decoder)
end

function decode(cvae::CVAE, z, ps, st)
    x_rec, st_dec = cvae.decoder(z, ps.decoder, st.decoder)
    return x_rec, (; decoder=st_dec, st.encoder)
end
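
Putting the pieces together, a hypothetical end-to-end check (not part of the original script) looks like this; the reconstruction has the same shape as the input, and encode/decode can also be called on their own.

julia
# Hypothetical end-to-end check of the CVAE container (random weights, CPU).
model = CVAE(; num_latent_dims=8, image_shape=(64, 64, 1), max_num_filters=64)
ps, st = Lux.setup(Random.default_rng(), model)
x = randn(Float32, 64, 64, 1, 4)
(x_rec, μ, logσ²), _ = model(x, ps, st)
size(x_rec)                                        # (64, 64, 1, 4), same as the input
z, _ = encode(model, x, ps, Lux.testmode(st))      # latent codes only
x_gen, _ = decode(model, z, ps, Lux.testmode(st))  # decode back to image space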

Loading MNIST

julia
@concrete struct TensorDataset
    dataset
    transform
    total_samples::Int
end

Base.length(ds::TensorDataset) = ds.total_samples

function Base.getindex(ds::TensorDataset, idxs::Union{Vector{<:Integer},AbstractRange})
    img = Image.(eachslice(convert2image(ds.dataset, idxs); dims=3))
    return stack(parent ∘ itemdata ∘ Base.Fix1(apply, ds.transform), img)
end

function loadmnist(batchsize, image_size::Dims{2})
    # Load MNIST: only 5000 samples for demonstration purposes on CI
    train_dataset = MNIST(; split=:train)
    N = parse(Bool, get(ENV, "CI", "false")) ? 5000 : length(train_dataset)

    train_transform = ScaleKeepAspect(image_size) |> ImageToTensor()
    trainset = TensorDataset(train_dataset, train_transform, N)
    trainloader = DataLoader(trainset; batchsize, shuffle=true, partial=false)

    return trainloader
end
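
For reference, a hypothetical peek at a single batch (assuming the MNIST artifact has been downloaded) would look like this; each batch is a 4-D Float32 array of size image_size × channels × batchsize with pixel values in [0, 1].

julia
# Hypothetical: fetch one batch from the loader and inspect it.
loader = loadmnist(128, (64, 64))
X = first(loader)
size(X)      # (64, 64, 1, 128)
extrema(X)   # values lie in [0, 1]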

Helper Functions

Generate an Image Grid from a list of images

julia
function create_image_grid(imgs::AbstractArray, grid_rows::Int, grid_cols::Int)
    total_images = grid_rows * grid_cols
    imgs = map(eachslice(imgs[:, :, :, 1:total_images]; dims=4)) do img
        cimg = if size(img, 3) == 1
            colorview(Gray, view(img, :, :, 1))
        else
            colorview(RGB, permutedims(img, (3, 1, 2)))
        end
        return cimg'
    end
    return create_image_grid(imgs, grid_rows, grid_cols)
end

function create_image_grid(images::Vector, grid_rows::Int, grid_cols::Int)
    # Check if the number of images matches the grid
    total_images = grid_rows * grid_cols
    @assert length(images) == total_images

    # Get the size of a single image (assuming all images are the same size)
    img_height, img_width = size(images[1])

    # Create a blank grid canvas
    grid_height = img_height * grid_rows
    grid_width = img_width * grid_cols
    grid_canvas = similar(images[1], grid_height, grid_width)

    # Place each image in the correct position on the canvas
    for idx in 1:total_images
        row = div(idx - 1, grid_cols) + 1
        col = mod(idx - 1, grid_cols) + 1

        start_row = (row - 1) * img_height + 1
        start_col = (col - 1) * img_width + 1

        grid_canvas[start_row:(start_row + img_height - 1), start_col:(start_col + img_width - 1)] .= images[idx]
    end

    return grid_canvas
end

function loss_function(model, ps, st, X)
    (y, μ, logσ²), st = model(X, ps, st)
    reconstruction_loss = MSELoss(; agg=sum)(y, X)
    kldiv_loss = -sum(1 .+ logσ² .- μ .^ 2 .- exp.(logσ²)) / 2
    loss = reconstruction_loss + kldiv_loss
    return loss, st, (; y, μ, logσ², reconstruction_loss, kldiv_loss)
end

function generate_images(
    model, ps, st; num_samples::Int=128, num_latent_dims::Int, decode_compiled=nothing
)
    z = get_device((ps, st))(randn(Float32, num_latent_dims, num_samples))
    if decode_compiled === nothing
        images, _ = decode(model, z, ps, Lux.testmode(st))
    else
        images, _ = decode_compiled(model, z, ps, Lux.testmode(st))
        images = cpu_device()(images)
    end
    return create_image_grid(images, 8, num_samples ÷ 8)
end

function reconstruct_images(model, ps, st, X)
    (recon, _, _), _ = model(X, ps, Lux.testmode(st))
    recon = cpu_device()(recon)
    return create_image_grid(recon, 8, size(X, ndims(X)) ÷ 8)
end
reconstruct_images (generic function with 1 method)
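
The loss_function above is (up to constants) a negative ELBO with sum reduction: a squared-error reconstruction term plus the closed-form KL divergence between the approximate posterior $\mathcal{N}(\mu, \sigma^2)$ and the standard-normal prior,

$$\mathcal{L} = \sum_i \left(\hat{x}_i - x_i\right)^2 \;-\; \frac{1}{2}\sum_j \left(1 + \log\sigma_j^2 - \mu_j^2 - \sigma_j^2\right),$$

which is exactly what the reconstruction_loss and kldiv_loss terms compute.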

Training the Model

julia
function main(;
    batchsize=128,
    image_size=(64, 64),
    num_latent_dims=8,
    max_num_filters=64,
    seed=0,
    epochs=50,
    weight_decay=1.0e-5,
    learning_rate=1.0e-3,
    num_samples=batchsize,
)
    rng = Xoshiro()
    Random.seed!(rng, seed)

    cvae = CVAE(rng; num_latent_dims, image_shape=(image_size..., 1), max_num_filters)
    ps, st = xdev(Lux.setup(rng, cvae))

    z = xdev(randn(Float32, num_latent_dims, num_samples))
    decode_compiled = Reactant.with_config(;
        dot_general_precision=PrecisionConfig.HIGH,
        convolution_precision=PrecisionConfig.HIGH,
    ) do
        @compile decode(cvae, z, ps, Lux.testmode(st))
    end
    x = xdev(randn(Float32, image_size..., 1, batchsize))
    cvae_compiled = Reactant.with_config(;
        dot_general_precision=PrecisionConfig.HIGH,
        convolution_precision=PrecisionConfig.HIGH,
    ) do
        @compile cvae(x, ps, Lux.testmode(st))
    end

    train_dataloader = xdev(loadmnist(batchsize, image_size))

    opt = AdamW(; eta=learning_rate, lambda=weight_decay)

    train_state = Training.TrainState(cvae, ps, st, opt)

    @printf "Total Trainable Parameters: %0.4f M\n" (Lux.parameterlength(ps) / 1.0e6)

    empty_row, model_img_full = nothing, nothing

    for epoch in 1:epochs
        loss_total = 0.0f0
        total_samples = 0

        start_time = time()
        for (i, X) in enumerate(train_dataloader)
            (_, loss, _, train_state) = Training.single_train_step!(
                AutoEnzyme(), loss_function, X, train_state; return_gradients=Val(false)
            )

            loss_total += loss
            total_samples += size(X, ndims(X))

            if i % 250 == 0 || i == length(train_dataloader)
                throughput = total_samples / (time() - start_time)
                @printf "Epoch %d, Iter %d, Loss: %.7f, Throughput: %.6f im/s\n" epoch i loss throughput
            end
        end
        total_time = time() - start_time

        train_loss = loss_total / length(train_dataloader)
        throughput = total_samples / total_time
        @printf "Epoch %d, Train Loss: %.7f, Time: %.4fs, Throughput: %.6f im/s\n" epoch train_loss total_time throughput

        if IN_VSCODE || epoch == epochs
            recon_images = reconstruct_images(
                cvae_compiled,
                train_state.parameters,
                train_state.states,
                first(train_dataloader),
            )
            gen_images = generate_images(
                cvae,
                train_state.parameters,
                train_state.states;
                num_samples,
                num_latent_dims,
                decode_compiled,
            )
            if empty_row === nothing
                empty_row = similar(gen_images, image_size[1], size(gen_images, 2))
                fill!(empty_row, 0)
            end
            model_img_full = vcat(recon_images, empty_row, gen_images)
            IN_VSCODE && display(model_img_full)
        end
    end

    return model_img_full
end

img = main()
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1757915005.640299 2758876 service.cc:163] XLA service 0x202584c0 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
I0000 00:00:1757915005.640357 2758876 service.cc:171]   StreamExecutor device (0): NVIDIA A100-PCIE-40GB MIG 1g.5gb, Compute Capability 8.0
I0000 00:00:1757915005.641070 2758876 se_gpu_pjrt_client.cc:1338] Using BFC allocator.
I0000 00:00:1757915005.641105 2758876 gpu_helpers.cc:136] XLA backend allocating 3825205248 bytes on device 0 for BFCAllocator.
I0000 00:00:1757915005.641136 2758876 gpu_helpers.cc:177] XLA backend will use up to 1275068416 bytes on device 0 for CollectiveBFCAllocator.
I0000 00:00:1757915005.653111 2758876 cuda_dnn.cc:463] Loaded cuDNN version 91200
Total Trainable Parameters: 0.1493 M
┌ Warning: `training` is set to `Val{true}()` but is not being used within an autodiff call (gradient, jacobian, etc...). This will be slow. If you are using a `Lux.jl` model, set it to inference (test) mode using `LuxCore.testmode`. Reliance on this behavior is discouraged, and is not guaranteed by Semantic Versioning, and might be removed without a deprecation cycle. It is recommended to fix this issue in your code.
└ @ LuxLib.Utils /var/lib/buildkite-agent/builds/gpuci-12/julialang/lux-dot-jl/lib/LuxLib/src/utils.jl:334
Epoch 1, Iter 39, Loss: 24366.3906250, Throughput: 48.407827 im/s
Epoch 1, Train Loss: 39711.4101562, Time: 103.5174s, Throughput: 48.223778 im/s
Epoch 2, Iter 39, Loss: 17583.0019531, Throughput: 1842.891975 im/s
Epoch 2, Train Loss: 20097.6406250, Time: 2.7090s, Throughput: 1842.768058 im/s
Epoch 3, Iter 39, Loss: 15335.8203125, Throughput: 1949.677924 im/s
Epoch 3, Train Loss: 16464.8164062, Time: 2.5607s, Throughput: 1949.442667 im/s
Epoch 4, Iter 39, Loss: 14384.9677734, Throughput: 1994.726282 im/s
Epoch 4, Train Loss: 15097.5771484, Time: 2.5029s, Throughput: 1994.495227 im/s
Epoch 5, Iter 39, Loss: 13364.8632812, Throughput: 1915.489746 im/s
Epoch 5, Train Loss: 13921.8544922, Time: 2.6064s, Throughput: 1915.321358 im/s
Epoch 6, Iter 39, Loss: 12681.4189453, Throughput: 2033.267801 im/s
Epoch 6, Train Loss: 13251.4541016, Time: 2.4553s, Throughput: 2033.122884 im/s
Epoch 7, Iter 39, Loss: 12879.7460938, Throughput: 2029.395320 im/s
Epoch 7, Train Loss: 12902.5585938, Time: 2.4601s, Throughput: 2029.182515 im/s
Epoch 8, Iter 39, Loss: 13142.1396484, Throughput: 2024.059216 im/s
Epoch 8, Train Loss: 12404.8955078, Time: 2.4666s, Throughput: 2023.849876 im/s
Epoch 9, Iter 39, Loss: 12170.4941406, Throughput: 1955.564909 im/s
Epoch 9, Train Loss: 12197.7773438, Time: 2.5531s, Throughput: 1955.296822 im/s
Epoch 10, Iter 39, Loss: 11184.3183594, Throughput: 1932.031429 im/s
Epoch 10, Train Loss: 11907.8476562, Time: 2.5842s, Throughput: 1931.733041 im/s
Epoch 11, Iter 39, Loss: 11619.0507812, Throughput: 1944.590825 im/s
Epoch 11, Train Loss: 11751.2705078, Time: 2.5674s, Throughput: 1944.366544 im/s
Epoch 12, Iter 39, Loss: 12159.3554688, Throughput: 1886.837468 im/s
Epoch 12, Train Loss: 11513.3974609, Time: 2.6460s, Throughput: 1886.604211 im/s
Epoch 13, Iter 39, Loss: 11802.1064453, Throughput: 1905.322269 im/s
Epoch 13, Train Loss: 11381.4882812, Time: 2.6203s, Throughput: 1905.128968 im/s
Epoch 14, Iter 39, Loss: 11491.2900391, Throughput: 1972.724162 im/s
Epoch 14, Train Loss: 11234.7675781, Time: 2.5307s, Throughput: 1972.542403 im/s
Epoch 15, Iter 39, Loss: 10475.3144531, Throughput: 1983.513597 im/s
Epoch 15, Train Loss: 11020.4951172, Time: 2.5170s, Throughput: 1983.288138 im/s
Epoch 16, Iter 39, Loss: 11075.1904297, Throughput: 1890.010690 im/s
Epoch 16, Train Loss: 10940.2099609, Time: 2.6415s, Throughput: 1889.813149 im/s
Epoch 17, Iter 39, Loss: 11364.1718750, Throughput: 1855.982964 im/s
Epoch 17, Train Loss: 10763.2851562, Time: 2.6899s, Throughput: 1855.824875 im/s
Epoch 18, Iter 39, Loss: 10784.3916016, Throughput: 1871.859531 im/s
Epoch 18, Train Loss: 10716.8564453, Time: 2.6671s, Throughput: 1871.709268 im/s
Epoch 19, Iter 39, Loss: 10900.5390625, Throughput: 1868.081000 im/s
Epoch 19, Train Loss: 10736.7861328, Time: 2.6725s, Throughput: 1867.886683 im/s
Epoch 20, Iter 39, Loss: 11006.9414062, Throughput: 1847.195578 im/s
Epoch 20, Train Loss: 10643.0605469, Time: 2.7027s, Throughput: 1847.021386 im/s
Epoch 21, Iter 39, Loss: 10788.5537109, Throughput: 1857.093137 im/s
Epoch 21, Train Loss: 10534.2412109, Time: 2.6883s, Throughput: 1856.936342 im/s
Epoch 22, Iter 39, Loss: 10714.3027344, Throughput: 1963.794905 im/s
Epoch 22, Train Loss: 10386.5869141, Time: 2.5423s, Throughput: 1963.601897 im/s
Epoch 23, Iter 39, Loss: 9820.3808594, Throughput: 1832.675166 im/s
Epoch 23, Train Loss: 10391.6250000, Time: 2.7241s, Throughput: 1832.551337 im/s
Epoch 24, Iter 39, Loss: 9989.7929688, Throughput: 1949.368797 im/s
Epoch 24, Train Loss: 10239.6796875, Time: 2.5611s, Throughput: 1949.196034 im/s
Epoch 25, Iter 39, Loss: 10249.0605469, Throughput: 1848.284329 im/s
Epoch 25, Train Loss: 10185.2382812, Time: 2.7011s, Throughput: 1848.154303 im/s
Epoch 26, Iter 39, Loss: 9556.0781250, Throughput: 1872.129665 im/s
Epoch 26, Train Loss: 10184.0927734, Time: 2.6668s, Throughput: 1871.939358 im/s
Epoch 27, Iter 39, Loss: 10126.1132812, Throughput: 1956.615139 im/s
Epoch 27, Train Loss: 10067.0302734, Time: 2.5516s, Throughput: 1956.404162 im/s
Epoch 28, Iter 39, Loss: 10033.1474609, Throughput: 1854.319205 im/s
Epoch 28, Train Loss: 10120.6894531, Time: 2.6923s, Throughput: 1854.176013 im/s
Epoch 29, Iter 39, Loss: 9936.6044922, Throughput: 1865.836158 im/s
Epoch 29, Train Loss: 10090.4541016, Time: 2.6758s, Throughput: 1865.624189 im/s
Epoch 30, Iter 39, Loss: 10190.8476562, Throughput: 1884.980661 im/s
Epoch 30, Train Loss: 10075.0478516, Time: 2.6488s, Throughput: 1884.659815 im/s
Epoch 31, Iter 39, Loss: 10113.8457031, Throughput: 1883.708947 im/s
Epoch 31, Train Loss: 9920.9306641, Time: 2.6504s, Throughput: 1883.483579 im/s
Epoch 32, Iter 39, Loss: 9767.4511719, Throughput: 1881.277651 im/s
Epoch 32, Train Loss: 9922.0976562, Time: 2.6537s, Throughput: 1881.125871 im/s
Epoch 33, Iter 39, Loss: 10236.1464844, Throughput: 1932.385729 im/s
Epoch 33, Train Loss: 9839.0000000, Time: 2.5835s, Throughput: 1932.243602 im/s
Epoch 34, Iter 39, Loss: 9362.5302734, Throughput: 2017.732870 im/s
Epoch 34, Train Loss: 9763.0908203, Time: 2.4743s, Throughput: 2017.528142 im/s
Epoch 35, Iter 39, Loss: 9771.3730469, Throughput: 2017.185661 im/s
Epoch 35, Train Loss: 9781.9306641, Time: 2.4750s, Throughput: 2017.005720 im/s
Epoch 36, Iter 39, Loss: 10394.4765625, Throughput: 1967.035480 im/s
Epoch 36, Train Loss: 9699.3203125, Time: 2.5380s, Throughput: 1966.865852 im/s
Epoch 37, Iter 39, Loss: 10526.4462891, Throughput: 1917.130139 im/s
Epoch 37, Train Loss: 9684.0244141, Time: 2.6041s, Throughput: 1916.986560 im/s
Epoch 38, Iter 39, Loss: 9427.2148438, Throughput: 1939.572425 im/s
Epoch 38, Train Loss: 9651.7031250, Time: 2.5740s, Throughput: 1939.399238 im/s
Epoch 39, Iter 39, Loss: 9724.2988281, Throughput: 1924.300688 im/s
Epoch 39, Train Loss: 9629.8906250, Time: 2.5944s, Throughput: 1924.122791 im/s
Epoch 40, Iter 39, Loss: 9139.0556641, Throughput: 1849.787074 im/s
Epoch 40, Train Loss: 9548.7324219, Time: 2.6990s, Throughput: 1849.601936 im/s
Epoch 41, Iter 39, Loss: 9286.7773438, Throughput: 1823.589628 im/s
Epoch 41, Train Loss: 9677.4287109, Time: 2.7377s, Throughput: 1823.419066 im/s
Epoch 42, Iter 39, Loss: 9203.0156250, Throughput: 1822.038114 im/s
Epoch 42, Train Loss: 9553.8349609, Time: 2.7400s, Throughput: 1821.892414 im/s
Epoch 43, Iter 39, Loss: 9293.8222656, Throughput: 1819.953124 im/s
Epoch 43, Train Loss: 9531.3710938, Time: 2.7431s, Throughput: 1819.806491 im/s
Epoch 44, Iter 39, Loss: 10034.3886719, Throughput: 1830.049622 im/s
Epoch 44, Train Loss: 9463.0957031, Time: 2.7280s, Throughput: 1829.892722 im/s
Epoch 45, Iter 39, Loss: 9122.0781250, Throughput: 1828.102674 im/s
Epoch 45, Train Loss: 9437.2451172, Time: 2.7311s, Throughput: 1827.855627 im/s
Epoch 46, Iter 39, Loss: 9067.2968750, Throughput: 1782.760324 im/s
Epoch 46, Train Loss: 9400.2197266, Time: 2.8004s, Throughput: 1782.582137 im/s
Epoch 47, Iter 39, Loss: 9212.8125000, Throughput: 1841.347615 im/s
Epoch 47, Train Loss: 9367.3642578, Time: 2.7112s, Throughput: 1841.233459 im/s
Epoch 48, Iter 39, Loss: 9970.6025391, Throughput: 1806.594466 im/s
Epoch 48, Train Loss: 9327.6669922, Time: 2.7634s, Throughput: 1806.446705 im/s
Epoch 49, Iter 39, Loss: 9505.4296875, Throughput: 1817.042141 im/s
Epoch 49, Train Loss: 9362.5292969, Time: 2.7476s, Throughput: 1816.890775 im/s
Epoch 50, Iter 39, Loss: 9654.3720703, Throughput: 1841.717707 im/s
Epoch 50, Train Loss: 9332.5058594, Time: 2.7108s, Throughput: 1841.538392 im/s

Appendix

julia
using InteractiveUtils
InteractiveUtils.versioninfo()

if @isdefined(MLDataDevices)
    if @isdefined(CUDA) && MLDataDevices.functional(CUDADevice)
        println()
        CUDA.versioninfo()
    end

    if @isdefined(AMDGPU) && MLDataDevices.functional(AMDGPUDevice)
        println()
        AMDGPU.versioninfo()
    end
end
Julia Version 1.11.6
Commit 9615af0f269 (2025-07-09 12:58 UTC)
Build Info:
  Official https://julialang.org/ release
Platform Info:
  OS: Linux (x86_64-linux-gnu)
  CPU: 48 × AMD EPYC 7402 24-Core Processor
  WORD_SIZE: 64
  LLVM: libLLVM-16.0.6 (ORCJIT, znver2)
Threads: 48 default, 0 interactive, 24 GC (on 2 virtual cores)
Environment:
  JULIA_CPU_THREADS = 2
  JULIA_DEPOT_PATH = /root/.cache/julia-buildkite-plugin/depots/01872db4-8c79-43af-ab7d-12abac4f24f6
  LD_LIBRARY_PATH = /usr/local/nvidia/lib:/usr/local/nvidia/lib64
  JULIA_PKG_SERVER = 
  JULIA_NUM_THREADS = 48
  JULIA_CUDA_HARD_MEMORY_LIMIT = 100%
  JULIA_PKG_PRECOMPILE_AUTO = 0
  JULIA_DEBUG = Literate

This page was generated using Literate.jl.