Convolutional VAE for MNIST

Convolutional variational autoencoder (CVAE) implementation in Lux, trained on MNIST. It is based on the CVAE implementation in MLX.

julia
using Lux,
    Reactant,
    MLDatasets,
    Random,
    Statistics,
    Enzyme,
    MLUtils,
    DataAugmentation,
    ConcreteStructs,
    OneHotArrays,
    ImageShow,
    Images,
    Printf,
    Optimisers

const xdev = reactant_device(; force=true)
const cdev = cpu_device()

const IN_VSCODE = isdefined(Main, :VSCodeServer)
false

Model Definition

First we will define the encoder. It maps the input to a normal distribution in latent space and samples a latent vector from that distribution.
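
Concretely, the encoder outputs a mean μ and a log-variance logσ², and the latent vector is drawn with the reparameterization trick so that gradients can flow through the sampling step: z = μ + σ ⊙ ε, where σ = exp(logσ² / 2) and ε ~ N(0, I).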

julia
function cvae_encoder(
    rng=Random.default_rng();
    num_latent_dims::Int,
    image_shape::Dims{3},
    max_num_filters::Int,
)
    flattened_dim = prod(image_shape[1:2] .÷ 8) * max_num_filters
    return @compact(;
        embed=Chain(
            Chain(
                Conv((3, 3), image_shape[3] => max_num_filters ÷ 4; stride=2, pad=1),
                BatchNorm(max_num_filters ÷ 4, leakyrelu),
            ),
            Chain(
                Conv((3, 3), max_num_filters ÷ 4 => max_num_filters ÷ 2; stride=2, pad=1),
                BatchNorm(max_num_filters ÷ 2, leakyrelu),
            ),
            Chain(
                Conv((3, 3), max_num_filters ÷ 2 => max_num_filters; stride=2, pad=1),
                BatchNorm(max_num_filters, leakyrelu),
            ),
            FlattenLayer(),
        ),
        proj_mu=Dense(flattened_dim, num_latent_dims; init_bias=zeros32),
        proj_log_var=Dense(flattened_dim, num_latent_dims; init_bias=zeros32),
        rng
    ) do x
        y = embed(x)

        μ = proj_mu(y)
        logσ² = proj_log_var(y)

        T = eltype(logσ²)
        logσ² = clamp.(logσ², -T(20.0f0), T(10.0f0))
        σ = exp.(logσ² .* T(0.5))

        # Generate a tensor of random values from a normal distribution
        ϵ = randn_like(Lux.replicate(rng), σ)

        # Reparameterization trick to backpropagate through sampling
        z = ϵ .* σ .+ μ

        @return z, μ, logσ²
    end
end

Similarly, we define the decoder, which maps a latent vector back to image space.

julia
function cvae_decoder(; num_latent_dims::Int, image_shape::Dims{3}, max_num_filters::Int)
    flattened_dim = prod(image_shape[1:2] .÷ 8) * max_num_filters
    return @compact(;
        linear=Dense(num_latent_dims, flattened_dim),
        upchain=Chain(
            Chain(
                Upsample(2),
                Conv((3, 3), max_num_filters => max_num_filters ÷ 2; stride=1, pad=1),
                BatchNorm(max_num_filters ÷ 2, leakyrelu),
            ),
            Chain(
                Upsample(2),
                Conv((3, 3), max_num_filters ÷ 2 => max_num_filters ÷ 4; stride=1, pad=1),
                BatchNorm(max_num_filters ÷ 4, leakyrelu),
            ),
            Chain(
                Upsample(2),
                Conv(
                    (3, 3), max_num_filters ÷ 4 => image_shape[3], sigmoid; stride=1, pad=1
                ),
            ),
        ),
        max_num_filters
    ) do x
        y = linear(x)
        img = reshape(y, image_shape[1] ÷ 8, image_shape[2] ÷ 8, max_num_filters, :)
        @return upchain(img)
    end
end

@concrete struct CVAE <: AbstractLuxContainerLayer{(:encoder, :decoder)}
    encoder <: AbstractLuxLayer
    decoder <: AbstractLuxLayer
end

function CVAE(
    rng=Random.default_rng();
    num_latent_dims::Int,
    image_shape::Dims{3},
    max_num_filters::Int,
)
    decoder = cvae_decoder(; num_latent_dims, image_shape, max_num_filters)
    encoder = cvae_encoder(rng; num_latent_dims, image_shape, max_num_filters)
    return CVAE(encoder, decoder)
end

function (cvae::CVAE)(x, ps, st)
    (z, μ, logσ²), st_enc = cvae.encoder(x, ps.encoder, st.encoder)
    x_rec, st_dec = cvae.decoder(z, ps.decoder, st.decoder)
    return (x_rec, μ, logσ²), (; encoder=st_enc, decoder=st_dec)
end

function encode(cvae::CVAE, x, ps, st)
    (z, _, _), st_enc = cvae.encoder(x, ps.encoder, st.encoder)
    return z, (; encoder=st_enc, st.decoder)
end

function decode(cvae::CVAE, z, ps, st)
    x_rec, st_dec = cvae.decoder(z, ps.decoder, st.decoder)
    return x_rec, (; decoder=st_dec, st.encoder)
end

Loading MNIST

julia
@concrete struct TensorDataset
    dataset
    transform
    total_samples::Int
end

Base.length(ds::TensorDataset) = ds.total_samples

function Base.getindex(ds::TensorDataset, idxs::Union{Vector{<:Integer},AbstractRange})
    img = Image.(eachslice(convert2image(ds.dataset, idxs); dims=3))
    return stack(parent ∘ itemdata ∘ Base.Fix1(apply, ds.transform), img)
end

function loadmnist(batchsize, image_size::Dims{2})
    # Load MNIST: use only a subset of the images on CI for demonstration purposes
    train_dataset = MNIST(; split=:train)
    N = parse(Bool, get(ENV, "CI", "false")) ? 5000 : length(train_dataset)

    train_transform = ScaleKeepAspect(image_size) |> ImageToTensor()
    trainset = TensorDataset(train_dataset, train_transform, N)
    trainloader = DataLoader(trainset; batchsize, shuffle=true, partial=false)

    return trainloader
end
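
As a quick sanity check of the loader (a sketch only; it assumes the MNIST artifact can be downloaded, e.g. with ENV["DATADEPS_ALWAYS_ACCEPT"] = "true"):

julia
loader = loadmnist(128, (64, 64))
X = first(loader)
size(X)    # (64, 64, 1, 128)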

Helper Functions

Generate an Image Grid from a list of images

julia
function create_image_grid(imgs::AbstractArray, grid_rows::Int, grid_cols::Int)
    total_images = grid_rows * grid_cols
    imgs = map(eachslice(imgs[:, :, :, 1:total_images]; dims=4)) do img
        cimg = if size(img, 3) == 1
            colorview(Gray, view(img, :, :, 1))
        else
            colorview(RGB, permutedims(img, (3, 1, 2)))
        end
        return cimg'
    end
    return create_image_grid(imgs, grid_rows, grid_cols)
end

function create_image_grid(images::Vector, grid_rows::Int, grid_cols::Int)
    # Check if the number of images matches the grid
    total_images = grid_rows * grid_cols
    @assert length(images) == total_images

    # Get the size of a single image (assuming all images are the same size)
    img_height, img_width = size(images[1])

    # Create a blank grid canvas
    grid_height = img_height * grid_rows
    grid_width = img_width * grid_cols
    grid_canvas = similar(images[1], grid_height, grid_width)

    # Place each image in the correct position on the canvas
    for idx in 1:total_images
        row = div(idx - 1, grid_cols) + 1
        col = mod(idx - 1, grid_cols) + 1

        start_row = (row - 1) * img_height + 1
        start_col = (col - 1) * img_width + 1

        grid_canvas[start_row:(start_row + img_height - 1), start_col:(start_col + img_width - 1)] .= images[idx]
    end

    return grid_canvas
end

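# Negative ELBO: a reconstruction term (summed MSE) plus the KL divergence between
# the approximate posterior N(μ, σ²) and the standard normal prior,
# KL = -1/2 * sum(1 .+ logσ² .- μ.^2 .- exp.(logσ²)).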
function loss_function(model, ps, st, X)
    (y, μ, logσ²), st = model(X, ps, st)
    reconstruction_loss = MSELoss(; agg=sum)(y, X)
    kldiv_loss = -sum(1 .+ logσ² .- μ .^ 2 .- exp.(logσ²)) / 2
    loss = reconstruction_loss + kldiv_loss
    return loss, st, (; y, μ, logσ², reconstruction_loss, kldiv_loss)
end

function generate_images(
    model, ps, st; num_samples::Int=128, num_latent_dims::Int, decode_compiled=nothing
)
    z = get_device((ps, st))(randn(Float32, num_latent_dims, num_samples))
    if decode_compiled === nothing
        images, _ = decode(model, z, ps, Lux.testmode(st))
    else
        images, _ = decode_compiled(model, z, ps, Lux.testmode(st))
        images = cpu_device()(images)
    end
    return create_image_grid(images, 8, num_samples ÷ 8)
end

function reconstruct_images(model, ps, st, X)
    (recon, _, _), _ = model(X, ps, Lux.testmode(st))
    recon = cpu_device()(recon)
    return create_image_grid(recon, 8, size(X, ndims(X)) ÷ 8)
end
reconstruct_images (generic function with 1 method)

Training the Model

julia
function main(;
    batchsize=128,
    image_size=(64, 64),
    num_latent_dims=8,
    max_num_filters=64,
    seed=0,
    epochs=50,
    weight_decay=1.0e-5,
    learning_rate=1.0e-3,
    num_samples=batchsize,
)
    rng = Xoshiro()
    Random.seed!(rng, seed)

    cvae = CVAE(rng; num_latent_dims, image_shape=(image_size..., 1), max_num_filters)
    ps, st = Lux.setup(rng, cvae) |> xdev

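    # Compile the decoder and the full forward pass once with Reactant so that
    # image generation and reconstruction during training reuse the compiled versions.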
    z = xdev(randn(Float32, num_latent_dims, num_samples))
    decode_compiled = @compile decode(cvae, z, ps, Lux.testmode(st))
    x = randn(Float32, image_size..., 1, batchsize) |> xdev
    cvae_compiled = @compile cvae(x, ps, Lux.testmode(st))

    train_dataloader = loadmnist(batchsize, image_size) |> xdev

    opt = AdamW(; eta=learning_rate, lambda=weight_decay)

    train_state = Training.TrainState(cvae, ps, st, opt)

    @printf "Total Trainable Parameters: %0.4f M\n" (Lux.parameterlength(ps) / 1.0e6)

    empty_row, model_img_full = nothing, nothing

    for epoch in 1:epochs
        loss_total = 0.0f0
        total_samples = 0

        start_time = time()
        for (i, X) in enumerate(train_dataloader)
            (_, loss, _, train_state) = Training.single_train_step!(
                AutoEnzyme(), loss_function, X, train_state; return_gradients=Val(false)
            )

            loss_total += loss
            total_samples += size(X, ndims(X))

            if i % 250 == 0 || i == length(train_dataloader)
                throughput = total_samples / (time() - start_time)
                @printf "Epoch %d, Iter %d, Loss: %.7f, Throughput: %.6f im/s\n" epoch i loss throughput
            end
        end
        total_time = time() - start_time

        train_loss = loss_total / length(train_dataloader)
        throughput = total_samples / total_time
        @printf "Epoch %d, Train Loss: %.7f, Time: %.4fs, Throughput: %.6f im/s\n" epoch train_loss total_time throughput

        if IN_VSCODE || epoch == epochs
            recon_images = reconstruct_images(
                cvae_compiled,
                train_state.parameters,
                train_state.states,
                first(train_dataloader),
            )
            gen_images = generate_images(
                cvae,
                train_state.parameters,
                train_state.states;
                num_samples,
                num_latent_dims,
                decode_compiled,
            )
            if empty_row === nothing
                empty_row = similar(gen_images, image_size[1], size(gen_images, 2))
                fill!(empty_row, 0)
            end
            model_img_full = vcat(recon_images, empty_row, gen_images)
            IN_VSCODE && display(model_img_full)
        end
    end

    return model_img_full
end

img = main()
Total Trainable Parameters: 0.1493 M
Epoch 1, Iter 39, Loss: 24158.3398438, Throughput: 29.072379 im/s
Epoch 1, Train Loss: 39958.9648438, Time: 172.0724s, Throughput: 29.011046 im/s
Epoch 2, Iter 39, Loss: 17957.6562500, Throughput: 92.545063 im/s
Epoch 2, Train Loss: 20380.2988281, Time: 53.9415s, Throughput: 92.544785 im/s
Epoch 3, Iter 39, Loss: 16073.1367188, Throughput: 92.219859 im/s
Epoch 3, Train Loss: 16658.0664062, Time: 54.1317s, Throughput: 92.219596 im/s
Epoch 4, Iter 39, Loss: 15014.4726562, Throughput: 93.101951 im/s
Epoch 4, Train Loss: 15173.9121094, Time: 53.6188s, Throughput: 93.101668 im/s
Epoch 5, Iter 39, Loss: 13553.7656250, Throughput: 92.782999 im/s
Epoch 5, Train Loss: 14169.8369141, Time: 53.8031s, Throughput: 92.782730 im/s
Epoch 6, Iter 39, Loss: 13190.2558594, Throughput: 93.764820 im/s
Epoch 6, Train Loss: 13405.6445312, Time: 53.2398s, Throughput: 93.764515 im/s
Epoch 7, Iter 39, Loss: 12947.9277344, Throughput: 94.373131 im/s
Epoch 7, Train Loss: 12984.7480469, Time: 52.8966s, Throughput: 94.372779 im/s
Epoch 8, Iter 39, Loss: 12857.2890625, Throughput: 93.760650 im/s
Epoch 8, Train Loss: 12562.1318359, Time: 53.2421s, Throughput: 93.760336 im/s
Epoch 9, Iter 39, Loss: 12664.2939453, Throughput: 93.381079 im/s
Epoch 9, Train Loss: 12336.2138672, Time: 53.4585s, Throughput: 93.380790 im/s
Epoch 10, Iter 39, Loss: 12279.1875000, Throughput: 94.827201 im/s
Epoch 10, Train Loss: 12069.5683594, Time: 52.6433s, Throughput: 94.826879 im/s
Epoch 11, Iter 39, Loss: 12012.1552734, Throughput: 93.510758 im/s
Epoch 11, Train Loss: 11817.0312500, Time: 53.3844s, Throughput: 93.510486 im/s
Epoch 12, Iter 39, Loss: 11400.7890625, Throughput: 93.618180 im/s
Epoch 12, Train Loss: 11809.7402344, Time: 53.3231s, Throughput: 93.617882 im/s
Epoch 13, Iter 39, Loss: 11733.1533203, Throughput: 92.651037 im/s
Epoch 13, Train Loss: 11437.7060547, Time: 53.8798s, Throughput: 92.650752 im/s
Epoch 14, Iter 39, Loss: 11106.3417969, Throughput: 93.245912 im/s
Epoch 14, Train Loss: 11332.9785156, Time: 53.5360s, Throughput: 93.245649 im/s
Epoch 15, Iter 39, Loss: 10645.5957031, Throughput: 93.690212 im/s
Epoch 15, Train Loss: 11303.6269531, Time: 53.2821s, Throughput: 93.689957 im/s
Epoch 16, Iter 39, Loss: 10954.5527344, Throughput: 95.474996 im/s
Epoch 16, Train Loss: 11024.5605469, Time: 52.2861s, Throughput: 95.474667 im/s
Epoch 17, Iter 39, Loss: 10106.9667969, Throughput: 93.658319 im/s
Epoch 17, Train Loss: 10964.3740234, Time: 53.3003s, Throughput: 93.658029 im/s
Epoch 18, Iter 39, Loss: 11617.8642578, Throughput: 95.266285 im/s
Epoch 18, Train Loss: 10927.0595703, Time: 52.4006s, Throughput: 95.266003 im/s
Epoch 19, Iter 39, Loss: 11155.8222656, Throughput: 92.800817 im/s
Epoch 19, Train Loss: 10785.2958984, Time: 53.7928s, Throughput: 92.800553 im/s
Epoch 20, Iter 39, Loss: 10789.0878906, Throughput: 95.301402 im/s
Epoch 20, Train Loss: 10741.5468750, Time: 52.3814s, Throughput: 95.301082 im/s
Epoch 21, Iter 39, Loss: 10178.9267578, Throughput: 93.946436 im/s
Epoch 21, Train Loss: 10613.1699219, Time: 53.1368s, Throughput: 93.946167 im/s
Epoch 22, Iter 39, Loss: 10585.2900391, Throughput: 93.056331 im/s
Epoch 22, Train Loss: 10567.5146484, Time: 53.6451s, Throughput: 93.056033 im/s
Epoch 23, Iter 39, Loss: 9663.0253906, Throughput: 93.114205 im/s
Epoch 23, Train Loss: 10481.0185547, Time: 53.6117s, Throughput: 93.113928 im/s
Epoch 24, Iter 39, Loss: 9860.7685547, Throughput: 93.391142 im/s
Epoch 24, Train Loss: 10480.6308594, Time: 53.4528s, Throughput: 93.390866 im/s
Epoch 25, Iter 39, Loss: 10201.1640625, Throughput: 93.842657 im/s
Epoch 25, Train Loss: 10411.8271484, Time: 53.1956s, Throughput: 93.842371 im/s
Epoch 26, Iter 39, Loss: 10401.9121094, Throughput: 93.253275 im/s
Epoch 26, Train Loss: 10415.5332031, Time: 53.5318s, Throughput: 93.252970 im/s
Epoch 27, Iter 39, Loss: 10346.1621094, Throughput: 93.261740 im/s
Epoch 27, Train Loss: 10263.5205078, Time: 53.5270s, Throughput: 93.261298 im/s
Epoch 28, Iter 39, Loss: 11357.7626953, Throughput: 92.263603 im/s
Epoch 28, Train Loss: 10217.3623047, Time: 54.1060s, Throughput: 92.263316 im/s
Epoch 29, Iter 39, Loss: 10314.1396484, Throughput: 93.189625 im/s
Epoch 29, Train Loss: 10214.7705078, Time: 53.5684s, Throughput: 93.189322 im/s
Epoch 30, Iter 39, Loss: 10241.6103516, Throughput: 93.697147 im/s
Epoch 30, Train Loss: 10082.3017578, Time: 53.2782s, Throughput: 93.696876 im/s
Epoch 31, Iter 39, Loss: 9704.3095703, Throughput: 95.829135 im/s
Epoch 31, Train Loss: 10011.7714844, Time: 52.0929s, Throughput: 95.828830 im/s
Epoch 32, Iter 39, Loss: 10085.3427734, Throughput: 94.940445 im/s
Epoch 32, Train Loss: 9984.6503906, Time: 52.5805s, Throughput: 94.940143 im/s
Epoch 33, Iter 39, Loss: 10048.2597656, Throughput: 93.857036 im/s
Epoch 33, Train Loss: 9906.6250000, Time: 53.1874s, Throughput: 93.856763 im/s
Epoch 34, Iter 39, Loss: 9687.9277344, Throughput: 93.122883 im/s
Epoch 34, Train Loss: 9919.3671875, Time: 53.6067s, Throughput: 93.122629 im/s
Epoch 35, Iter 39, Loss: 9698.9433594, Throughput: 93.443910 im/s
Epoch 35, Train Loss: 9849.5312500, Time: 53.4226s, Throughput: 93.443612 im/s
Epoch 36, Iter 39, Loss: 9590.4785156, Throughput: 93.743734 im/s
Epoch 36, Train Loss: 9841.6962891, Time: 53.2517s, Throughput: 93.743463 im/s
Epoch 37, Iter 39, Loss: 9748.9121094, Throughput: 94.897919 im/s
Epoch 37, Train Loss: 9887.2705078, Time: 52.6041s, Throughput: 94.897616 im/s
Epoch 38, Iter 39, Loss: 10758.8398438, Throughput: 93.106153 im/s
Epoch 38, Train Loss: 9807.1406250, Time: 53.6164s, Throughput: 93.105883 im/s
Epoch 39, Iter 39, Loss: 9202.5878906, Throughput: 94.561374 im/s
Epoch 39, Train Loss: 9730.7812500, Time: 52.7913s, Throughput: 94.561107 im/s
Epoch 40, Iter 39, Loss: 10093.2324219, Throughput: 93.599625 im/s
Epoch 40, Train Loss: 9712.9492188, Time: 53.3337s, Throughput: 93.599332 im/s
Epoch 41, Iter 39, Loss: 10219.1210938, Throughput: 92.668737 im/s
Epoch 41, Train Loss: 9648.4921875, Time: 53.8695s, Throughput: 92.668428 im/s
Epoch 42, Iter 39, Loss: 9947.5156250, Throughput: 93.623448 im/s
Epoch 42, Train Loss: 9628.9550781, Time: 53.3201s, Throughput: 93.623160 im/s
Epoch 43, Iter 39, Loss: 9660.0292969, Throughput: 94.017236 im/s
Epoch 43, Train Loss: 9634.7578125, Time: 53.0968s, Throughput: 94.016922 im/s
Epoch 44, Iter 39, Loss: 9761.9785156, Throughput: 94.335096 im/s
Epoch 44, Train Loss: 9599.5156250, Time: 52.9179s, Throughput: 94.334845 im/s
Epoch 45, Iter 39, Loss: 9259.5175781, Throughput: 93.530002 im/s
Epoch 45, Train Loss: 9515.9794922, Time: 53.3734s, Throughput: 93.529715 im/s
Epoch 46, Iter 39, Loss: 9944.0810547, Throughput: 92.955128 im/s
Epoch 46, Train Loss: 9489.8535156, Time: 53.7035s, Throughput: 92.954846 im/s
Epoch 47, Iter 39, Loss: 9651.5800781, Throughput: 93.045539 im/s
Epoch 47, Train Loss: 9605.1933594, Time: 53.6513s, Throughput: 93.045272 im/s
Epoch 48, Iter 39, Loss: 9639.5537109, Throughput: 93.082157 im/s
Epoch 48, Train Loss: 9485.1425781, Time: 53.6302s, Throughput: 93.081886 im/s
Epoch 49, Iter 39, Loss: 9601.8300781, Throughput: 94.371247 im/s
Epoch 49, Train Loss: 9441.6943359, Time: 52.8976s, Throughput: 94.370949 im/s
Epoch 50, Iter 39, Loss: 9134.3046875, Throughput: 94.849734 im/s
Epoch 50, Train Loss: 9454.4326172, Time: 52.6308s, Throughput: 94.849476 im/s

Appendix

julia
using InteractiveUtils
InteractiveUtils.versioninfo()

if @isdefined(MLDataDevices)
    if @isdefined(CUDA) && MLDataDevices.functional(CUDADevice)
        println()
        CUDA.versioninfo()
    end

    if @isdefined(AMDGPU) && MLDataDevices.functional(AMDGPUDevice)
        println()
        AMDGPU.versioninfo()
    end
end
Julia Version 1.11.8
Commit cf1da5e20e3 (2025-11-06 17:49 UTC)
Build Info:
  Official https://julialang.org/ release
Platform Info:
  OS: Linux (x86_64-linux-gnu)
  CPU: 4 × AMD EPYC 7763 64-Core Processor
  WORD_SIZE: 64
  LLVM: libLLVM-16.0.6 (ORCJIT, znver3)
Threads: 4 default, 0 interactive, 2 GC (on 4 virtual cores)
Environment:
  JULIA_DEBUG = Literate
  LD_LIBRARY_PATH = 
  JULIA_NUM_THREADS = 4
  JULIA_CPU_HARD_MEMORY_LIMIT = 100%
  JULIA_PKG_PRECOMPILE_AUTO = 0

This page was generated using Literate.jl.