Convolutional VAE for MNIST using Reactant
Convolutional variational autoencoder (CVAE) implementation in Lux using MNIST. This is based on the CVAE implementation in MLX.
julia
using Lux,
    Reactant,
    MLDatasets,
    Random,
    Statistics,
    Enzyme,
    MLUtils,
    DataAugmentation,
    ConcreteStructs,
    OneHotArrays,
    ImageShow,
    Images,
    Printf,
    Optimisers
const xdev = reactant_device(; force=true)
const cdev = cpu_device()
const IN_VSCODE = isdefined(Main, :VSCodeServer)
false
Model Definition
First we will define the encoder. It maps the input to a normal distribution in latent space and samples a latent vector from that distribution.
julia
function cvae_encoder(
    rng=Random.default_rng();
    num_latent_dims::Int,
    image_shape::Dims{3},
    max_num_filters::Int,
)
    flattened_dim = prod(image_shape[1:2] .÷ 8) * max_num_filters
    return @compact(;
        embed=Chain(
            Chain(
                Conv((3, 3), image_shape[3] => max_num_filters ÷ 4; stride=2, pad=1),
                BatchNorm(max_num_filters ÷ 4, leakyrelu),
            ),
            Chain(
                Conv((3, 3), max_num_filters ÷ 4 => max_num_filters ÷ 2; stride=2, pad=1),
                BatchNorm(max_num_filters ÷ 2, leakyrelu),
            ),
            Chain(
                Conv((3, 3), max_num_filters ÷ 2 => max_num_filters; stride=2, pad=1),
                BatchNorm(max_num_filters, leakyrelu),
            ),
            FlattenLayer(),
        ),
        proj_mu=Dense(flattened_dim, num_latent_dims; init_bias=zeros32),
        proj_log_var=Dense(flattened_dim, num_latent_dims; init_bias=zeros32),
        rng
    ) do x
        y = embed(x)

        μ = proj_mu(y)
        logσ² = proj_log_var(y)

        T = eltype(logσ²)
        logσ² = clamp.(logσ², -T(20.0f0), T(10.0f0))
        σ = exp.(logσ² .* T(0.5))

        # Generate a tensor of random values from a normal distribution
        ϵ = randn_like(Lux.replicate(rng), σ)

        # Reparameterization trick to backpropagate through sampling
        z = ϵ .* σ .+ μ

        @return z, μ, logσ²
    end
end
cvae_encoder (generic function with 2 methods)
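The reparameterization trick used above rewrites sampling z ~ N(μ, σ²) as z = μ + σ ⊙ ϵ with ϵ ~ N(0, 1), so the sampling step becomes differentiable with respect to μ and logσ². As a minimal standalone sketch of just this step (hypothetical dimensions, independent of the model above, reusing the packages already loaded):

julia
μ = randn(Float32, 8, 4)        # hypothetical means: 8 latent dims, batch of 4
logσ² = randn(Float32, 8, 4)    # hypothetical log-variances
σ = exp.(logσ² .* 0.5f0)        # standard deviations
ϵ = randn(Float32, size(σ)...)  # noise sampled outside the gradient path
z = ϵ .* σ .+ μ                 # differentiable w.r.t. μ and logσ²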
Similarly, we define the decoder, which maps a latent vector back to image space.
julia
function cvae_decoder(; num_latent_dims::Int, image_shape::Dims{3}, max_num_filters::Int)
    flattened_dim = prod(image_shape[1:2] .÷ 8) * max_num_filters
    return @compact(;
        linear=Dense(num_latent_dims, flattened_dim),
        upchain=Chain(
            Chain(
                Upsample(2),
                Conv((3, 3), max_num_filters => max_num_filters ÷ 2; stride=1, pad=1),
                BatchNorm(max_num_filters ÷ 2, leakyrelu),
            ),
            Chain(
                Upsample(2),
                Conv((3, 3), max_num_filters ÷ 2 => max_num_filters ÷ 4; stride=1, pad=1),
                BatchNorm(max_num_filters ÷ 4, leakyrelu),
            ),
            Chain(
                Upsample(2),
                Conv(
                    (3, 3), max_num_filters ÷ 4 => image_shape[3], sigmoid; stride=1, pad=1
                ),
            ),
        ),
        max_num_filters
    ) do x
        y = linear(x)
        img = reshape(y, image_shape[1] ÷ 8, image_shape[2] ÷ 8, max_num_filters, :)
        @return upchain(img)
    end
end
@concrete struct CVAE <: AbstractLuxContainerLayer{(:encoder, :decoder)}
    encoder <: AbstractLuxLayer
    decoder <: AbstractLuxLayer
end

function CVAE(
    rng=Random.default_rng();
    num_latent_dims::Int,
    image_shape::Dims{3},
    max_num_filters::Int,
)
    decoder = cvae_decoder(; num_latent_dims, image_shape, max_num_filters)
    encoder = cvae_encoder(rng; num_latent_dims, image_shape, max_num_filters)
    return CVAE(encoder, decoder)
end

function (cvae::CVAE)(x, ps, st)
    (z, μ, logσ²), st_enc = cvae.encoder(x, ps.encoder, st.encoder)
    x_rec, st_dec = cvae.decoder(z, ps.decoder, st.decoder)
    return (x_rec, μ, logσ²), (; encoder=st_enc, decoder=st_dec)
end

function encode(cvae::CVAE, x, ps, st)
    (z, _, _), st_enc = cvae.encoder(x, ps.encoder, st.encoder)
    return z, (; encoder=st_enc, st.decoder)
end

function decode(cvae::CVAE, z, ps, st)
    x_rec, st_dec = cvae.decoder(z, ps.decoder, st.decoder)
    return x_rec, (; decoder=st_dec, st.encoder)
end
decode (generic function with 1 method)
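As a quick sanity check, the container layer can be exercised on random data before compiling anything with Reactant. A minimal CPU sketch, reusing the definitions above (the shapes match the defaults used later in training):

julia
rng = Random.default_rng()
model = CVAE(rng; num_latent_dims=8, image_shape=(64, 64, 1), max_num_filters=64)
ps, st = Lux.setup(rng, model)

x = randn(Float32, 64, 64, 1, 2)       # two random "images" in WHCN layout
(x_rec, μ, logσ²), _ = model(x, ps, st)
@assert size(x_rec) == size(x)         # three stride-2 convs down, three 2x upsamples back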
Loading MNIST
julia
@concrete struct TensorDataset
    dataset
    transform
    total_samples::Int
end

Base.length(ds::TensorDataset) = ds.total_samples

function Base.getindex(ds::TensorDataset, idxs::Union{Vector{<:Integer},AbstractRange})
    img = Image.(eachslice(convert2image(ds.dataset, idxs); dims=3))
    return stack(parent ∘ itemdata ∘ Base.Fix1(apply, ds.transform), img)
end
function loadmnist(batchsize, image_size::Dims{2})
    # Load MNIST: Only 5000 samples for demonstration purposes on CI
    train_dataset = MNIST(; split=:train)
    N = parse(Bool, get(ENV, "CI", "false")) ? 5000 : length(train_dataset)

    train_transform = ScaleKeepAspect(image_size) |> ImageToTensor()
    trainset = TensorDataset(train_dataset, train_transform, N)
    trainloader = DataLoader(trainset; batchsize, shuffle=true, partial=false)

    return trainloader
end
loadmnist (generic function with 1 method)
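Each batch drawn from this loader is a 4D array in WHCN layout, ready for the convolutional encoder. A quick check on CPU (note that MLDatasets will prompt to download MNIST on first use):

julia
loader = loadmnist(128, (64, 64))
X = first(loader)
size(X)  # (64, 64, 1, 128): width, height, channels, batch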
Helper Functions
Generate an Image Grid from a list of images
julia
function create_image_grid(imgs::AbstractArray, grid_rows::Int, grid_cols::Int)
    total_images = grid_rows * grid_cols
    imgs = map(eachslice(imgs[:, :, :, 1:total_images]; dims=4)) do img
        cimg = if size(img, 3) == 1
            colorview(Gray, view(img, :, :, 1))
        else
            colorview(RGB, permutedims(img, (3, 1, 2)))
        end
        return cimg'
    end
    return create_image_grid(imgs, grid_rows, grid_cols)
end
function create_image_grid(images::Vector, grid_rows::Int, grid_cols::Int)
    # Check if the number of images matches the grid
    total_images = grid_rows * grid_cols
    @assert length(images) == total_images

    # Get the size of a single image (assuming all images are the same size)
    img_height, img_width = size(images[1])

    # Create a blank grid canvas
    grid_height = img_height * grid_rows
    grid_width = img_width * grid_cols
    grid_canvas = similar(images[1], grid_height, grid_width)

    # Place each image in the correct position on the canvas
    for idx in 1:total_images
        row = div(idx - 1, grid_cols) + 1
        col = mod(idx - 1, grid_cols) + 1

        start_row = (row - 1) * img_height + 1
        start_col = (col - 1) * img_width + 1

        grid_canvas[start_row:(start_row + img_height - 1), start_col:(start_col + img_width - 1)] .= images[idx]
    end

    return grid_canvas
end
function loss_function(model, ps, st, X)
    (y, μ, logσ²), st = model(X, ps, st)
    reconstruction_loss = MSELoss(; agg=sum)(y, X)
    kldiv_loss = -sum(1 .+ logσ² .- μ .^ 2 .- exp.(logσ²)) / 2
    loss = reconstruction_loss + kldiv_loss
    return loss, st, (; y, μ, logσ², reconstruction_loss, kldiv_loss)
end

function generate_images(
    model, ps, st; num_samples::Int=128, num_latent_dims::Int, decode_compiled=nothing
)
    z = get_device((ps, st))(randn(Float32, num_latent_dims, num_samples))
    if decode_compiled === nothing
        images, _ = decode(model, z, ps, Lux.testmode(st))
    else
        images, _ = decode_compiled(model, z, ps, Lux.testmode(st))
        images = cpu_device()(images)
    end
    return create_image_grid(images, 8, num_samples ÷ 8)
end

function reconstruct_images(model, ps, st, X)
    (recon, _, _), _ = model(X, ps, Lux.testmode(st))
    recon = cpu_device()(recon)
    return create_image_grid(recon, 8, size(X, ndims(X)) ÷ 8)
end
reconstruct_images (generic function with 1 method)
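The kldiv_loss term in loss_function is the closed-form KL divergence between the approximate posterior N(μ, diag(σ²)) and the standard normal prior: KL = -½ Σᵢ (1 + logσ²ᵢ - μᵢ² - σᵢ²). Since σ² = exp(logσ²), this is exactly -sum(1 .+ logσ² .- μ .^ 2 .- exp.(logσ²)) / 2. Together with the summed MSE reconstruction term, the loss is the negative evidence lower bound, up to constants and scaling.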
Training the Model
julia
function main(;
    batchsize=128,
    image_size=(64, 64),
    num_latent_dims=8,
    max_num_filters=64,
    seed=0,
    epochs=50,
    weight_decay=1.0e-5,
    learning_rate=1.0e-3,
    num_samples=batchsize,
)
    rng = Xoshiro()
    Random.seed!(rng, seed)

    cvae = CVAE(rng; num_latent_dims, image_shape=(image_size..., 1), max_num_filters)
    ps, st = xdev(Lux.setup(rng, cvae))

    z = xdev(randn(Float32, num_latent_dims, num_samples))
    decode_compiled = Reactant.with_config(;
        dot_general_precision=PrecisionConfig.HIGH,
        convolution_precision=PrecisionConfig.HIGH,
    ) do
        @compile decode(cvae, z, ps, Lux.testmode(st))
    end
    x = xdev(randn(Float32, image_size..., 1, batchsize))
    cvae_compiled = Reactant.with_config(;
        dot_general_precision=PrecisionConfig.HIGH,
        convolution_precision=PrecisionConfig.HIGH,
    ) do
        @compile cvae(x, ps, Lux.testmode(st))
    end

    train_dataloader = xdev(loadmnist(batchsize, image_size))

    opt = AdamW(; eta=learning_rate, lambda=weight_decay)

    train_state = Training.TrainState(cvae, ps, st, opt)

    @printf "Total Trainable Parameters: %0.4f M\n" (Lux.parameterlength(ps) / 1.0e6)

    empty_row, model_img_full = nothing, nothing

    for epoch in 1:epochs
        loss_total = 0.0f0
        total_samples = 0

        start_time = time()
        for (i, X) in enumerate(train_dataloader)
            (_, loss, _, train_state) = Training.single_train_step!(
                AutoEnzyme(), loss_function, X, train_state; return_gradients=Val(false)
            )

            loss_total += loss
            total_samples += size(X, ndims(X))

            if i % 250 == 0 || i == length(train_dataloader)
                throughput = total_samples / (time() - start_time)
                @printf "Epoch %d, Iter %d, Loss: %.7f, Throughput: %.6f im/s\n" epoch i loss throughput
            end
        end
        total_time = time() - start_time

        train_loss = loss_total / length(train_dataloader)
        throughput = total_samples / total_time
        @printf "Epoch %d, Train Loss: %.7f, Time: %.4fs, Throughput: %.6f im/s\n" epoch train_loss total_time throughput

        if IN_VSCODE || epoch == epochs
            recon_images = reconstruct_images(
                cvae_compiled,
                train_state.parameters,
                train_state.states,
                first(train_dataloader),
            )
            gen_images = generate_images(
                cvae,
                train_state.parameters,
                train_state.states;
                num_samples,
                num_latent_dims,
                decode_compiled,
            )
            if empty_row === nothing
                empty_row = similar(gen_images, image_size[1], size(gen_images, 2))
                fill!(empty_row, 0)
            end
            model_img_full = vcat(recon_images, empty_row, gen_images)
            IN_VSCODE && display(model_img_full)
        end
    end

    return model_img_full
end

img = main()
2025-07-14 00:39:46.323761: I external/xla/xla/service/service.cc:153] XLA service 0x17dbcd70 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
2025-07-14 00:39:46.323790: I external/xla/xla/service/service.cc:161] StreamExecutor device (0): Quadro RTX 5000, Compute Capability 7.5
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1752453586.324580 2811010 se_gpu_pjrt_client.cc:1370] Using BFC allocator.
I0000 00:00:1752453586.324648 2811010 gpu_helpers.cc:136] XLA backend allocating 12528893952 bytes on device 0 for BFCAllocator.
I0000 00:00:1752453586.324705 2811010 gpu_helpers.cc:177] XLA backend will use up to 4176297984 bytes on device 0 for CollectiveBFCAllocator.
I0000 00:00:1752453586.335179 2811010 cuda_dnn.cc:471] Loaded cuDNN version 90800
┌ Warning: `training` is set to `Val{false}()` but is being used within an autodiff call (gradient, jacobian, etc...). This might lead to incorrect results. If you are using a `Lux.jl` model, set it to training mode using `LuxCore.trainmode`.
└ @ LuxLib.Utils /var/lib/buildkite-agent/builds/gpuci-15/julialang/lux-dot-jl/lib/LuxLib/src/utils.jl:344
Total Trainable Parameters: 0.1493 M
Epoch 1, Iter 39, Loss: 24963.9121094, Throughput: 47.987542 im/s
Epoch 1, Train Loss: 39725.4726562, Time: 104.4212s, Throughput: 47.806382 im/s
Epoch 2, Iter 39, Loss: 19044.4667969, Throughput: 2082.211548 im/s
Epoch 2, Train Loss: 20419.7441406, Time: 2.3977s, Throughput: 2081.999738 im/s
Epoch 3, Iter 39, Loss: 15161.2285156, Throughput: 2126.058430 im/s
Epoch 3, Train Loss: 16778.8945312, Time: 2.3484s, Throughput: 2125.736168 im/s
Epoch 4, Iter 39, Loss: 14570.8203125, Throughput: 2137.613137 im/s
Epoch 4, Train Loss: 15131.2021484, Time: 2.3356s, Throughput: 2137.328379 im/s
Epoch 5, Iter 39, Loss: 13666.5195312, Throughput: 2117.689601 im/s
Epoch 5, Train Loss: 14056.8427734, Time: 2.3575s, Throughput: 2117.492783 im/s
Epoch 6, Iter 39, Loss: 13660.4101562, Throughput: 2144.175021 im/s
Epoch 6, Train Loss: 13488.5273438, Time: 2.3284s, Throughput: 2143.921001 im/s
Epoch 7, Iter 39, Loss: 13200.5878906, Throughput: 2145.849061 im/s
Epoch 7, Train Loss: 12939.5117188, Time: 2.3265s, Throughput: 2145.683914 im/s
Epoch 8, Iter 39, Loss: 12785.8242188, Throughput: 2140.180037 im/s
Epoch 8, Train Loss: 12699.9560547, Time: 2.3327s, Throughput: 2139.970923 im/s
Epoch 9, Iter 39, Loss: 12512.6591797, Throughput: 2133.556665 im/s
Epoch 9, Train Loss: 12194.4003906, Time: 2.3400s, Throughput: 2133.291462 im/s
Epoch 10, Iter 39, Loss: 11931.3603516, Throughput: 2136.434665 im/s
Epoch 10, Train Loss: 11969.3125000, Time: 2.3369s, Throughput: 2136.138452 im/s
Epoch 11, Iter 39, Loss: 11969.0126953, Throughput: 2125.152329 im/s
Epoch 11, Train Loss: 11706.8593750, Time: 2.3492s, Throughput: 2124.942476 im/s
Epoch 12, Iter 39, Loss: 10892.3681641, Throughput: 2121.703966 im/s
Epoch 12, Train Loss: 11548.7705078, Time: 2.3531s, Throughput: 2121.489204 im/s
Epoch 13, Iter 39, Loss: 11890.1816406, Throughput: 2124.600502 im/s
Epoch 13, Train Loss: 11389.4414062, Time: 2.3499s, Throughput: 2124.317691 im/s
Epoch 14, Iter 39, Loss: 11148.0781250, Throughput: 2124.694717 im/s
Epoch 14, Train Loss: 11249.4951172, Time: 2.3499s, Throughput: 2124.366402 im/s
Epoch 15, Iter 39, Loss: 12428.2695312, Throughput: 2122.825344 im/s
Epoch 15, Train Loss: 11243.3144531, Time: 2.3519s, Throughput: 2122.563662 im/s
Epoch 16, Iter 39, Loss: 10584.3125000, Throughput: 2120.375678 im/s
Epoch 16, Train Loss: 11035.5458984, Time: 2.3546s, Throughput: 2120.112667 im/s
Epoch 17, Iter 39, Loss: 10781.1025391, Throughput: 2142.089318 im/s
Epoch 17, Train Loss: 10910.2470703, Time: 2.3307s, Throughput: 2141.831191 im/s
Epoch 18, Iter 39, Loss: 11188.6679688, Throughput: 2135.304528 im/s
Epoch 18, Train Loss: 10866.1191406, Time: 2.3380s, Throughput: 2135.141871 im/s
Epoch 19, Iter 39, Loss: 10977.6933594, Throughput: 2138.025025 im/s
Epoch 19, Train Loss: 10704.9384766, Time: 2.3351s, Throughput: 2137.834668 im/s
Epoch 20, Iter 39, Loss: 10515.7031250, Throughput: 2128.767307 im/s
Epoch 20, Train Loss: 10579.8818359, Time: 2.3453s, Throughput: 2128.532288 im/s
Epoch 21, Iter 39, Loss: 11011.9023438, Throughput: 2122.664583 im/s
Epoch 21, Train Loss: 10576.5283203, Time: 2.3521s, Throughput: 2122.364001 im/s
Epoch 22, Iter 39, Loss: 11570.4335938, Throughput: 2136.732923 im/s
Epoch 22, Train Loss: 10418.4853516, Time: 2.3379s, Throughput: 2135.204797 im/s
Epoch 23, Iter 39, Loss: 10536.6171875, Throughput: 2138.314555 im/s
Epoch 23, Train Loss: 10381.0732422, Time: 2.3348s, Throughput: 2138.047949 im/s
Epoch 24, Iter 39, Loss: 10173.6308594, Throughput: 2137.481332 im/s
Epoch 24, Train Loss: 10392.9023438, Time: 2.3357s, Throughput: 2137.269909 im/s
Epoch 25, Iter 39, Loss: 10634.0761719, Throughput: 2133.167360 im/s
Epoch 25, Train Loss: 10259.5888672, Time: 2.3404s, Throughput: 2132.956790 im/s
Epoch 26, Iter 39, Loss: 10298.3105469, Throughput: 2131.105827 im/s
Epoch 26, Train Loss: 10244.0332031, Time: 2.3427s, Throughput: 2130.914748 im/s
Epoch 27, Iter 39, Loss: 9792.1474609, Throughput: 2143.297733 im/s
Epoch 27, Train Loss: 10139.9082031, Time: 2.3294s, Throughput: 2143.053133 im/s
Epoch 28, Iter 39, Loss: 9947.4726562, Throughput: 2129.487621 im/s
Epoch 28, Train Loss: 10111.6171875, Time: 2.3445s, Throughput: 2129.251360 im/s
Epoch 29, Iter 39, Loss: 10051.9628906, Throughput: 2108.902414 im/s
Epoch 29, Train Loss: 10093.0546875, Time: 2.3674s, Throughput: 2108.648612 im/s
Epoch 30, Iter 39, Loss: 9121.6894531, Throughput: 2111.488295 im/s
Epoch 30, Train Loss: 9996.5156250, Time: 2.3645s, Throughput: 2111.189167 im/s
Epoch 31, Iter 39, Loss: 9374.0000000, Throughput: 2089.392535 im/s
Epoch 31, Train Loss: 9986.2109375, Time: 2.3895s, Throughput: 2089.119019 im/s
Epoch 32, Iter 39, Loss: 9205.7529297, Throughput: 2103.262923 im/s
Epoch 32, Train Loss: 9949.0781250, Time: 2.3738s, Throughput: 2102.980272 im/s
Epoch 33, Iter 39, Loss: 9994.6074219, Throughput: 2098.634960 im/s
Epoch 33, Train Loss: 9892.6269531, Time: 2.3789s, Throughput: 2098.424212 im/s
Epoch 34, Iter 39, Loss: 9604.3779297, Throughput: 2122.442742 im/s
Epoch 34, Train Loss: 9867.8066406, Time: 2.3523s, Throughput: 2122.188252 im/s
Epoch 35, Iter 39, Loss: 9663.6933594, Throughput: 2121.640758 im/s
Epoch 35, Train Loss: 9794.8916016, Time: 2.3532s, Throughput: 2121.364967 im/s
Epoch 36, Iter 39, Loss: 9755.2070312, Throughput: 2145.652471 im/s
Epoch 36, Train Loss: 9756.2968750, Time: 2.3268s, Throughput: 2145.459654 im/s
Epoch 37, Iter 39, Loss: 9758.8398438, Throughput: 2122.884102 im/s
Epoch 37, Train Loss: 9851.5722656, Time: 2.3520s, Throughput: 2122.434566 im/s
Epoch 38, Iter 39, Loss: 9855.1406250, Throughput: 2140.813313 im/s
Epoch 38, Train Loss: 9666.5595703, Time: 2.3321s, Throughput: 2140.575626 im/s
Epoch 39, Iter 39, Loss: 9814.4208984, Throughput: 2144.833953 im/s
Epoch 39, Train Loss: 9759.0839844, Time: 2.3278s, Throughput: 2144.551881 im/s
Epoch 40, Iter 39, Loss: 9452.7255859, Throughput: 2091.892307 im/s
Epoch 40, Train Loss: 9581.2265625, Time: 2.3866s, Throughput: 2091.636315 im/s
Epoch 41, Iter 39, Loss: 9793.8613281, Throughput: 2124.046591 im/s
Epoch 41, Train Loss: 9552.0205078, Time: 2.3504s, Throughput: 2123.857853 im/s
Epoch 42, Iter 39, Loss: 10476.1083984, Throughput: 2128.672082 im/s
Epoch 42, Train Loss: 9549.0957031, Time: 2.3454s, Throughput: 2128.421721 im/s
Epoch 43, Iter 39, Loss: 8788.9824219, Throughput: 2105.273878 im/s
Epoch 43, Train Loss: 9522.1318359, Time: 2.3715s, Throughput: 2105.004019 im/s
Epoch 44, Iter 39, Loss: 9386.2304688, Throughput: 2107.526884 im/s
Epoch 44, Train Loss: 9615.7519531, Time: 2.3689s, Throughput: 2107.294622 im/s
Epoch 45, Iter 39, Loss: 9548.5800781, Throughput: 2135.294293 im/s
Epoch 45, Train Loss: 9542.6269531, Time: 2.3406s, Throughput: 2132.780152 im/s
Epoch 46, Iter 39, Loss: 9409.0742188, Throughput: 2125.491892 im/s
Epoch 46, Train Loss: 9436.5380859, Time: 2.3489s, Throughput: 2125.243789 im/s
Epoch 47, Iter 39, Loss: 9575.2812500, Throughput: 2129.046975 im/s
Epoch 47, Train Loss: 9397.6328125, Time: 2.3450s, Throughput: 2128.790033 im/s
Epoch 48, Iter 39, Loss: 9519.2021484, Throughput: 2127.378503 im/s
Epoch 48, Train Loss: 9361.0732422, Time: 2.3469s, Throughput: 2127.083931 im/s
Epoch 49, Iter 39, Loss: 8571.0097656, Throughput: 2123.060827 im/s
Epoch 49, Train Loss: 9439.6943359, Time: 2.3516s, Throughput: 2122.849665 im/s
Epoch 50, Iter 39, Loss: 10437.0605469, Throughput: 2137.482204 im/s
Epoch 50, Train Loss: 9315.0498047, Time: 2.3357s, Throughput: 2137.302852 im/s
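The returned img is a plain matrix of grayscale pixels (reconstructions on top, a blank separator row, then samples decoded from random latent vectors). If you want to keep it outside an interactive session, it can be written to disk with FileIO's save, which Images re-exports; the filename here is just an example:

julia
save("cvae_mnist.png", img)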
Appendix
julia
using InteractiveUtils
InteractiveUtils.versioninfo()

if @isdefined(MLDataDevices)
    if @isdefined(CUDA) && MLDataDevices.functional(CUDADevice)
        println()
        CUDA.versioninfo()
    end

    if @isdefined(AMDGPU) && MLDataDevices.functional(AMDGPUDevice)
        println()
        AMDGPU.versioninfo()
    end
end
Julia Version 1.11.6
Commit 9615af0f269 (2025-07-09 12:58 UTC)
Build Info:
Official https://julialang.org/ release
Platform Info:
OS: Linux (x86_64-linux-gnu)
CPU: 48 × AMD EPYC 7402 24-Core Processor
WORD_SIZE: 64
LLVM: libLLVM-16.0.6 (ORCJIT, znver2)
Threads: 48 default, 0 interactive, 24 GC (on 2 virtual cores)
Environment:
JULIA_CPU_THREADS = 2
LD_LIBRARY_PATH = /usr/local/nvidia/lib:/usr/local/nvidia/lib64
JULIA_PKG_SERVER =
JULIA_NUM_THREADS = 48
JULIA_CUDA_HARD_MEMORY_LIMIT = 100%
JULIA_PKG_PRECOMPILE_AUTO = 0
JULIA_DEBUG = Literate
JULIA_DEPOT_PATH = /root/.cache/julia-buildkite-plugin/depots/01872db4-8c79-43af-ab7d-12abac4f24f6
This page was generated using Literate.jl.