Convolutional VAE for MNIST
Convolutional variational autoencoder (CVAE) implementation in Lux, trained on MNIST. This is based on the CVAE implementation in MLX.
julia
using Lux,
Reactant,
MLDatasets,
Random,
Statistics,
Enzyme,
MLUtils,
DataAugmentation,
ConcreteStructs,
OneHotArrays,
ImageShow,
Images,
Printf,
Optimisers
const xdev = reactant_device(; force=true)
const cdev = cpu_device()
const IN_VSCODE = isdefined(Main, :VSCodeServer)
false
Model Definition
First we will define the encoder. It maps the input to a normal distribution in latent space and samples a latent vector from that distribution.
julia
function cvae_encoder(
rng=Random.default_rng();
num_latent_dims::Int,
image_shape::Dims{3},
max_num_filters::Int,
)
flattened_dim = prod(image_shape[1:2] .÷ 8) * max_num_filters
return @compact(;
embed=Chain(
Chain(
Conv((3, 3), image_shape[3] => max_num_filters ÷ 4; stride=2, pad=1),
BatchNorm(max_num_filters ÷ 4, leakyrelu),
),
Chain(
Conv((3, 3), max_num_filters ÷ 4 => max_num_filters ÷ 2; stride=2, pad=1),
BatchNorm(max_num_filters ÷ 2, leakyrelu),
),
Chain(
Conv((3, 3), max_num_filters ÷ 2 => max_num_filters; stride=2, pad=1),
BatchNorm(max_num_filters, leakyrelu),
),
FlattenLayer(),
),
proj_mu=Dense(flattened_dim, num_latent_dims; init_bias=zeros32),
proj_log_var=Dense(flattened_dim, num_latent_dims; init_bias=zeros32),
rng
) do x
y = embed(x)
μ = proj_mu(y)
logσ² = proj_log_var(y)
T = eltype(logσ²)
logσ² = clamp.(logσ², -T(20.0f0), T(10.0f0))
σ = exp.(logσ² .* T(0.5))
# Generate a tensor of random values from a normal distribution
ϵ = randn_like(Lux.replicate(rng), σ)
# Reparameterization trick to backpropagate through sampling
z = ϵ .* σ .+ μ
@return z, μ, logσ²
end
end
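As a quick sanity check, the encoder can be instantiated and run on a dummy batch (a minimal sketch; the shapes assume the 64×64 grayscale configuration used later in this tutorial):
julia
enc = cvae_encoder(; num_latent_dims=8, image_shape=(64, 64, 1), max_num_filters=64)
ps_enc, st_enc = Lux.setup(Random.default_rng(), enc)
x_dummy = randn(Float32, 64, 64, 1, 4)  # 4 fake grayscale images
(z, μ, logσ²), _ = enc(x_dummy, ps_enc, st_enc)
size(z)  # (8, 4): one 8-dimensional latent vector per image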
Similarly, we define the decoder; it maps a latent vector back to image space through a mirrored stack of upsampling convolutions.
julia
function cvae_decoder(; num_latent_dims::Int, image_shape::Dims{3}, max_num_filters::Int)
flattened_dim = prod(image_shape[1:2] .÷ 8) * max_num_filters
return @compact(;
linear=Dense(num_latent_dims, flattened_dim),
upchain=Chain(
Chain(
Upsample(2),
Conv((3, 3), max_num_filters => max_num_filters ÷ 2; stride=1, pad=1),
BatchNorm(max_num_filters ÷ 2, leakyrelu),
),
Chain(
Upsample(2),
Conv((3, 3), max_num_filters ÷ 2 => max_num_filters ÷ 4; stride=1, pad=1),
BatchNorm(max_num_filters ÷ 4, leakyrelu),
),
Chain(
Upsample(2),
Conv(
(3, 3), max_num_filters ÷ 4 => image_shape[3], sigmoid; stride=1, pad=1
),
),
),
max_num_filters
) do x
y = linear(x)
img = reshape(y, image_shape[1] ÷ 8, image_shape[2] ÷ 8, max_num_filters, :)
@return upchain(img)
end
end
@concrete struct CVAE <: AbstractLuxContainerLayer{(:encoder, :decoder)}
encoder <: AbstractLuxLayer
decoder <: AbstractLuxLayer
end
function CVAE(
rng=Random.default_rng();
num_latent_dims::Int,
image_shape::Dims{3},
max_num_filters::Int,
)
decoder = cvae_decoder(; num_latent_dims, image_shape, max_num_filters)
encoder = cvae_encoder(rng; num_latent_dims, image_shape, max_num_filters)
return CVAE(encoder, decoder)
end
function (cvae::CVAE)(x, ps, st)
(z, μ, logσ²), st_enc = cvae.encoder(x, ps.encoder, st.encoder)
x_rec, st_dec = cvae.decoder(z, ps.decoder, st.decoder)
return (x_rec, μ, logσ²), (; encoder=st_enc, decoder=st_dec)
end
function encode(cvae::CVAE, x, ps, st)
(z, _, _), st_enc = cvae.encoder(x, ps.encoder, st.encoder)
return z, (; encoder=st_enc, st.decoder)
end
function decode(cvae::CVAE, z, ps, st)
x_rec, st_dec = cvae.decoder(z, ps.decoder, st.decoder)
return x_rec, (; decoder=st_dec, st.encoder)
end
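Putting the pieces together: a full forward pass reconstructs the input, while `encode` and `decode` expose each half separately (a minimal sketch with dummy data):
julia
model = CVAE(; num_latent_dims=8, image_shape=(64, 64, 1), max_num_filters=64)
ps, st = Lux.setup(Random.default_rng(), model)
x = randn(Float32, 64, 64, 1, 4)
(x_rec, μ, logσ²), _ = model(x, ps, st)
size(x_rec)  # (64, 64, 1, 4): same shape as the input
z, _ = encode(model, x, ps, st)      # (8, 4) latent codes
x_dec, _ = decode(model, z, ps, st)  # decode back to image space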
Loading MNIST
julia
@concrete struct TensorDataset
dataset
transform
total_samples::Int
end
Base.length(ds::TensorDataset) = ds.total_samples
function Base.getindex(ds::TensorDataset, idxs::Union{Vector{<:Integer},AbstractRange})
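    # Wrap the raw MNIST slices as `DataAugmentation.Image` items, apply the
    # transform to each, and stack the results into a single WHCN array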
img = Image.(eachslice(convert2image(ds.dataset, idxs); dims=3))
return stack(parent ∘ itemdata ∘ Base.Fix1(apply, ds.transform), img)
end
function loadmnist(batchsize, image_size::Dims{2})
# Load MNIST: only 5000 samples for demonstration purposes on CI
train_dataset = MNIST(; split=:train)
N = parse(Bool, get(ENV, "CI", "false")) ? 5000 : length(train_dataset)
train_transform = ScaleKeepAspect(image_size) |> ImageToTensor()
trainset = TensorDataset(train_dataset, train_transform, N)
trainloader = DataLoader(trainset; batchsize, shuffle=true, partial=false)
return trainloader
end
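Iterating the loader yields WHCN-ordered batches ready for the model (a minimal sketch; the sizes assume `image_size=(64, 64)`):
julia
loader = loadmnist(128, (64, 64))
X = first(loader)
size(X)  # (64, 64, 1, 128): width × height × channels × batch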
Helper Functions
Generate an Image Grid from a list of images
julia
function create_image_grid(imgs::AbstractArray, grid_rows::Int, grid_cols::Int)
total_images = grid_rows * grid_cols
imgs = map(eachslice(imgs[:, :, :, 1:total_images]; dims=4)) do img
cimg = if size(img, 3) == 1
colorview(Gray, view(img, :, :, 1))
else
colorview(RGB, permutedims(img, (3, 1, 2)))
end
return cimg'
end
return create_image_grid(imgs, grid_rows, grid_cols)
end
function create_image_grid(images::Vector, grid_rows::Int, grid_cols::Int)
# Check if the number of images matches the grid
total_images = grid_rows * grid_cols
@assert length(images) == total_images
# Get the size of a single image (assuming all images are the same size)
img_height, img_width = size(images[1])
# Create a blank grid canvas
grid_height = img_height * grid_rows
grid_width = img_width * grid_cols
grid_canvas = similar(images[1], grid_height, grid_width)
# Place each image in the correct position on the canvas
for idx in 1:total_images
row = div(idx - 1, grid_cols) + 1
col = mod(idx - 1, grid_cols) + 1
start_row = (row - 1) * img_height + 1
start_col = (col - 1) * img_width + 1
grid_canvas[start_row:(start_row + img_height - 1), start_col:(start_col + img_width - 1)] .= images[idx]
end
return grid_canvas
end
function loss_function(model, ps, st, X)
(y, μ, logσ²), st = model(X, ps, st)
reconstruction_loss = MSELoss(; agg=sum)(y, X)
kldiv_loss = -sum(1 .+ logσ² .- μ .^ 2 .- exp.(logσ²)) / 2
loss = reconstruction_loss + kldiv_loss
return loss, st, (; y, μ, logσ², reconstruction_loss, kldiv_loss)
end
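# Note: the KL term above is the closed-form divergence between the approximate
# posterior N(μ, σ²) and the standard normal prior,
#     KL = -1/2 * Σ (1 + logσ² - μ² - σ²),
# summed over both the latent dimensions and the batch.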
function generate_images(
model, ps, st; num_samples::Int=128, num_latent_dims::Int, decode_compiled=nothing
)
z = get_device((ps, st))(randn(Float32, num_latent_dims, num_samples))
if decode_compiled === nothing
images, _ = decode(model, z, ps, Lux.testmode(st))
else
images, _ = decode_compiled(model, z, ps, Lux.testmode(st))
images = cpu_device()(images)
end
return create_image_grid(images, 8, num_samples ÷ 8)
end
function reconstruct_images(model, ps, st, X)
(recon, _, _), _ = model(X, ps, Lux.testmode(st))
recon = cpu_device()(recon)
return create_image_grid(recon, 8, size(X, ndims(X)) ÷ 8)
end
reconstruct_images (generic function with 1 method)
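As a standalone illustration of the grid helper (a minimal sketch; the data is random noise rather than MNIST digits):
julia
dummy = rand(Float32, 28, 28, 1, 16)   # 16 fake grayscale images
grid = create_image_grid(dummy, 4, 4)  # tile them into a 4 × 4 grid
size(grid)  # (112, 112)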
Training the Model
julia
function main(;
batchsize=128,
image_size=(64, 64),
num_latent_dims=8,
max_num_filters=64,
seed=0,
epochs=50,
weight_decay=1.0e-5,
learning_rate=1.0e-3,
num_samples=batchsize,
)
rng = Xoshiro()
Random.seed!(rng, seed)
cvae = CVAE(rng; num_latent_dims, image_shape=(image_size..., 1), max_num_filters)
ps, st = xdev(Lux.setup(rng, cvae))
z = xdev(randn(Float32, num_latent_dims, num_samples))
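    # Compile `decode` once up front so images can be sampled during training
    # without retracing the full model on every call.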
decode_compiled = Reactant.with_config(;
dot_general_precision=PrecisionConfig.HIGH,
convolution_precision=PrecisionConfig.HIGH,
) do
@compile decode(cvae, z, ps, Lux.testmode(st))
end
x = xdev(randn(Float32, image_size..., 1, batchsize))
cvae_compiled = Reactant.with_config(;
dot_general_precision=PrecisionConfig.HIGH,
convolution_precision=PrecisionConfig.HIGH,
) do
@compile cvae(x, ps, Lux.testmode(st))
end
train_dataloader = xdev(loadmnist(batchsize, image_size))
opt = AdamW(; eta=learning_rate, lambda=weight_decay)
train_state = Training.TrainState(cvae, ps, st, opt)
@printf "Total Trainable Parameters: %0.4f M\n" (Lux.parameterlength(ps) / 1.0e6)
empty_row, model_img_full = nothing, nothing
for epoch in 1:epochs
loss_total = 0.0f0
total_samples = 0
start_time = time()
for (i, X) in enumerate(train_dataloader)
(_, loss, _, train_state) = Training.single_train_step!(
AutoEnzyme(), loss_function, X, train_state; return_gradients=Val(false)
)
loss_total += loss
total_samples += size(X, ndims(X))
if i % 250 == 0 || i == length(train_dataloader)
throughput = total_samples / (time() - start_time)
@printf "Epoch %d, Iter %d, Loss: %.7f, Throughput: %.6f im/s\n" epoch i loss throughput
end
end
total_time = time() - start_time
train_loss = loss_total / length(train_dataloader)
throughput = total_samples / total_time
@printf "Epoch %d, Train Loss: %.7f, Time: %.4fs, Throughput: %.6f im/s\n" epoch train_loss total_time throughput
if IN_VSCODE || epoch == epochs
recon_images = reconstruct_images(
cvae_compiled,
train_state.parameters,
train_state.states,
first(train_dataloader),
)
gen_images = generate_images(
cvae,
train_state.parameters,
train_state.states;
num_samples,
num_latent_dims,
decode_compiled,
)
if empty_row === nothing
empty_row = similar(gen_images, image_size[1], size(gen_images, 2))
fill!(empty_row, 0)
end
model_img_full = vcat(recon_images, empty_row, gen_images)
IN_VSCODE && display(model_img_full)
end
end
return model_img_full
end
img = main()
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1760388315.128891 2927840 service.cc:158] XLA service 0x3b8e8f0 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
I0000 00:00:1760388315.128951 2927840 service.cc:166] StreamExecutor device (0): NVIDIA A100-PCIE-40GB MIG 1g.5gb, Compute Capability 8.0
I0000 00:00:1760388315.129738 2927840 se_gpu_pjrt_client.cc:1339] Using BFC allocator.
I0000 00:00:1760388315.129785 2927840 gpu_helpers.cc:136] XLA backend allocating 3825205248 bytes on device 0 for BFCAllocator.
I0000 00:00:1760388315.129835 2927840 gpu_helpers.cc:177] XLA backend will use up to 1275068416 bytes on device 0 for CollectiveBFCAllocator.
I0000 00:00:1760388315.142064 2927840 cuda_dnn.cc:463] Loaded cuDNN version 91200
Total Trainable Parameters: 0.1493 M
┌ Warning: `training` is set to `Val{true}()` but is not being used within an autodiff call (gradient, jacobian, etc...). This will be slow. If you are using a `Lux.jl` model, set it to inference (test) mode using `LuxCore.testmode`. Reliance on this behavior is discouraged, and is not guaranteed by Semantic Versioning, and might be removed without a deprecation cycle. It is recommended to fix this issue in your code.
└ @ LuxLib.Utils /var/lib/buildkite-agent/builds/gpuci-10/julialang/lux-dot-jl/lib/LuxLib/src/utils.jl:334
Epoch 1, Iter 39, Loss: 24320.4042969, Throughput: 49.691621 im/s
Epoch 1, Train Loss: 39766.1835938, Time: 100.9143s, Throughput: 49.467713 im/s
Epoch 2, Iter 39, Loss: 17969.5566406, Throughput: 1838.242761 im/s
Epoch 2, Train Loss: 20105.5253906, Time: 2.7159s, Throughput: 1838.032820 im/s
Epoch 3, Iter 39, Loss: 15397.4375000, Throughput: 1862.959814 im/s
Epoch 3, Train Loss: 16569.3652344, Time: 2.6799s, Throughput: 1862.724137 im/s
Epoch 4, Iter 39, Loss: 14519.0126953, Throughput: 1879.714045 im/s
Epoch 4, Train Loss: 15061.7822266, Time: 2.6561s, Throughput: 1879.431596 im/s
Epoch 5, Iter 39, Loss: 12885.1621094, Throughput: 1877.542355 im/s
Epoch 5, Train Loss: 14141.6542969, Time: 2.6592s, Throughput: 1877.281766 im/s
Epoch 6, Iter 39, Loss: 13919.7705078, Throughput: 1883.877415 im/s
Epoch 6, Train Loss: 13481.8417969, Time: 2.6502s, Throughput: 1883.617268 im/s
Epoch 7, Iter 39, Loss: 13095.1250000, Throughput: 1879.863402 im/s
Epoch 7, Train Loss: 12918.6923828, Time: 2.6560s, Throughput: 1879.528604 im/s
Epoch 8, Iter 39, Loss: 12809.9335938, Throughput: 1877.249955 im/s
Epoch 8, Train Loss: 12568.8203125, Time: 2.6596s, Throughput: 1876.943513 im/s
Epoch 9, Iter 39, Loss: 12642.7041016, Throughput: 1873.378747 im/s
Epoch 9, Train Loss: 12231.4003906, Time: 2.6651s, Throughput: 1873.071724 im/s
Epoch 10, Iter 39, Loss: 11866.3896484, Throughput: 1882.342992 im/s
Epoch 10, Train Loss: 12029.3818359, Time: 2.6524s, Throughput: 1882.044866 im/s
Epoch 11, Iter 39, Loss: 12329.8447266, Throughput: 1881.527853 im/s
Epoch 11, Train Loss: 11729.1865234, Time: 2.6536s, Throughput: 1881.244859 im/s
Epoch 12, Iter 39, Loss: 11806.0390625, Throughput: 1885.666325 im/s
Epoch 12, Train Loss: 11620.2832031, Time: 2.6478s, Throughput: 1885.354412 im/s
Epoch 13, Iter 39, Loss: 10813.1914062, Throughput: 1880.931704 im/s
Epoch 13, Train Loss: 11404.0517578, Time: 2.6544s, Throughput: 1880.622032 im/s
Epoch 14, Iter 39, Loss: 11351.7285156, Throughput: 1882.727041 im/s
Epoch 14, Train Loss: 11245.6455078, Time: 2.6519s, Throughput: 1882.440301 im/s
Epoch 15, Iter 39, Loss: 10681.4492188, Throughput: 1887.715079 im/s
Epoch 15, Train Loss: 11158.2314453, Time: 2.6449s, Throughput: 1887.415930 im/s
Epoch 16, Iter 39, Loss: 11940.0820312, Throughput: 1883.570330 im/s
Epoch 16, Train Loss: 11046.5341797, Time: 2.6506s, Throughput: 1883.321618 im/s
Epoch 17, Iter 39, Loss: 10395.8466797, Throughput: 1885.825971 im/s
Epoch 17, Train Loss: 10866.1289062, Time: 2.6476s, Throughput: 1885.512478 im/s
Epoch 18, Iter 39, Loss: 11629.0058594, Throughput: 1876.236099 im/s
Epoch 18, Train Loss: 10782.0419922, Time: 2.6609s, Throughput: 1876.052689 im/s
Epoch 19, Iter 39, Loss: 11116.1015625, Throughput: 1879.692276 im/s
Epoch 19, Train Loss: 10695.6855469, Time: 2.6561s, Throughput: 1879.477147 im/s
Epoch 20, Iter 39, Loss: 10637.7207031, Throughput: 1882.155003 im/s
Epoch 20, Train Loss: 10704.9306641, Time: 2.6525s, Throughput: 1881.976862 im/s
Epoch 21, Iter 39, Loss: 11380.5683594, Throughput: 1890.793754 im/s
Epoch 21, Train Loss: 10558.8369141, Time: 2.6404s, Throughput: 1890.609024 im/s
Epoch 22, Iter 39, Loss: 9980.1835938, Throughput: 1884.298379 im/s
Epoch 22, Train Loss: 10469.0078125, Time: 2.6495s, Throughput: 1884.120510 im/s
Epoch 23, Iter 39, Loss: 10823.9414062, Throughput: 1890.191550 im/s
Epoch 23, Train Loss: 10410.6367188, Time: 2.6413s, Throughput: 1889.996189 im/s
Epoch 24, Iter 39, Loss: 10122.3359375, Throughput: 1880.793496 im/s
Epoch 24, Train Loss: 10464.9111328, Time: 2.6546s, Throughput: 1880.543320 im/s
Epoch 25, Iter 39, Loss: 10371.3300781, Throughput: 1887.400107 im/s
Epoch 25, Train Loss: 10251.6337891, Time: 2.6452s, Throughput: 1887.179638 im/s
Epoch 26, Iter 39, Loss: 10408.0947266, Throughput: 1889.064983 im/s
Epoch 26, Train Loss: 10202.6376953, Time: 2.6429s, Throughput: 1888.819248 im/s
Epoch 27, Iter 39, Loss: 9803.3544922, Throughput: 1885.359505 im/s
Epoch 27, Train Loss: 10145.0009766, Time: 2.6481s, Throughput: 1885.155807 im/s
Epoch 28, Iter 39, Loss: 9735.6494141, Throughput: 1890.408456 im/s
Epoch 28, Train Loss: 10114.7568359, Time: 2.6410s, Throughput: 1890.172268 im/s
Epoch 29, Iter 39, Loss: 10351.3847656, Throughput: 1879.043326 im/s
Epoch 29, Train Loss: 10174.0498047, Time: 2.6571s, Throughput: 1878.764788 im/s
Epoch 30, Iter 39, Loss: 10683.1894531, Throughput: 1883.806397 im/s
Epoch 30, Train Loss: 9999.1777344, Time: 2.6503s, Throughput: 1883.576769 im/s
Epoch 31, Iter 39, Loss: 10395.9277344, Throughput: 1871.338732 im/s
Epoch 31, Train Loss: 9921.5615234, Time: 2.6680s, Throughput: 1871.090397 im/s
Epoch 32, Iter 39, Loss: 10500.8750000, Throughput: 1876.395330 im/s
Epoch 32, Train Loss: 9926.8867188, Time: 2.6608s, Throughput: 1876.140775 im/s
Epoch 33, Iter 39, Loss: 10173.0703125, Throughput: 1862.904785 im/s
Epoch 33, Train Loss: 9924.2558594, Time: 2.6800s, Throughput: 1862.683041 im/s
Epoch 34, Iter 39, Loss: 10270.1210938, Throughput: 1877.766809 im/s
Epoch 34, Train Loss: 9907.3417969, Time: 2.6588s, Throughput: 1877.508346 im/s
Epoch 35, Iter 39, Loss: 9953.6669922, Throughput: 1876.526501 im/s
Epoch 35, Train Loss: 9792.7734375, Time: 2.6606s, Throughput: 1876.279645 im/s
Epoch 36, Iter 39, Loss: 9761.8662109, Throughput: 1883.775042 im/s
Epoch 36, Train Loss: 9719.7958984, Time: 2.6503s, Throughput: 1883.533392 im/s
Epoch 37, Iter 39, Loss: 10040.1962891, Throughput: 1882.383607 im/s
Epoch 37, Train Loss: 9743.4443359, Time: 2.6523s, Throughput: 1882.170568 im/s
Epoch 38, Iter 39, Loss: 9430.0693359, Throughput: 1888.549217 im/s
Epoch 38, Train Loss: 9666.2382812, Time: 2.6436s, Throughput: 1888.311279 im/s
Epoch 39, Iter 39, Loss: 9567.1738281, Throughput: 1854.210496 im/s
Epoch 39, Train Loss: 9603.4873047, Time: 2.6926s, Throughput: 1853.949612 im/s
Epoch 40, Iter 39, Loss: 9339.3291016, Throughput: 1878.506050 im/s
Epoch 40, Train Loss: 9568.0390625, Time: 2.6578s, Throughput: 1878.265076 im/s
Epoch 41, Iter 39, Loss: 9838.9951172, Throughput: 1858.307055 im/s
Epoch 41, Train Loss: 9659.6523438, Time: 2.6867s, Throughput: 1858.046337 im/s
Epoch 42, Iter 39, Loss: 9967.7011719, Throughput: 1869.863900 im/s
Epoch 42, Train Loss: 9652.2792969, Time: 2.6702s, Throughput: 1869.538330 im/s
Epoch 43, Iter 39, Loss: 9135.7558594, Throughput: 1878.676961 im/s
Epoch 43, Train Loss: 9558.3515625, Time: 2.6576s, Throughput: 1878.411844 im/s
Epoch 44, Iter 39, Loss: 9459.3417969, Throughput: 1889.027147 im/s
Epoch 44, Train Loss: 9502.3789062, Time: 2.6430s, Throughput: 1888.749901 im/s
Epoch 45, Iter 39, Loss: 9424.4492188, Throughput: 1871.960446 im/s
Epoch 45, Train Loss: 9501.8750000, Time: 2.6671s, Throughput: 1871.701571 im/s
Epoch 46, Iter 39, Loss: 9629.8046875, Throughput: 1885.235584 im/s
Epoch 46, Train Loss: 9436.7304688, Time: 2.6483s, Throughput: 1885.012735 im/s
Epoch 47, Iter 39, Loss: 8997.0751953, Throughput: 1878.666510 im/s
Epoch 47, Train Loss: 9451.2978516, Time: 2.6575s, Throughput: 1878.456502 im/s
Epoch 48, Iter 39, Loss: 9560.6386719, Throughput: 1884.994916 im/s
Epoch 48, Train Loss: 9353.5917969, Time: 2.6486s, Throughput: 1884.759400 im/s
Epoch 49, Iter 39, Loss: 9440.8046875, Throughput: 1868.292861 im/s
Epoch 49, Train Loss: 9381.7900391, Time: 2.6723s, Throughput: 1868.081666 im/s
Epoch 50, Iter 39, Loss: 9808.9218750, Throughput: 1884.377405 im/s
Epoch 50, Train Loss: 9369.7177734, Time: 2.6495s, Throughput: 1884.131191 im/s
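All of the hyperparameters above are exposed as keyword arguments, so a quick local smoke test can use a much smaller configuration (a hedged example; these particular values are arbitrary):
julia
img_small = main(; epochs=2, batchsize=32, image_size=(32, 32), max_num_filters=32)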
Appendix
julia
using InteractiveUtils
InteractiveUtils.versioninfo()
if @isdefined(MLDataDevices)
if @isdefined(CUDA) && MLDataDevices.functional(CUDADevice)
println()
CUDA.versioninfo()
end
if @isdefined(AMDGPU) && MLDataDevices.functional(AMDGPUDevice)
println()
AMDGPU.versioninfo()
end
end
Julia Version 1.11.7
Commit f2b3dbda30a (2025-09-08 12:10 UTC)
Build Info:
Official https://julialang.org/ release
Platform Info:
OS: Linux (x86_64-linux-gnu)
CPU: 48 × AMD EPYC 7402 24-Core Processor
WORD_SIZE: 64
LLVM: libLLVM-16.0.6 (ORCJIT, znver2)
Threads: 48 default, 0 interactive, 24 GC (on 2 virtual cores)
Environment:
JULIA_CPU_THREADS = 2
JULIA_DEPOT_PATH = /root/.cache/julia-buildkite-plugin/depots/01872db4-8c79-43af-ab7d-12abac4f24f6
LD_LIBRARY_PATH = /usr/local/nvidia/lib:/usr/local/nvidia/lib64
JULIA_PKG_SERVER =
JULIA_NUM_THREADS = 48
JULIA_CUDA_HARD_MEMORY_LIMIT = 100%
JULIA_PKG_PRECOMPILE_AUTO = 0
JULIA_DEBUG = Literate
This page was generated using Literate.jl.