This tiny model is intended for debugging. It is randomly initialized, using a config adapted from black-forest-labs/FLUX.2-dev.

File sizes:

  • 2MB text_encoder/model.safetensors
  • 0.9MB transformer/diffusion_pytorch_model.safetensors
  • 0.5MB vae/diffusion_pytorch_model.safetensors
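
The sizes above can be checked programmatically; a minimal sketch using huggingface_hub (not part of the original card, just a convenience check):

from huggingface_hub import HfApi

# fetch per-file metadata for the repo and report safetensors sizes in MB
info = HfApi().model_info("yujiepan/flux2-tiny-random", files_metadata=True)
for sibling in info.siblings:
    if sibling.rfilename.endswith(".safetensors"):
        print(f"{sibling.rfilename}: {sibling.size / 1e6:.1f} MB")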

Example usage:

import torch
from diffusers import Flux2Pipeline
from diffusers.utils import load_image

model_id = "yujiepan/flux2-tiny-random"
device = "cuda:0"
torch_dtype = torch.bfloat16

pipe = Flux2Pipeline.from_pretrained(
    model_id, torch_dtype=torch_dtype
).to(device)

prompt = "Realistic macro photograph of a hermit crab using a soda can as its shell"
cat_image = load_image(
    "https://huggingface.co/spaces/zerogpu-aoti/FLUX.1-Kontext-Dev-fp8-dynamic/resolve/main/cat.png")
image = pipe(
    prompt=prompt,
    image=[cat_image],  # optional multi-image input
    generator=torch.Generator(device=device).manual_seed(42),
    num_inference_steps=4,
    guidance_scale=4,
    text_encoder_out_layers=(1,),
).images[0]
print(image)
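
Since the weights are random, the output is noise; the snippet only verifies that the pipeline runs end to end. The result is a standard PIL image and can be saved directly (the filename is illustrative):

image.save("output.png")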

Code used to create this repo:

import json

import torch
from diffusers import (
    AutoencoderKLFlux2,
    FlowMatchEulerDiscreteScheduler,
    Flux2Pipeline,
    Flux2Transformer2DModel,
)
from huggingface_hub import hf_hub_download
from transformers import (
    AutoConfig,
    AutoTokenizer,
    Mistral3ForConditionalGeneration,
    PixtralProcessor,
)
from transformers.generation import GenerationConfig

source_model_id = "black-forest-labs/FLUX.2-dev"
save_folder = "/tmp/yujiepan/flux2-tiny-random"

torch.set_default_dtype(torch.bfloat16)
scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
    source_model_id, subfolder='scheduler')
tokenizer = PixtralProcessor.from_pretrained(
    source_model_id, subfolder='tokenizer')

# write an object to JSON, creating parent directories as needed
def save_json(path, obj):
    import json
    from pathlib import Path
    Path(path).parent.mkdir(parents=True, exist_ok=True)
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(obj, f, indent=2, ensure_ascii=False)

# re-initialize every parameter from N(0, 0.1) under a fixed seed
def init_weights(model):
    import torch
    from transformers import set_seed
    set_seed(42)
    model = model.cpu()
    with torch.no_grad():
        for name, p in sorted(model.named_parameters()):
            torch.nn.init.normal_(p, 0, 0.1)
            print(name, p.shape, p.dtype, p.device)

# shrink the text encoder (Mistral3 text model + Pixtral vision tower) to a tiny config
with open(hf_hub_download(source_model_id, filename='text_encoder/config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config = json.load(f)
    config['text_config'].update({
        'hidden_size': 8,
        'intermediate_size': 64,
        "head_dim": 32,
        'num_attention_heads': 8,
        'num_hidden_layers': 2,
        'num_key_value_heads': 4,
        'tie_word_embeddings': True,
    })
    config['vision_config'].update(
        {
            "head_dim": 32,
            "hidden_size": 32,
            "intermediate_size": 64,
            "num_attention_heads": 1,
            "num_hidden_layers": 2,
        }
    )
    save_json(f'{save_folder}/text_encoder/config.json', config)
    text_encoder_config = AutoConfig.from_pretrained(
        f'{save_folder}/text_encoder')
    text_encoder = Mistral3ForConditionalGeneration(
        text_encoder_config).to(torch.bfloat16)
    generation_config = GenerationConfig.from_pretrained(
        source_model_id, subfolder='text_encoder')
    # text_encoder.config.generation_config = generation_config
    text_encoder.generation_config = generation_config
    init_weights(text_encoder)

# shrink the Flux2 transformer to a tiny config
with open(hf_hub_download(source_model_id, filename='transformer/config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config = json.load(f)
    config.update({
        'attention_head_dim': 32,
        "in_channels": 32,
        'axes_dims_rope': [8, 12, 12],
        'joint_attention_dim': 8,
        'num_attention_heads': 2,
        'num_layers': 2,
        'num_single_layers': 2,
    })
    save_json(f'{save_folder}/transformer/config.json', config)
    transformer_config = Flux2Transformer2DModel.load_config(
        f'{save_folder}/transformer')
    transformer = Flux2Transformer2DModel.from_config(transformer_config)
    init_weights(transformer)

# shrink the Flux2 VAE to a tiny config
with open(hf_hub_download(source_model_id, filename='vae/config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config = json.load(f)
    config.update({
        'layers_per_block': 1,
        'block_out_channels': [32, 32],
        'latent_channels': 8,
        'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
        'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D']
    })
    save_json(f'{save_folder}/vae/config.json', config)
    vae_config = AutoencoderKLFlux2.load_config(f'{save_folder}/vae')
    vae = AutoencoderKLFlux2.from_config(vae_config)
    init_weights(vae)

pipeline = Flux2Pipeline(
    scheduler=scheduler,
    text_encoder=text_encoder,
    tokenizer=tokenizer,
    transformer=transformer,
    vae=vae,
)
pipeline = pipeline.to(torch.bfloat16)
pipeline.save_pretrained(save_folder, safe_serialization=True)
print(pipeline)
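
save_pretrained only writes the folder locally; uploading it to the Hub is a separate step. A minimal sketch with huggingface_hub (the upload is not part of the original script):

from huggingface_hub import create_repo, upload_folder

repo_id = "yujiepan/flux2-tiny-random"
create_repo(repo_id, repo_type="model", exist_ok=True)
upload_folder(repo_id=repo_id, folder_path=save_folder, repo_type="model")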