---
library_name: diffusers
---

Pipeline generated with the following script:

```python
import torch
from diffusers import AutoencoderKL, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler, StableDiffusion3Pipeline
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, T5EncoderModel, CLIPTokenizer, AutoTokenizer

def get_dummy_components():
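    """Create tiny, randomly initialized components for a test-sized StableDiffusion3Pipeline."""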
    torch.manual_seed(0)
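    # Tiny SD3 transformer (MMDiT): a single joint block with 4 attention heads of size 8.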
    transformer = SD3Transformer2DModel(
        sample_size=32,
        patch_size=1,
        in_channels=4,
        num_layers=1,
        attention_head_dim=8,
        num_attention_heads=4,
        caption_projection_dim=32,
        joint_attention_dim=32,
        pooled_projection_dim=64,
        out_channels=4,
    )
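    # Both tiny CLIP text encoders share this config: hidden size 32 with a 32-dim projection head.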
    clip_text_encoder_config = CLIPTextConfig(
        bos_token_id=0,
        eos_token_id=2,
        hidden_size=32,
        intermediate_size=37,
        layer_norm_eps=1e-05,
        num_attention_heads=4,
        num_hidden_layers=5,
        pad_token_id=1,
        vocab_size=1000,
        hidden_act="gelu",
        projection_dim=32,
    )

    torch.manual_seed(0)
    text_encoder = CLIPTextModelWithProjection(clip_text_encoder_config)

    torch.manual_seed(0)
    text_encoder_2 = CLIPTextModelWithProjection(clip_text_encoder_config)

    text_encoder_3 = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

    tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
    tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
    tokenizer_3 = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

    torch.manual_seed(0)
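    # Tiny single-block VAE; like SD3's, it drops the quant convolutions and shifts/scales the latents.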
    vae = AutoencoderKL(
        sample_size=32,
        in_channels=3,
        out_channels=3,
        block_out_channels=(4,),
        layers_per_block=1,
        latent_channels=4,
        norm_num_groups=1,
        use_quant_conv=False,
        use_post_quant_conv=False,
        shift_factor=0.0609,
        scaling_factor=1.5035,
    )

    # SD3 is a rectified-flow model, so it pairs with the flow-matching Euler scheduler (default settings).
    scheduler = FlowMatchEulerDiscreteScheduler()

    return {
        "scheduler": scheduler,
        "text_encoder": text_encoder,
        "text_encoder_2": text_encoder_2,
        "text_encoder_3": text_encoder_3,
        "tokenizer": tokenizer,
        "tokenizer_2": tokenizer_2,
        "tokenizer_3": tokenizer_3,
        "transformer": transformer,
        "vae": vae,
    }


if __name__ == "__main__":
    components = get_dummy_components()
    pipeline = StableDiffusion3Pipeline(**components)
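    # push_to_hub requires being logged in to the Hugging Face Hub with write access to the target repo.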
    pipeline.push_to_hub("hf-internal-testing/tiny-sd3-pipe")
```
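
Below is a minimal usage sketch (assumed, not part of the generation script above). The pipeline is tiny and randomly initialized, so it is only meant for fast tests and will not produce meaningful images; the prompt, step count, and guidance scale are illustrative values.

```python
import torch

from diffusers import StableDiffusion3Pipeline

# Load the tiny pipeline from the Hub; everything fits comfortably on CPU.
pipe = StableDiffusion3Pipeline.from_pretrained("hf-internal-testing/tiny-sd3-pipe")

# Run a couple of denoising steps; the default resolution follows the transformer's 32x32 sample size.
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.Generator("cpu").manual_seed(0),
    num_inference_steps=2,
    guidance_scale=5.0,
    output_type="np",
).images[0]
print(image.shape)  # expected: (32, 32, 3)
```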