import gradio as gr
import numpy as np
import torch
from PIL import Image
from diffusers import DiffusionPipeline

# Load the multi-view diffusion pipeline.
# Note: the weights are loaded in float16 but the pipeline is placed on the CPU;
# half-precision ops can be slow or unsupported on CPU, so switch to "cuda" if a GPU is available.
multi_view_diffusion_pipeline = DiffusionPipeline.from_pretrained(
    "2gnak/multi-view-diffusion-demo",
    custom_pipeline="dylanebert/multi-view-diffusion",
    torch_dtype=torch.float16,
    trust_remote_code=True,
).to("cpu")


def run(image):
    # Normalize the input image to the [0, 1] float range expected by the pipeline
    image = np.array(image, dtype=np.float32) / 255.0

    # Generate four views of the object (empty text prompt, image-conditioned)
    images = multi_view_diffusion_pipeline(
        "", image, guidance_scale=5, num_inference_steps=30, elevation=0
    )

    # Convert the [0, 1] float outputs back to 8-bit PIL images
    images = [Image.fromarray((img * 255).astype("uint8")) for img in images]

    # Arrange the four views in a 2x2 grid
    width, height = images[0].size
    grid_img = Image.new("RGB", (2 * width, 2 * height))
    grid_img.paste(images[0], (0, 0))
    grid_img.paste(images[1], (width, 0))
    grid_img.paste(images[2], (0, height))
    grid_img.paste(images[3], (width, height))
    return grid_img


demo = gr.Interface(fn=run, inputs="image", outputs="image")
demo.launch()