import os
import subprocess

import gradio as gr
import torch

# Report whether a GPU is available
if torch.cuda.is_available():
    device = "cuda"
    print("Using GPU")
else:
    device = "cpu"
    print("Using CPU")

# Clone the generator repository and work from inside it
subprocess.run(
    ["git", "clone", "https://github.com/Nick088Official/Stable_Diffusion_Finetuned_Minecraft_Skin_Generator.git"],
    check=True,
)
os.chdir("Stable_Diffusion_Finetuned_Minecraft_Skin_Generator")


def run_inference(prompt, stable_diffusion_model, num_inference_steps, guidance_scale,
                  num_images_per_prompt, model_precision_type, output_image_name, verbose):
    # Pick the generator script that matches the selected Stable Diffusion model
    if stable_diffusion_model == "2":
        sd_model = "minecraft-skins"
    else:
        sd_model = "minecraft-skins-sdxl"

    # Build the command line for the chosen generator script and run it through the shell
    command = (
        f"Python_Scripts/{sd_model}.py '{prompt}' {num_inference_steps} {guidance_scale} "
        f"{num_images_per_prompt} {model_precision_type} {output_image_name} "
        f"{'--verbose' if verbose else ''}"
    )
    subprocess.run(f"python {command}", shell=True, check=True)

    # The generator scripts write the finished skin into output_minecraft_skins/
    return os.path.join("output_minecraft_skins", output_image_name)
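# For illustration only: with the "xl" model and the example settings below,
# run_inference assembles and runs a shell command along these lines:
#   python Python_Scripts/minecraft-skins-sdxl.py 'A man in a purple suit wearing a tophat.' 25 7.5 1 fp16 output.png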
# Define the Gradio UI components
prompt_input = gr.Textbox(label="Your Prompt", info="What the Minecraft Skin should look like")
stable_diffusion_model_input = gr.Dropdown(["2", "xl"], value="xl", label="Stable Diffusion Model", info="Choose which Stable Diffusion Model to use; xl understands prompts better")
num_inference_steps_input = gr.Number(value=25, minimum=1, precision=0, label="Number of Inference Steps", info="The number of denoising steps; more steps usually lead to a higher quality image at the cost of slower inference")
guidance_scale_input = gr.Number(value=7.5, minimum=0.1, label="Guidance Scale", info="How closely the generated image adheres to the prompt")
num_images_per_prompt_input = gr.Number(value=1, minimum=1, precision=0, label="Number of Images per Prompt", info="The number of images to make with the prompt")
model_precision_type_input = gr.Dropdown(["fp16", "fp32"], value="fp16", label="Model Precision Type", info="The precision type to load the model with: fp16 is faster, fp32 gives better results")
output_image_name_input = gr.Textbox(value="output-skin.png", label="Output Image Name", info="The file name of the generated skin; keep the .png extension")
verbose_input = gr.Checkbox(value=False, label="Verbose Output", info="Produce more detailed output while running")

examples = [
    [
        "A man in a purple suit wearing a tophat.",
        "xl",
        25,
        7.5,
        1,
        "fp16",
        "output.png",
        False,
    ]
]

# Create and launch the Gradio interface
gr.Interface(
    fn=run_inference,
    inputs=[
        prompt_input,
        stable_diffusion_model_input,
        num_inference_steps_input,
        guidance_scale_input,
        num_images_per_prompt_input,
        model_precision_type_input,
        output_image_name_input,
        verbose_input,
    ],
    outputs=gr.Image(label="Generated Image"),
    title="Minecraft Skin Generator",
    description="""Make AI-generated Minecraft Skins with a finetuned Stable Diffusion version!
Model used: https://github.com/Nick088Official/Stable_Diffusion_Finetuned_Minecraft_Skin_Generator
Hugging Face Space made by [Nick088](https://linktr.ee/Nick088)""",
    examples=examples,
).launch(show_api=False, share=True)
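# A minimal sketch of running this app locally, assuming the file is saved as
# app.py (the usual Hugging Face Spaces entry point) and the dependencies
# (at least gradio and torch, plus whatever the generator scripts require) are installed:
#   python app.py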