# sd-audio-cpu / app.py
# Author: Drew Skillman
# Last change: switch to cached model; do not use ZeroGPU
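"""Gradio app that generates stereo audio from text prompts with Stable Audio Open 1.0.

The model is loaded once at startup and cached in module-level globals so it is not
reloaded for every request.
"""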
import torch
import torchaudio
from einops import rearrange
import gradio as gr
import spaces  # ZeroGPU helpers; unused now that the cached model is used instead of ZeroGPU
import os
import uuid
from pydub import AudioSegment  # only needed by the disabled WAV-to-MP3 block below
# Importing the model-related functions
from stable_audio_tools import get_pretrained_model
from stable_audio_tools.inference.generation import generate_diffusion_cond
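# Note: stabilityai/stable-audio-open-1.0 is a gated checkpoint; access relies on the
# HF_TOKEN environment variable being picked up by huggingface_hub.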
# Module-level cache for the model and its config, populated once at startup
model = None
model_config = None

# Load the model once, outside of the per-request handler
def load_model():
global model, model_config
print("Loading model...")
model, model_config = get_pretrained_model("stabilityai/stable-audio-open-1.0")
print("Model loaded successfully.")
return model, model_config
# Function to set up, generate, and process the audio
def generate_audio(prompt, seconds_total=30, steps=100, cfg_scale=7):
global model, model_config
print(f"Prompt received: {prompt}")
print(f"Settings: Duration={seconds_total}s, Steps={steps}, CFG Scale={cfg_scale}")
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
    # Fetch the Hugging Face token from the environment (do not log the secret itself)
    hf_token = os.getenv('HF_TOKEN')
    print(f"Hugging Face token is {'set' if hf_token else 'not set'}")
# Use pre-loaded model and configuration
#model, model_config = load_model()
sample_rate = model_config["sample_rate"]
sample_size = model_config["sample_size"]
print(f"Sample rate: {sample_rate}, Sample size: {sample_size}")
model = model.to(device)
print("Model moved to device.")
# Set up text and timing conditioning
conditioning = [{
"prompt": prompt,
"seconds_start": 0,
"seconds_total": seconds_total
}]
print(f"Conditioning: {conditioning}")
# Generate stereo audio
print("Generating audio...")
output = generate_diffusion_cond(
model,
steps=steps,
cfg_scale=cfg_scale,
conditioning=conditioning,
sample_size=sample_size,
sigma_min=0.3,
sigma_max=500,
sampler_type="dpmpp-3m-sde",
device=device
)
print("Audio generated.")
    # Collapse the (batch, channels, samples) batch into a single (channels, samples) sequence
    output = rearrange(output, "b d n -> d (b n)")
print("Audio rearranged.")
    # Peak normalize (guarding against an all-silent output), clip, convert to int16
    peak = torch.max(torch.abs(output)).clamp(min=1e-8)
    output = output.to(torch.float32).div(peak).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
print("Audio normalized and converted.")
# Generate a unique filename for the output
unique_filename = f"output_{uuid.uuid4().hex}.wav"
print(f"Saving audio to file: {unique_filename}")
# Save to file
torchaudio.save(unique_filename, output, sample_rate)
print(f"Audio saved: {unique_filename}")
return unique_filename
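# NOTE: the quoted block below is intentionally left disabled; if enabled it would convert
# the WAV output to MP3 with pydub and return the MP3 path instead.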
'''
# Convert WAV to MP3 using pydub (MP3 export requires ffmpeg to be installed)
audio = AudioSegment.from_wav(unique_filename)
full_path_mp3 = unique_filename.replace('wav', 'mp3')
audio.export(full_path_mp3, format="mp3")
print(f"Audio converted and saved to MP3: {full_path_mp3}")
# Return the path to the generated audio file
return full_path_mp3
'''
# Standalone Gradio Interface (defined but never launched; the Blocks demo below is what runs)
interface = gr.Interface(
fn=generate_audio,
inputs=[
gr.Textbox(label="Prompt", placeholder="Enter your text prompt here"),
gr.Slider(0, 47, value=30, label="Duration in Seconds"),
gr.Slider(10, 150, value=100, step=10, label="Number of Diffusion Steps"),
gr.Slider(1, 15, value=7, step=0.1, label="CFG Scale")
],
outputs=gr.Audio(type="filepath", label="Generated Audio"),
title="Stable Audio Generator",
description="Generate variable-length stereo audio at 44.1kHz from text prompts using Stable Audio Open 1.0.",
)
with gr.Blocks() as demo:
with gr.Tab("Audio"):
audio_prompt = gr.Textbox(label="Prompt", placeholder="Enter your text prompt here")
audio_duration = gr.Slider(0, 47, value=30, label="Duration in Seconds")
audio_steps = gr.Slider(10, 150, value=100, step=10, label="Number of Diffusion Steps")
audio_cfg = gr.Slider(1, 15, value=7, step=0.1, label="CFG Scale")
audio_process_button = gr.Button("Process Audio")
audio_output = gr.Audio(type="filepath", label="Generated Audio")
audio_process_button.click(generate_audio, [audio_prompt, audio_duration, audio_steps, audio_cfg], [audio_output])
# Pre-load the model to avoid multiprocessing issues
model, model_config = load_model()
demo.launch(share=True)