ameerazam08 committed on
Commit 2142ea2
1 Parent(s): b3812f7

Update app.py

Files changed (1)
  1. app.py +134 -29
app.py CHANGED
@@ -1,3 +1,87 @@
+# import torch
+# import torchaudio
+# from einops import rearrange
+# import gradio as gr
+# import spaces
+# import os
+# import uuid
+
+# # Importing the model-related functions
+# from stable_audio_tools import get_pretrained_model
+# from stable_audio_tools.inference.generation import generate_diffusion_cond
+
+
+# from huggingface_hub import login
+
+# hf_token = os.getenv('HF_TOKEN')
+# login(token=hf_token,add_to_git_credential=True)
+
+# # Load the model outside of the GPU-decorated function
+# def load_model():
+#     print("Loading model...")
+#     model, model_config = get_pretrained_model("stabilityai/stable-audio-open-1.0")
+#     print("Model loaded successfully.")
+#     return model, model_config
+
+# # Define the function to generate audio
+# @spaces.GPU(duration=120)
+# def generate_audio(prompt, bpm, seconds_total):
+#     device = "cuda" if torch.cuda.is_available() else "cpu"
+
+#     # Download model
+#     model, model_config = load_model()
+#     sample_rate = model_config["sample_rate"]
+#     sample_size = model_config["sample_size"]
+
+#     model = model.to(device)
+
+#     # Set up text and timing conditioning
+#     conditioning = [{
+#         "prompt": f"{bpm} BPM {prompt}",
+#         "seconds_start": 0,
+#         "seconds_total": seconds_total
+#     }]
+
+#     # Generate stereo audio
+#     output = generate_diffusion_cond(
+#         model,
+#         steps=100,
+#         cfg_scale=7,
+#         conditioning=conditioning,
+#         sample_size=sample_size,
+#         sigma_min=0.3,
+#         sigma_max=500,
+#         sampler_type="dpmpp-3m-sde",
+#         device=device
+#     )
+
+#     # Rearrange audio batch to a single sequence
+#     output = rearrange(output, "b d n -> d (b n)")
+
+#     # Peak normalize, clip, convert to int16, and save to file
+#     output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
+
+#     output_path = "output.wav"
+#     torchaudio.save(output_path, output, sample_rate)
+
+#     return output_path
+
+# # Define the Gradio interface
+# iface = gr.Interface(
+#     fn=generate_audio,
+#     inputs=[
+#         gr.Textbox(label="Prompt", placeholder="Enter the description of the audio (e.g., tech house drum loop)"),
+#         gr.Number(label="BPM", value=128),
+#         gr.Number(label="Duration (seconds)", value=30)
+#     ],
+#     outputs=gr.Audio(label="Generated Audio"),
+#     title="Stable Audio Generation",
+#     description="Generate audio based on a text prompt using stable audio tools.",
+# )
+
+# # Launch the interface
+# iface.launch()
+
 import torch
 import torchaudio
 from einops import rearrange
@@ -10,12 +94,6 @@ import uuid
 from stable_audio_tools import get_pretrained_model
 from stable_audio_tools.inference.generation import generate_diffusion_cond
 
-
-from huggingface_hub import login
-
-hf_token = os.getenv('HF_TOKEN')
-login(token=hf_token,add_to_git_credential=True)
-
 # Load the model outside of the GPU-decorated function
 def load_model():
     print("Loading model...")
@@ -23,30 +101,43 @@ def load_model():
     print("Model loaded successfully.")
     return model, model_config
 
-# Define the function to generate audio
-@spaces.GPU(duration=120)
-def generate_audio(prompt, bpm, seconds_total):
+# Function to set up, generate, and process the audio
+@spaces.GPU(duration=120)  # Allocate GPU only when this function is called
+def generate_audio(prompt, seconds_total=30, steps=100, cfg_scale=7):
+    print(f"Prompt received: {prompt}")
+    print(f"Settings: Duration={seconds_total}s, Steps={steps}, CFG Scale={cfg_scale}")
+
     device = "cuda" if torch.cuda.is_available() else "cpu"
+    print(f"Using device: {device}")
 
-    # Download model
+    # Fetch the Hugging Face token from the environment variable
+    hf_token = os.getenv('HF_TOKEN')
+    print(f"Hugging Face token: {hf_token}")
+
+    # Use pre-loaded model and configuration
     model, model_config = load_model()
     sample_rate = model_config["sample_rate"]
     sample_size = model_config["sample_size"]
 
+    print(f"Sample rate: {sample_rate}, Sample size: {sample_size}")
+
     model = model.to(device)
+    print("Model moved to device.")
 
     # Set up text and timing conditioning
     conditioning = [{
-        "prompt": f"{bpm} BPM {prompt}",
+        "prompt": prompt,
         "seconds_start": 0,
         "seconds_total": seconds_total
     }]
+    print(f"Conditioning: {conditioning}")
 
     # Generate stereo audio
+    print("Generating audio...")
     output = generate_diffusion_cond(
         model,
-        steps=100,
-        cfg_scale=7,
+        steps=steps,
+        cfg_scale=cfg_scale,
         conditioning=conditioning,
         sample_size=sample_size,
         sigma_min=0.3,
@@ -54,30 +145,44 @@ def generate_audio(prompt, bpm, seconds_total):
         sampler_type="dpmpp-3m-sde",
         device=device
     )
+    print("Audio generated.")
 
     # Rearrange audio batch to a single sequence
     output = rearrange(output, "b d n -> d (b n)")
+    print("Audio rearranged.")
 
-    # Peak normalize, clip, convert to int16, and save to file
+    # Peak normalize, clip, convert to int16
    output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
-
-    output_path = "output.wav"
-    torchaudio.save(output_path, output, sample_rate)
-
-    return output_path
+    print("Audio normalized and converted.")
+
+    # Generate a unique filename for the output
+    unique_filename = f"output_{uuid.uuid4().hex}.wav"
+    print(f"Saving audio to file: {unique_filename}")
+
+    # Save to file
+    torchaudio.save(unique_filename, output, sample_rate)
+    print(f"Audio saved: {unique_filename}")
 
-# Define the Gradio interface
-iface = gr.Interface(
+    # Return the path to the generated audio file
+    return unique_filename
+
+# Setting up the Gradio Interface
+interface = gr.Interface(
     fn=generate_audio,
     inputs=[
-        gr.Textbox(label="Prompt", placeholder="Enter the description of the audio (e.g., tech house drum loop)"),
-        gr.Number(label="BPM", value=128),
-        gr.Number(label="Duration (seconds)", value=30)
+        gr.Textbox(label="Prompt", placeholder="Enter your text prompt here"),
+        gr.Slider(0, 47, value=30, label="Duration in Seconds"),
+        gr.Slider(10, 150, value=100, step=10, label="Number of Diffusion Steps"),
+        gr.Slider(1, 15, value=7, step=0.1, label="CFG Scale")
     ],
-    outputs=gr.Audio(label="Generated Audio"),
-    title="Stable Audio Generation",
-    description="Generate audio based on a text prompt using stable audio tools.",
+    outputs=gr.Audio(type="filepath", label="Generated Audio"),
+    title="Stable Audio Generator",
+    description="Generate variable-length stereo audio at 44.1kHz from text prompts using Stable Audio Open 1.0."
 )
 
-# Launch the interface
-iface.launch()
+# Pre-load the model to avoid multiprocessing issues
+model, model_config = load_model()
+
+# Launch the Interface
+interface.launch()
+
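For anyone who wants to exercise the new entry point outside the Gradio UI, a minimal sketch of a direct call is below. It assumes the Space's dependencies (torch, torchaudio, stable_audio_tools, gradio, spaces) are installed, that app.py has been imported or the snippet appended to it, and that the environment can run the @spaces.GPU-decorated function locally; the prompt and settings are illustrative, not part of the commit.

    # Illustrative only: calling the updated generate_audio signature directly.
    wav_path = generate_audio(
        prompt="128 BPM tech house drum loop",  # BPM now goes in the prompt text
        seconds_total=30,                       # UI slider range: 0-47 seconds
        steps=100,                              # UI slider range: 10-150
        cfg_scale=7,                            # UI slider range: 1-15
    )
    print(f"Generated file: {wav_path}")

Note that, unlike the old BPM field, tempo is no longer a separate argument; the new code passes the prompt through to the conditioning dict unchanged, so any tempo hint has to be written into the prompt itself.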