jonathanagustin's picture
Upload folder using huggingface_hub
3dad746 verified
raw
history blame
8.7 kB
import gradio as gr
import tempfile
import openai
import requests
import os
def tts(input_text: str, model: str, voice: str, api_key: str) -> str:
    """Synthesize speech from text with the OpenAI TTS API.

    Args:
        input_text: Text to convert to speech; must be non-empty.
        model: TTS model name, e.g. "tts-1" or "tts-1-hd".
        voice: Voice preset name, e.g. "alloy" or "echo".
        api_key: The caller's OpenAI API key.

    Returns:
        Path to a temporary ``.mp3`` file holding the synthesized audio.

    Raises:
        gr.Error: If an input is missing or the API call fails.
    """
    if not api_key.strip():
        raise gr.Error(
            "API key is required. Get an API key at: https://platform.openai.com/account/api-keys"
        )
    if not input_text.strip():
        raise gr.Error("Input text cannot be empty.")
    try:
        # Use a per-request client (openai>=1.0) instead of mutating the
        # process-global `openai.api_key`. The previous call,
        # `openai.Audio.create(text=..., ...)`, does not exist in any
        # openai release: pre-1.0 `Audio` only offered transcribe/translate,
        # and 1.0 removed the module-level resource API entirely.
        client = openai.OpenAI(api_key=api_key)
        response = client.audio.speech.create(
            model=model, voice=voice, input=input_text
        )
    except openai.OpenAIError as e:
        raise gr.Error(f"An OpenAI error occurred: {e}")
    except Exception as e:
        raise gr.Error(f"An unexpected error occurred: {e}")
    # Persist the MP3 bytes to disk so Gradio's Audio component can serve
    # the file; delete=False keeps it alive after the handle closes.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
        temp_file.write(response.content)
        temp_file_path = temp_file.name
    return temp_file_path
def main():
    """Build and launch the Gradio text-to-speech demo.

    Downloads one preview clip per voice (cached on disk), assembles a
    Blocks UI with custom HTML/JS audio players for the previews, and
    launches the app with the previews directory whitelisted for serving.
    """
    MODEL_OPTIONS = ["tts-1", "tts-1-hd"]
    VOICE_OPTIONS = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
    # Predefine voice preview URLs on OpenAI's docs CDN.
    VOICE_PREVIEW_URLS = {
        voice: f"https://cdn.openai.com/API/docs/audio/{voice}.wav"
        for voice in VOICE_OPTIONS
    }
    # Download audio previews to disk before building the interface.
    PREVIEW_DIR = "voice_previews"
    os.makedirs(PREVIEW_DIR, exist_ok=True)
    VOICE_PREVIEW_FILES = {}
    for voice, url in VOICE_PREVIEW_URLS.items():
        local_file_path = os.path.join(PREVIEW_DIR, f"{voice}.wav")
        if not os.path.exists(local_file_path):
            try:
                # Timeout keeps app startup from hanging on an unreachable CDN.
                response = requests.get(url, timeout=10)
                response.raise_for_status()
                with open(local_file_path, "wb") as f:
                    f.write(response.content)
            except requests.exceptions.RequestException as e:
                print(f"Failed to download {voice} preview: {e}")
        # Record the path only when the file actually exists, so the map
        # never points at a preview whose download failed.
        if os.path.exists(local_file_path):
            VOICE_PREVIEW_FILES[voice] = local_file_path
    # NOTE(fix): the previous `gr.static(PREVIEW_DIR)` call raised
    # AttributeError — Gradio has no `gr.static`. Local files are instead
    # whitelisted via `allowed_paths` in `demo.launch(...)` below and
    # referenced through Gradio's `/file=` route.
    with gr.Blocks(theme=gr.themes.Default()) as demo:
        # Global CSS for the custom audio-player widgets.
        gr.HTML("""
        <style>
        .audio-player {
            width: 5rem;
            height: 5rem;
            margin-bottom: 10px;
            cursor: pointer;
        }
        .icon-container {
            width: 100%;
            height: 100%;
            background-color: var(--color-background-secondary);
            color: var(--color-text-inverse);
            display: flex;
            justify-content: center;
            align-items: center;
            border-radius: 50%;
            position: relative;
        }
        .audio-icon {
            width: 50%;
            height: 50%;
        }
        .audio-player.playing .icon-container {
            background-color: var(--color-brand-primary);
        }
        .voice-preview {
            display: flex;
            align-items: center;
            margin-bottom: 1rem;
        }
        .voice-preview p {
            margin-left: 10px;
            font-weight: bold;
            color: var(--color-text-primary);
        }
        </style>
        """)
        # JavaScript play/pause toggle; pauses any other playing preview
        # so only one clip sounds at a time.
        gr.HTML("""
        <script>
        function togglePlay(voice) {
            const player = document.getElementById('audio-player-' + voice);
            const container = document.getElementById('icon-container-' + voice);
            const icon = document.getElementById('audio-icon-' + voice);
            if (player.paused) {
                // Pause any other playing audios
                const audios = document.querySelectorAll('audio');
                audios.forEach(function(audio) {
                    if (audio !== player) {
                        audio.pause();
                        audio.currentTime = 0;
                        const otherVoice = audio.id.replace('audio-player-', '');
                        document.getElementById('icon-container-' + otherVoice).classList.remove('playing');
                        document.getElementById('audio-icon-' + otherVoice).innerHTML = playIcon;
                    }
                });
                player.play();
                container.classList.add('playing');
                icon.innerHTML = pauseIcon;
            } else {
                player.pause();
                container.classList.remove('playing');
                icon.innerHTML = playIcon;
            }
            player.onended = function() {
                container.classList.remove('playing');
                icon.innerHTML = playIcon;
            };
        }
        const playIcon = `
            <svg class="audio-icon" xmlns="http://www.w3.org/2000/svg" fill="currentColor" viewBox="0 0 20 20">
                <path d="M6.5 5.5v9l7-4.5-7-4.5z"/>
            </svg>`;
        const pauseIcon = `
            <svg class="audio-icon" xmlns="http://www.w3.org/2000/svg" fill="currentColor" viewBox="0 0 20 20">
                <path d="M6 5h3v10H6V5zm5 0h3v10h-3V5z"/>
            </svg>`;
        </script>
        """)
        with gr.Row():
            with gr.Column(scale=1):
                api_key_input = gr.Textbox(
                    label="OpenAI API Key",
                    info="https://platform.openai.com/account/api-keys",
                    type="password",
                    placeholder="Enter your OpenAI API Key",
                )
                model_dropdown = gr.Dropdown(
                    choices=MODEL_OPTIONS, label="Model", value="tts-1"
                )
                voice_dropdown = gr.Dropdown(
                    choices=VOICE_OPTIONS, label="Voice Options", value="echo"
                )
                # Voice previews rendered as custom HTML audio players.
                gr.Markdown("### Voice Previews")
                for voice in VOICE_OPTIONS:
                    # FIX: serve through Gradio's file route (`/file=...`);
                    # the old relative `file/...` URL is not a Gradio route.
                    audio_url = f"/file={PREVIEW_DIR}/{voice}.wav"
                    html_snippet = f'''
                    <div class="voice-preview">
                        <div class="audio-player" onclick="togglePlay('{voice}')">
                            <div class="icon-container" id="icon-container-{voice}">
                                <div id="audio-icon-{voice}">
                                    <!-- Heroicon Play Button SVG -->
                                    <svg class="audio-icon" xmlns="http://www.w3.org/2000/svg" fill="currentColor" viewBox="0 0 20 20">
                                        <path d="M6.5 5.5v9l7-4.5-7-4.5z"/>
                                    </svg>
                                </div>
                            </div>
                            <audio id="audio-player-{voice}" style="display:none;">
                                <source src="{audio_url}" type="audio/wav">
                                Your browser does not support the audio element.
                            </audio>
                        </div>
                        <p>{voice.capitalize()}</p>
                    </div>
                    '''
                    gr.HTML(html_snippet)
            with gr.Column(scale=2):
                input_textbox = gr.Textbox(
                    label="Input Text", lines=10, placeholder="Type your text here..."
                )
                submit_button = gr.Button(
                    "Convert Text to Speech", variant="primary"
                )
            with gr.Column(scale=1):
                output_audio = gr.Audio(label="Output Audio")

        # Event handler for the submit button; `tts` raises gr.Error on
        # failure, which Gradio surfaces because show_error=True below.
        def on_submit(input_text, model, voice, api_key):
            audio_file = tts(input_text, model, voice, api_key)
            return audio_file

        # Trigger the conversion when the submit button is clicked.
        submit_button.click(
            fn=on_submit,
            inputs=[input_textbox, model_dropdown, voice_dropdown, api_key_input],
            outputs=output_audio,
        )
    # allowed_paths whitelists the previews directory for the /file= route.
    demo.launch(show_error=True, allowed_paths=[PREVIEW_DIR])
# Launch the app only when run as a script, not when imported as a module.
if __name__ == "__main__":
    main()