File size: 4,811 Bytes
42966de
 
 
f43c5ad
42966de
9813b68
42966de
 
 
 
97e46d3
 
 
 
 
 
 
 
 
 
 
42966de
97e46d3
9813b68
 
 
97e46d3
42966de
3020c69
42966de
 
 
 
9813b68
2449b0e
97e46d3
2449b0e
dac7224
2449b0e
dac7224
42966de
9813b68
 
 
 
42966de
 
dfb71ed
42966de
 
 
 
9813b68
42966de
 
97e46d3
42966de
 
 
 
f43c5ad
 
9813b68
 
f43c5ad
 
42966de
 
 
 
db69a9c
9c76877
3020c69
 
42966de
 
 
 
 
 
 
f43c5ad
 
 
 
 
 
 
 
 
 
9813b68
 
 
 
 
 
 
 
 
 
 
f43c5ad
9813b68
 
 
f43c5ad
42966de
 
9813b68
3020c69
9813b68
42966de
 
 
3020c69
 
97e46d3
 
42966de
dac7224
 
3020c69
42966de
9813b68
dfb71ed
 
dac7224
7068a4f
42966de
9813b68
42966de
9813b68
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
import gradio as gr
import tempfile
import openai
import requests


def tts(input_text: str, model: str, voice: str, api_key: str) -> str:
    """
    Convert input text to speech using OpenAI's Text-to-Speech API.

    Parameters:
        input_text (str): The text to be converted to speech.
        model (str): The model to use for synthesis (e.g., 'tts-1', 'tts-1-hd').
        voice (str): The voice profile to use (e.g., 'alloy', 'echo', 'fable', etc.).
        api_key (str): OpenAI API key.

    Returns:
        str: File path to the generated MP3 audio file.

    Raises:
        gr.Error: If input parameters are invalid or the API call fails.
    """
    if not api_key.strip():
        raise gr.Error(
            "API key is required. Get an API key at: https://platform.openai.com/account/api-keys"
        )

    if not input_text.strip():
        raise gr.Error("Input text cannot be empty.")

    # Use a per-call client rather than mutating the module-global
    # openai.api_key, so concurrent requests with different keys don't clash.
    client = openai.OpenAI(api_key=api_key)

    try:
        # openai-python v1 TTS endpoint: legacy `openai.Audio.create` never
        # did text-to-speech (it had no `text` parameter); the correct call is
        # audio.speech.create with `input=`.
        response = client.audio.speech.create(
            model=model, voice=voice, input=input_text
        )
    except openai.OpenAIError as e:
        # Catch-all for OpenAI exceptions
        raise gr.Error(f"An OpenAI error occurred: {e}")
    except Exception as e:
        # Catch any other exceptions
        raise gr.Error(f"An unexpected error occurred: {e}")

    # The speech response exposes the synthesized audio as raw bytes on
    # `.content` (not `.audio`).
    audio_bytes = getattr(response, "content", None)
    if not audio_bytes:
        raise gr.Error(
            "Invalid response from OpenAI API. The response does not contain audio content."
        )

    # delete=False: the file must outlive this function so Gradio can serve it.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
        temp_file.write(audio_bytes)
        temp_file_path = temp_file.name

    return temp_file_path


def main():
    """
    Build and launch the Gradio interface for the OpenAI TTS demo.

    Layout: left column holds the API key, model/voice pickers, and static
    voice previews; middle column the input text and submit button; right
    column the synthesized audio output.
    """
    MODEL_OPTIONS = ["tts-1", "tts-1-hd"]
    VOICE_OPTIONS = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]

    # Predefine voice preview URLs (official OpenAI sample clips).
    VOICE_PREVIEWS = {
        voice: f"https://cdn.openai.com/API/docs/audio/{voice}.wav"
        for voice in VOICE_OPTIONS
    }

    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column(scale=1):
                api_key_input = gr.Textbox(
                    label="OpenAI API Key",
                    info="https://platform.openai.com/account/api-keys",
                    type="password",
                    placeholder="Enter your OpenAI API Key",
                )
                model_dropdown = gr.Dropdown(
                    choices=MODEL_OPTIONS, label="Model", value="tts-1"
                )
                voice_dropdown = gr.Dropdown(
                    choices=VOICE_OPTIONS, label="Voice Options", value="echo"
                )

                # Voice previews: pass the URL straight to gr.Audio.
                # Raw `bytes` is not an accepted Audio value type (accepted:
                # filepath/URL str, Path, or (sample_rate, ndarray)), and
                # downloading six clips at build time blocked app startup.
                gr.Markdown("### Voice Previews")
                for voice in VOICE_OPTIONS:
                    gr.Audio(
                        value=VOICE_PREVIEWS[voice],
                        waveform_options=gr.WaveformOptions(
                            waveform_color="#01C6FF",
                            waveform_progress_color="#0066B4",
                            skip_length=2,
                            show_controls=False,
                        ),
                        label=f"{voice.capitalize()}",
                        autoplay=False,
                    )

            with gr.Column(scale=2):
                input_textbox = gr.Textbox(
                    label="Input Text", lines=10, placeholder="Type your text here..."
                )
                submit_button = gr.Button("Convert Text to Speech", variant="primary")
            with gr.Column(scale=1):
                output_audio = gr.Audio(label="Output Audio")

        # Event handler for the submit button; tts() raises gr.Error on
        # invalid input or API failure, which Gradio surfaces to the user.
        def on_submit(input_text, model, voice, api_key):
            audio_file = tts(input_text, model, voice, api_key)
            return audio_file

        # Trigger the conversion when the submit button is clicked
        submit_button.click(
            fn=on_submit,
            inputs=[input_textbox, model_dropdown, voice_dropdown, api_key_input],
            outputs=output_audio,
        )

    # Launch the Gradio app with error display enabled
    demo.launch(show_error=True)


if __name__ == "__main__":
    main()