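"""Gradio demo that converts text to speech with the OpenAI Text-to-Speech API.

The app lets the user supply an OpenAI API key, pick a model and voice,
preview the built-in voices, and play back the generated MP3.

Requires the ``gradio``, ``openai`` (>= 1.0) and ``requests`` packages.
"""
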
import gradio as gr
import tempfile
import openai
import requests
import os
from functools import partial


def tts(input_text: str, model: str, voice: str, api_key: str) -> str:
    """
    Convert input text to speech using OpenAI's Text-to-Speech API.

    Parameters:
        input_text (str): The text to be converted to speech.
        model (str): The model to use for synthesis (e.g., 'tts-1', 'tts-1-hd').
        voice (str): The voice profile to use (e.g., 'alloy', 'echo', 'fable', etc.).
        api_key (str): OpenAI API key.

    Returns:
        str: File path to the generated audio file.

    Raises:
        gr.Error: If input parameters are invalid or API call fails.
    """
    if not api_key.strip():
        raise gr.Error(
            "API key is required. Get an API key at: https://platform.openai.com/account/api-keys"
        )

    if not input_text.strip():
        raise gr.Error("Input text cannot be empty.")

    # Create an API client for this request (OpenAI Python SDK >= 1.0).
    client = openai.OpenAI(api_key=api_key)

    try:
        # Request speech synthesis; the response body holds the binary MP3 audio.
        response = client.audio.speech.create(
            model=model, voice=voice, input=input_text
        )
    except openai.OpenAIError as e:
        # Catch-all for OpenAI API errors (auth, rate limits, invalid params, ...)
        raise gr.Error(f"An OpenAI error occurred: {e}")
    except Exception as e:
        # Catch any other exceptions
        raise gr.Error(f"An unexpected error occurred: {e}")

    audio_bytes = response.content
    if not audio_bytes:
        raise gr.Error(
            "Invalid response from OpenAI API. The response does not contain audio content."
        )

    # Persist the audio to a temporary .mp3 file so Gradio can serve it.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as temp_file:
        temp_file.write(audio_bytes)
        temp_file_path = temp_file.name

    return temp_file_path
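
# Example of calling tts() directly, outside the UI (a sketch; "sk-..." below is a
# placeholder, substitute your real API key):
#
#   audio_path = tts("Hello there!", model="tts-1", voice="alloy", api_key="sk-...")
#   print(audio_path)  # path to a temporary .mp3 file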


def main():
    """
    Main function to create and launch the Gradio interface.
    """
    MODEL_OPTIONS = ["tts-1", "tts-1-hd"]
    VOICE_OPTIONS = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]

    # Predefine voice previews URLs
    VOICE_PREVIEW_URLS = {
        voice: f"https://cdn.openai.com/API/docs/audio/{voice}.wav"
        for voice in VOICE_OPTIONS
    }

    # Download audio previews to disk before initiating the interface
    PREVIEW_DIR = "voice_previews"
    os.makedirs(PREVIEW_DIR, exist_ok=True)

    VOICE_PREVIEW_FILES = {}
    for voice, url in VOICE_PREVIEW_URLS.items():
        local_file_path = os.path.join(PREVIEW_DIR, f"{voice}.wav")
        if not os.path.exists(local_file_path):
            try:
                response = requests.get(url, timeout=30)
                response.raise_for_status()
                with open(local_file_path, "wb") as f:
                    f.write(response.content)
            except requests.exceptions.RequestException as e:
                print(f"Failed to download {voice} preview: {e}")
        # Register the preview only if the file actually exists on disk.
        if os.path.exists(local_file_path):
            VOICE_PREVIEW_FILES[voice] = local_file_path

    # Set static paths for Gradio to serve
    # This needs to be done before creating the Gradio app
    gr.set_static_paths([PREVIEW_DIR])

    with gr.Blocks(title="OpenAI - Text to Speech") as demo:
        with gr.Row():
            with gr.Column(scale=1):
                api_key_input = gr.Textbox(
                    label="OpenAI API Key",
                    info="https://platform.openai.com/account/api-keys",
                    type="password",
                    placeholder="Enter your OpenAI API Key",
                )
                model_dropdown = gr.Dropdown(
                    choices=MODEL_OPTIONS, label="Model", value="tts-1"
                )
                voice_dropdown = gr.Dropdown(
                    choices=VOICE_OPTIONS, label="Voice Options", value="echo"
                )

                # Wrap the voice previews inside an Accordion that is closed by default
                with gr.Accordion(label="Voice Previews", open=False):
                    # Create an audio component to play the samples
                    preview_audio = gr.Audio(
                        interactive=False, label="Preview Audio", value=None
                    )

                    # Handler that returns the local preview file for a given voice
                    # (None if the download failed). Defined once outside the loop;
                    # functools.partial binds each button to its own voice value,
                    # avoiding Python's late-binding closure pitfall.
                    def play_voice_sample(voice):
                        return VOICE_PREVIEW_FILES.get(voice)

                    for voice in VOICE_OPTIONS:
                        # Create a button for each voice
                        voice_button = gr.Button(
                            value=f"▶ {voice.capitalize()}", variant="secondary"
                        )

                        # Attach the click handler with the voice pre-bound
                        voice_button.click(
                            fn=partial(play_voice_sample, voice),
                            outputs=preview_audio,
                        )

            with gr.Column(scale=2):
                input_textbox = gr.Textbox(
                    label="Input Text", lines=10, placeholder="Type your text here..."
                )
                submit_button = gr.Button(
                    "Convert Text to Speech", variant="primary"
                )
            with gr.Column(scale=1):
                output_audio = gr.Audio(label="Output Audio")

        # Event handler for the submit button; tts() raises gr.Error on failure,
        # which Gradio surfaces to the user because show_error=True.
        def on_submit(input_text, model, voice, api_key):
            audio_file = tts(input_text, model, voice, api_key)
            return audio_file

        # Trigger the conversion when the submit button is clicked
        submit_button.click(
            fn=on_submit,
            inputs=[input_textbox, model_dropdown, voice_dropdown, api_key_input],
            outputs=output_audio,
        )

    # Launch the Gradio app with error display enabled
    demo.launch(show_error=True)


if __name__ == "__main__":
    main()