File size: 2,177 Bytes
d2686f5
 
942b161
d2686f5
33b18b9
 
 
d2686f5
 
942b161
51c1e80
 
4510960
33b18b9
51c1e80
33b18b9
51c1e80
33b18b9
 
 
4510960
 
 
 
 
 
 
 
d2686f5
1336633
942b161
33b18b9
 
f753e4b
33b18b9
f753e4b
33b18b9
1336633
33b18b9
 
f753e4b
 
 
 
 
 
33b18b9
51c1e80
33b18b9
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
import gradio as gr
from transformers import pipeline
import spaces

# Load the models using pipeline.
# Both pipelines are instantiated once at import time so every request reuses
# the already-loaded weights; `predict` below reads these module globals.
# NOTE(review): the first startup downloads both checkpoints from the Hugging
# Face Hub — network access is required at launch.
audio_model = pipeline("audio-classification", model="MelodyMachine/Deepfake-audio-detection-V2")
image_model = pipeline("image-classification", model="dima806/deepfake_vs_real_image_detection")

# Define the prediction function
@spaces.GPU
def predict(audio, image, model_choice):
    """Run deepfake detection on the input matching the selected model.

    Args:
        audio: Filepath of the uploaded audio clip (used by the audio model).
        image: Filepath of the uploaded image (used by the image model).
        model_choice: "Audio Deepfake Detection" or "Image Deepfake Detection";
            any other value produces an error dict.

    Returns:
        A ``{label: score}`` mapping suitable for ``gr.Label``, or
        ``{"error": <message>}`` when the choice is invalid or inference fails.
    """
    is_audio = model_choice == "Audio Deepfake Detection"
    print("Data received:", audio if is_audio else image)  # Debugging statement
    try:
        if is_audio:
            raw = audio_model(audio)
        elif model_choice == "Image Deepfake Detection":
            raw = image_model(image)
        else:
            return {"error": "Invalid model choice"}

        print("Raw prediction result:", raw)  # Debugging statement
        # Reshape the pipeline's list of {label, score} dicts into the single
        # mapping that gr.Label renders as a bar chart.
        formatted = {entry['label']: entry['score'] for entry in raw}
        print("Formatted prediction result:", formatted)  # Debugging statement
        return formatted
    except Exception as e:
        print("Error during prediction:", e)  # Debugging statement
        return {"error": str(e)}

# Update interface based on the selected model.
# No @spaces.GPU here: this is a pure UI-visibility toggle with no model work,
# so claiming a GPU slot for it would waste ZeroGPU quota.
def update_interface(model_choice):
    """Toggle which input component is visible for *model_choice*.

    Args:
        model_choice: The radio selection, normally one of
            "Audio Deepfake Detection" or "Image Deepfake Detection".

    Returns:
        A pair of ``gr.update`` objects for ``(audio_input, image_input)``.
    """
    if model_choice == "Image Deepfake Detection":
        return gr.update(visible=False), gr.update(visible=True)
    # Default (including any unexpected value) to the audio layout instead of
    # implicitly returning None, which Gradio cannot apply to the components.
    return gr.update(visible=True), gr.update(visible=False)

# Create Gradio interface: a radio picks the detector, and only the matching
# upload component is visible at any time.
with gr.Blocks() as iface:
    model_choice = gr.Radio(
        choices=["Audio Deepfake Detection", "Image Deepfake Detection"],
        label="Select Model",
        value="Audio Deepfake Detection",
    )
    audio_input = gr.Audio(type="filepath", label="Upload Audio File")
    # Image input starts hidden to mirror the default radio value (audio).
    image_input = gr.Image(type="filepath", label="Upload Image File", visible=False)
    output = gr.Label()

    # Swap input visibility whenever the radio selection changes.
    model_choice.change(fn=update_interface, inputs=model_choice, outputs=[audio_input, image_input])

    submit_button = gr.Button("Submit")
    submit_button.click(fn=predict, inputs=[audio_input, image_input, model_choice], outputs=output)

# Launch outside the Blocks context (so the layout is fully built first) and
# only when run as a script — importing this module must not start a server.
if __name__ == "__main__":
    iface.launch()