import gradio as gr
from transformers import pipeline

# Load the models using pipeline
audio_model = pipeline("audio-classification", model="MelodyMachine/Deepfake-audio-detection-V2")
image_model = pipeline("image-classification", model="dima806/deepfake_vs_real_image_detection")

# Define the prediction function
def predict(audio_path, image_path, model_choice):
    print("Audio received:", audio_path)  # Debugging statement
    print("Image received:", image_path)  # Debugging statement
    try:
        if model_choice == "Audio Deepfake Detection":
            result = audio_model(audio_path)
        elif model_choice == "Image Deepfake Detection":
            result = image_model(image_path)
        else:
            return "Invalid model choice"
        print("Raw prediction result:", result)  # Debugging statement
        # Convert the list of {label, score} dicts into the {label: score}
        # mapping that gr.Label expects
        output = {item["label"]: item["score"] for item in result}
        print("Formatted prediction result:", output)  # Debugging statement
        return output
    except Exception as e:
        print("Error during prediction:", e)  # Debugging statement
        return f"Error: {e}"

# Show only the input component that matches the selected model
def update_interface(model_choice):
    if model_choice == "Audio Deepfake Detection":
        return gr.update(visible=True), gr.update(visible=False)
    elif model_choice == "Image Deepfake Detection":
        return gr.update(visible=False), gr.update(visible=True)

# Create the Gradio interface
with gr.Blocks() as iface:
    model_choice = gr.Radio(
        choices=["Audio Deepfake Detection", "Image Deepfake Detection"],
        label="Select Model",
        value="Audio Deepfake Detection",
    )
    audio_input = gr.Audio(type="filepath", label="Upload Audio File")
    image_input = gr.Image(type="filepath", label="Upload Image File", visible=False)
    output = gr.Label()

    # Toggle input visibility when the model selection changes
    model_choice.change(fn=update_interface, inputs=model_choice, outputs=[audio_input, image_input])

    submit_button = gr.Button("Submit")
    # Pass both input components plus the model choice directly
    # (not wrapped in gr.State); predict() picks the relevant one
    submit_button.click(fn=predict, inputs=[audio_input, image_input, model_choice], outputs=output)

iface.launch()