import gradio as gr
import os
import cv2
from moviepy.editor import ImageSequenceClip

# Load the instruct-pix2pix Space as a callable client
token = os.environ.get('HF_TOKEN')
pix2pix = gr.Blocks.load(name="spaces/fffiloni/instruct-pix2pix-clone", api_key=token)


def get_frames(video_in):
    """Split the input video into individual JPEG frames and return their paths plus the source fps."""
    frames = []
    # Open the video file
    cap = cv2.VideoCapture(video_in)
    fps = cap.get(cv2.CAP_PROP_FPS)
    i = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        cv2.imwrite('kang' + str(i) + '.jpg', frame)
        frames.append('kang' + str(i) + '.jpg')
        i += 1

    cap.release()
    cv2.destroyAllWindows()
    return frames, fps


def create_video(frames, fps):
    """Reassemble a list of frame image paths into an mp4 at the original frame rate."""
    clip = ImageSequenceClip(frames, fps=fps)
    clip.write_videofile("movie.mp4", fps=fps)
    return 'movie.mp4'


def infer(prompt, video_in, seed_in):
    # Break the source video into frames
    frames_list, fps = get_frames(video_in)

    # Run instruct-pix2pix on every frame with the same prompt and seed
    result_frames = []
    for frame_path in frames_list:
        pix2pix_img = pix2pix(prompt, 5.5, 1.5, frame_path, 15, "", 512, 512, seed_in, fn_index=0)
        images = [os.path.join(pix2pix_img[0], img) for img in os.listdir(pix2pix_img[0])]
        result_frames.append(images[0])

    # Stitch the edited frames back into a video
    final_vid = create_video(result_frames, fps)
    return final_vid


with gr.Blocks(css='style.css') as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            with gr.Column():
                prompt = gr.Textbox(placeholder="enter prompt")
                video_inp = gr.Video(label="Video source", source="upload", type="filepath")
                seed_inp = gr.Number(label="Seed", value=123456)
            with gr.Column():
                video_out = gr.Video(label="Pix2pix video result")
                submit_btn = gr.Button("Generate Pix2Pix video")

        inputs = [prompt, video_inp, seed_inp]
        outputs = [video_out]

        submit_btn.click(infer, inputs, outputs)

# Enable the request queue before launching; .queue() must be called on the Blocks
# instance, not on the return value of launch()
demo.queue(max_size=12)
demo.launch()