"use client" import React, { useEffect, useRef, useTransition } from 'react' import { IoMdPhonePortrait } from 'react-icons/io' import { ClapProject } from '@aitube/clap' import Image from 'next/image' import { useFilePicker } from 'use-file-picker' import { DeviceFrameset } from 'react-device-frameset' import 'react-device-frameset/styles/marvel-devices.min.css' import { Card, CardContent, CardHeader } from '@/components/ui/card' import { Button } from '@/components/ui/button' import { Toaster } from '@/components/ui/sonner' import { TextareaField } from '@/components/form/textarea-field' import { cn } from '@/lib/utils/cn' import { createClap } from './server/aitube/createClap' import { editClapEntities } from './server/aitube/editClapEntities' import { editClapDialogues } from './server/aitube/editClapDialogues' import { editClapStoryboards } from './server/aitube/editClapStoryboards' import { exportClapToVideo } from './server/aitube/exportClapToVideo' import { useStore } from './store' import HFLogo from "./hf-logo.svg" import { fileToBase64 } from '@/lib/base64/fileToBase64' import { Input } from '@/components/ui/input' import { Field } from '@/components/form/field' import { Label } from '@/components/form/label' import { VideoOrientation } from './types' import { getParam } from '@/lib/utils/getParam' export function Main() { const [_isPending, startTransition] = useTransition() const storyPromptDraft = useStore(s => s.storyPromptDraft) const promptDraft = useRef("") promptDraft.current = storyPromptDraft const storyPrompt = useStore(s => s.storyPrompt) const mainCharacterImage = useStore(s => s.mainCharacterImage) const mainCharacterVoice = useStore(s => s.mainCharacterVoice) const orientation = useStore(s => s.orientation) const status = useStore(s => s.status) const storyGenerationStatus = useStore(s => s.storyGenerationStatus) const assetGenerationStatus = useStore(s => s.assetGenerationStatus) const voiceGenerationStatus = useStore(s => s.voiceGenerationStatus) const imageGenerationStatus = useStore(s => s.imageGenerationStatus) const videoGenerationStatus = useStore(s => s.videoGenerationStatus) const currentClap = useStore(s => s.currentClap) const currentVideo = useStore(s => s.currentVideo) const currentVideoOrientation = useStore(s => s.currentVideoOrientation) const setStoryPromptDraft = useStore(s => s.setStoryPromptDraft) const setStoryPrompt = useStore(s => s.setStoryPrompt) const setMainCharacterImage = useStore(s => s.setMainCharacterImage) const setMainCharacterVoice = useStore(s => s.setMainCharacterVoice) const setStatus = useStore(s => s.setStatus) const toggleOrientation = useStore(s => s.toggleOrientation) const error = useStore(s => s.error) const setError = useStore(s => s.setError) const setStoryGenerationStatus = useStore(s => s.setStoryGenerationStatus) const setAssetGenerationStatus = useStore(s => s.setAssetGenerationStatus) const setVoiceGenerationStatus = useStore(s => s.setVoiceGenerationStatus) const setImageGenerationStatus = useStore(s => s.setImageGenerationStatus) const setVideoGenerationStatus = useStore(s => s.setVideoGenerationStatus) const setCurrentClap = useStore(s => s.setCurrentClap) const setGeneratedVideo = useStore(s => s.setGeneratedVideo) const progress = useStore(s => s.progress) const setProgress = useStore(s => s.setProgress) const saveClap = useStore(s => s.saveClap) const loadClap = useStore(s => s.loadClap) const canSeeBetaFeatures = getParam("beta", false) const hasPendingTasks = storyGenerationStatus === "generating" || assetGenerationStatus === "generating" || voiceGenerationStatus === "generating" || imageGenerationStatus === "generating" || videoGenerationStatus === "generating" const isBusy = status === "generating" || hasPendingTasks const { openFilePicker, filesContent, loading } = useFilePicker({ accept: '.clap', readAs: "ArrayBuffer" }) const fileData = filesContent[0] useEffect(() => { const fn = async () => { if (fileData?.name) { try { const blob = new Blob([fileData.content]) await loadClap(blob, fileData.name) } catch (err) { console.error("failed to load the Clap file:", err) } } } fn() }, [fileData?.name]) const handleSubmit = async () => { startTransition(async () => { console.log(`handleSubmit(): generating a clap using prompt = "${promptDraft.current}" `) let clap: ClapProject | undefined = undefined try { setProgress(1) setStatus("generating") setStoryGenerationStatus("generating") setStoryPrompt(promptDraft.current) clap = await createClap({ prompt: promptDraft.current, orientation: useStore.getState().orientation, }) if (!clap) { throw new Error(`failed to create the clap`) } if (clap.segments.length <= 1) { throw new Error(`failed to generate more than one segments`) } console.log(`handleSubmit(): received a clap = `, clap) setCurrentClap(clap) setStoryGenerationStatus("finished") } catch (err) { setStoryGenerationStatus("error") setStatus("error") setError(`${err}`) return } if (!clap) { return } console.log("-------- GENERATED STORY --------") console.table(clap.segments, [ // 'startTimeInMs', 'endTimeInMs', // 'track', 'category', 'prompt' ]) try { setProgress(10) setAssetGenerationStatus("generating") clap = await editClapEntities({ clap }) if (!clap) { throw new Error(`failed to edit the entities`) } console.log(`handleSubmit(): received a clap with entities = `, clap) setCurrentClap(clap) setAssetGenerationStatus("finished") } catch (err) { setAssetGenerationStatus("error") setStatus("error") setError(`${err}`) return } if (!clap) { return } /* if (mainCharacterImage) { console.log("handleSubmit(): User specified a main character image") // various strategies here, for instance we can assume that the first character is the main character, // or maybe a more reliable way is to count the number of occurrences. // there is a risk of misgendering, so ideally we should add some kind of UI to do this, // such as a list of characters. } */ // TODO Julian console.log("handleSubmit(): TODO Julian: generate images in parallel of the dialogue using Promise.all()") // this is not trivial to do btw, since we will have to merge the clap together // (this could be a helper function inside @aitube/clap) try { setProgress(40) setImageGenerationStatus("generating") clap = await editClapStoryboards({ clap }) if (!clap) { throw new Error(`failed to edit the storyboards`) } console.log(`handleSubmit(): received a clap with images = `, clap) setCurrentClap(clap) setImageGenerationStatus("finished") } catch (err) { setImageGenerationStatus("error") setStatus("error") setError(`${err}`) return } if (!clap) { return } try { setProgress(60) setVoiceGenerationStatus("generating") clap = await editClapDialogues({ clap }) if (!clap) { throw new Error(`failed to edit the dialogues`) } console.log(`handleSubmit(): received a clap with dialogues = `, clap) setCurrentClap(clap) setVoiceGenerationStatus("finished") } catch (err) { setVoiceGenerationStatus("error") setStatus("error") setError(`${err}`) return } if (!clap) { return } let assetUrl = "" try { setProgress(80) setVideoGenerationStatus("generating") assetUrl = await exportClapToVideo({ clap }) console.log(`handleSubmit(): received a video: ${assetUrl.slice(0, 60)}...`) setVideoGenerationStatus("finished") } catch (err) { setVideoGenerationStatus("error") setStatus("error") setError(`${err}`) return } if (!assetUrl) { return } setGeneratedVideo(assetUrl) setStatus("finished") setError("") }) } // note: we are interested in the *current* video orientation, // not the requested video orientation requested for the next video const isLandscape = currentVideoOrientation === VideoOrientation.LANDSCAPE const isPortrait = currentVideoOrientation === VideoOrientation.PORTRAIT const isSquare = currentVideoOrientation === VideoOrientation.SQUARE return (
AI
Stories Factory

Make video stories using AI ✨

{/* LEFT MENU BUTTONS + MAIN PROMPT INPUT */}
{/* TODO: To finish by Julian a bit later
) => { if (e.target.files && e.target.files.length > 0) { const file = e.target.files[0]; const newImageBase64 = await fileToBase64(file) setMainCharacterImage(newImageBase64) } }} accept="image/*" />
*/} {/* MAIN PROMPT INPUT */}
{ setStoryPromptDraft(e.target.value) promptDraft.current = e.target.value }} placeholder="Yesterday I was at my favorite pizza place and.." inputClassName=" transition-all duration-200 ease-in-out h-32 md:h-56 lg:h-64 " disabled={isBusy} value={storyPromptDraft} /> {/* END OF MAIN PROMPT INPUT */}
{/* END OF LEFT MENU BUTTONS + MAIN PROMPT INPUT */}
{/* ACTION BAR */}
{/* */} {canSeeBetaFeatures ? :
}
{/* ORIENTATION SWITCH */}
toggleOrientation()}>
Orientation:
{/* END OF ORIENTATION SWITCH */}
{/* END OF ACTION BAR */}
{isBusy ?

{progress}%

{isBusy ? ( storyGenerationStatus === "generating" ? "Enhancing the story.." : assetGenerationStatus === "generating" ? "Creating characters.." : imageGenerationStatus === "generating" ? "Generating storyboards.." : voiceGenerationStatus === "generating" ? "Generating voices.." : videoGenerationStatus === "generating" ? "Assembling final video.." : "Please wait.." ) : status === "error" ? {error || ""} :   // to prevent layout changes }

: currentVideo ?
Powered by Hugging Face Hugging Face
); }