zeeshan391 committed on
Commit
3ee9503
1 Parent(s): 10daf2e

created app.py file

Files changed (1)
  1. app.py +100 -0
app.py ADDED
@@ -0,0 +1,100 @@
+ from fastapi import FastAPI, HTTPException
+ from pydantic import BaseModel
+ from langchain_huggingface.llms import HuggingFacePipeline
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+ from langchain_core.callbacks import CallbackManager, StreamingStdOutCallbackHandler
+ from langchain_core.prompts import ChatPromptTemplate
+
+ import os
+ import fal_client
+
+ # FastAPI app
+ app = FastAPI()
+
+ # Set the fal.ai API key (placeholder shown here; provide the real key via the
+ # environment or Space secrets rather than committing it to source control)
+ os.environ['FAL_KEY'] = '<your-fal-key>'
+
+ # Request body model
+ class StoryRequest(BaseModel):
+     mood: str
+     story_type: str
+     theme: str
+     num_scenes: int
+     txt: str
+
+ # Initialize the LLM
+ callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
+
+ tokenizer = AutoTokenizer.from_pretrained("tohur/natsumura-storytelling-rp-1.0-llama-3.1-8b")
+ model = AutoModelForCausalLM.from_pretrained("tohur/natsumura-storytelling-rp-1.0-llama-3.1-8b")
+ pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=2000)
+ llm = HuggingFacePipeline(pipeline=pipe)
+
+ # Create a prompt template
+ system = """You are a helpful and creative assistant that specializes in generating engaging and imaginative short stories for kids.
+ Based on the user's provided mood, preferred story type, theme, age, and desired story length of 500-600 words, create a unique and captivating story.
+ Always start with the story title, then generate a single story. The story begins on Page 1 and ends on Page 7 (write every page heading in bold).
+ The story has seven pages in total, each with one short paragraph. Don't ask for any feedback at the end; just sign off with a cute closing inviting the reader
+ to create another adventure soon!
+ """
+
+ prompt_template = ChatPromptTemplate.from_messages([("system", system), ("human", "{text}")])
+
+ # FastAPI endpoint to generate the story
+ @app.post("/generate_story/")
+ async def generate_story(story_request: StoryRequest):
+     story = f"""Here are the inputs from the user:
+     - **Mood:** {story_request.mood}
+     - **Story Type:** {story_request.story_type}
+     - **Theme:** {story_request.theme}
+     - **Details Provided:** {story_request.txt}
+     """
+
+     # Compose the chain: the prompt template feeds the LLM (prompt first, then model)
+     chain = prompt_template | llm
+
+     try:
+         response = chain.invoke({"text": story})
+     except Exception as e:
+         raise HTTPException(status_code=500, detail=str(e))
+
+     if not response:
+         raise HTTPException(status_code=500, detail="Failed to generate the story")
+
+     # Generate one illustration per scene with fal.ai's FLUX "schnell" model
+     images = []
+     for i in range(story_request.num_scenes):
+         image_prompt = (
+             f"Generate an image for Scene {i+1}. "
+             f"This image should represent the details described in paragraph {i+1} of the story. "
+             f"Mood: {story_request.mood}, Story Type: {story_request.story_type}, Theme: {story_request.theme}. "
+             f"Story: {response} "
+             f"Focus on the key elements in paragraph {i+1}."
+         )
+         handler = fal_client.submit(
+             "fal-ai/flux/schnell",
+             arguments={
+                 "prompt": image_prompt,
+                 "num_images": 1,
+                 "enable_safety_checker": True
+             },
+         )
+         result = handler.get()
+         image_url = result['images'][0]['url']
+         images.append(image_url)
+
+     return {
+         "story": response,
+         "images": images
+     }
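
For reference, a minimal sketch of how this endpoint could be exercised once the app is running. It assumes the server is started locally (for example with uvicorn app:app --port 8000), that FAL_KEY is configured in the server's environment, and that the requests package is installed; the host, port, and payload values below are illustrative only and are not part of the commit.

# Hypothetical client call; host, port, and payload values are examples only.
import requests

payload = {
    "mood": "adventurous",
    "story_type": "fairy tale",
    "theme": "friendship",
    "num_scenes": 3,
    "txt": "A young fox who dreams of flying",
}

resp = requests.post("http://localhost:8000/generate_story/", json=payload)
resp.raise_for_status()
data = resp.json()
print(data["story"])    # the generated seven-page story
print(data["images"])   # list of image URLs, one per scene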