Web3Daily committed on
Commit
86d8051
1 Parent(s): 48e1709

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -35
app.py CHANGED
@@ -1,39 +1,15 @@
1
- import openai
2
  import gradio as gr
3
- import os
4
- from functools import reduce
5
 
6
- openai.api_key = os.getenv("OPENAI_API_KEY")
 
 
 
 
7
 
8
def manage_conversation_history(messages, max_tokens=4096):
    """Trim the oldest user/assistant turns until the history fits a budget.

    NOTE: "tokens" are approximated by the character count of each message's
    content, not real model tokens -- TODO: swap in a proper tokenizer.

    Args:
        messages: Mutable list of ``{"role": ..., "content": ...}`` dicts.
            The system prompt is assumed to sit at index 0 and is never
            removed.
        max_tokens: Character budget the history must fit within.

    Returns:
        The same list object, trimmed in place.
    """
    total = sum(len(msg["content"]) for msg in messages)
    # Drop the oldest exchange (the user+assistant pair at index 1) until we
    # fit the budget. Stop once only the system prompt is left, so we never
    # pop the system message or raise IndexError on a short history (the
    # original popped twice unconditionally and could do both).
    while total > max_tokens and len(messages) > 1:
        messages.pop(1)
        if len(messages) > 1:
            messages.pop(1)
        total = sum(len(msg["content"]) for msg in messages)
    return messages
16
 
17
# Conversation history shared across requests. Index 0 holds the system
# prompt that fixes the assistant's explain-like-I'm-five persona; user and
# assistant turns are appended after it by CustomChatGPT.
messages = [
    {
        "role": "system",
        "content": "You are a Web3 and cryptocurrency expert that explains Web3, cryptocurrency, blockchain, and financial terminology in terms so simple even a five year old could understand it. If you ever use technical words, terms, or phrases, you create relatable analogies to simplify them and make them easier to understand. In fact, you always open with an analogy when possible.",
    }
]
19
def CustomChatGPT(user_input):
    """Record the user's question, query the chat model, and return the reply.

    Side effects: mutates the module-level ``messages`` list (one user entry
    and one assistant entry per call) and trims it in place via
    ``manage_conversation_history`` before sending it to the API.
    """
    messages.append({"role": "user", "content": user_input})
    trimmed_history = manage_conversation_history(messages)
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=trimmed_history,
    )
    reply = completion["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": reply})
    return reply
30
# HTML note shown under the interface title.
# Fix: this variable existed in the original but was never passed to
# gr.Interface (dead variable); it is now wired into the description kwarg,
# which is plainly what it was written for.
description = "Check for ChatGPT outages <a href=https://status.openai.com/>here.</a>"

# One-box-in, one-box-out Gradio UI around the chat function.
demo = gr.Interface(
    fn=CustomChatGPT,
    inputs=gr.Textbox(
        label="Ask a question:",
        placeholder="E.g. What are gas fees? ...can you simplify that with an analogy?",
    ),
    outputs=gr.Textbox(label="Answer:"),
    title="",
    description=description,
)

demo.launch(inline=True)
 
 
1
  import gradio as gr
2
+ from transformers import pipeline
 
3
 
4
# Lazily-built text-generation pipelines keyed by model name, so the
# expensive model load happens once per model instead of on every request.
_PIPELINES = {}


def chat_with_gpt(input_text, model_name="gpt-3.5-turbo"):
    """Generate a text completion for *input_text* with a transformers pipeline.

    Fix: the original constructed a fresh pipeline (reloading the whole
    model) on every call; pipelines are now cached in ``_PIPELINES``.
    ``model_name`` is also generalized to a parameter (same default).

    NOTE(review): "gpt-3.5-turbo" is an OpenAI API model name, not a Hugging
    Face Hub checkpoint, so ``pipeline()`` cannot resolve it -- replace the
    default with a real hub id such as "gpt2". TODO confirm intended model.

    Args:
        input_text: Prompt string to complete.
        model_name: Hugging Face model id used for generation.

    Returns:
        The generated text (prompt plus continuation) as a string.
    """
    if model_name not in _PIPELINES:
        # device=0 pins generation to the first GPU, as the original did.
        _PIPELINES[model_name] = pipeline(
            "text-generation", model=model_name, device=0
        )
    generator = _PIPELINES[model_name]
    outputs = generator(
        input_text, max_length=150, do_sample=True, top_k=50, top_p=0.95
    )
    return outputs[0]["generated_text"]
9
 
10
# Gradio user interface.
# Fix: gr.inputs.Textbox / gr.outputs.Textbox belong to the pre-3.x API and
# were removed in Gradio 3.x; use the top-level gr.Textbox component for both
# sides, matching the style used elsewhere in this file.
input_text = gr.Textbox(lines=5, label="Your question about AI:")
output_text = gr.Textbox(label="ChatGPT Response:")

iface = gr.Interface(
    fn=chat_with_gpt,
    inputs=input_text,
    outputs=output_text,
    title="Chat with ChatGPT",
    description="Ask ChatGPT questions about AI.",
)
iface.launch()