Spaces:
Runtime error
Runtime error
Web3Daily
committed on
Commit
•
48e1709
0
Parent(s):
Duplicate from Web3Daily/WebGPT3
Browse files- .gitattributes +34 -0
- README.md +15 -0
- app.py +39 -0
- requirements.txt +2 -0
- style.css +7 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,15 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: WebGPT3
|
3 |
+
emoji: 🐨
|
4 |
+
colorFrom: indigo
|
5 |
+
colorTo: red
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 3.21.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
duplicated_from: Web3Daily/WebGPT3
|
11 |
+
---
|
12 |
+
"CSS": [".css"],
|
13 |
+
|
14 |
+
|
15 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import openai
|
2 |
+
import gradio as gr
|
3 |
+
import os
|
4 |
+
from functools import reduce
|
5 |
+
|
6 |
+
openai.api_key = os.getenv("OPENAI_API_KEY")
|
7 |
+
|
8 |
+
def manage_conversation_history(messages, max_tokens=4096):
    """Trim the oldest user/assistant turns until the history fits the budget.

    The message at index 0 (the system prompt) is never removed. Budget is
    measured as total characters of message content — a cheap stand-in for
    real token counting, so 4096 "tokens" here is only approximate.

    Mutates *messages* in place and also returns it for convenience.

    Args:
        messages: list of {"role": ..., "content": ...} dicts; index 0 is
            expected to be the system prompt.
        max_tokens: character budget for the whole history.

    Returns:
        The same (trimmed) list object.
    """
    total_tokens = sum(len(msg["content"]) for msg in messages)
    # Original code popped index 1 twice unconditionally, which raises
    # IndexError once only the system prompt plus one message remain.
    # Guard each pop so we never remove the system prompt or over-pop.
    while total_tokens > max_tokens and len(messages) > 1:
        # Drop the oldest user message (and its paired reply, if present).
        messages.pop(1)
        if len(messages) > 1:
            messages.pop(1)
        total_tokens = sum(len(msg["content"]) for msg in messages)
    return messages
|
16 |
+
|
17 |
+
# Conversation state: index 0 is the permanent system prompt that sets the
# ELI5 Web3-expert persona; user/assistant turns are appended after it by
# CustomChatGPT.
# NOTE(review): module-level mutable state — one shared history for every
# visitor of the app; confirm per-user sessions aren't needed.
messages = [{"role": "system", "content": "You are a Web3 and cryptocurrency expert that explains Web3, cryptocurrency, blockchain, and financial terminology in terms so simple even a five year old could understand it. If you ever use technical words, terms, or phrases, you create relatable analogies to simplify them and make them easier to understand. In fact, you always open with an analogy when possible."}]
|
18 |
+
|
19 |
+
def CustomChatGPT(user_input):
    """Record the user's message, query gpt-3.5-turbo, and return the reply.

    Appends both the user turn and the assistant's reply to the shared
    module-level `messages` history (trimmed to budget first).

    Args:
        user_input: the question text from the UI textbox.

    Returns:
        The assistant's reply as a plain string.
    """
    # NOTE(review): mutates the global `messages` list — history is shared
    # across all callers of this function.
    messages.append({"role": "user", "content": user_input})
    trimmed_history = manage_conversation_history(messages)
    completion = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=trimmed_history,
    )
    reply = completion["choices"][0]["message"]["content"]
    messages.append({"role": "assistant", "content": reply})
    return reply
|
29 |
+
|
30 |
+
description = "Check for ChatGPT outages <a href=https://status.openai.com/>here.</a>"
# NOTE(review): `description` is defined but never passed to gr.Interface
# below — confirm whether it was meant to be shown in the UI.

# Build the Gradio UI: one question box in, one answer box out.
question_box = gr.Textbox(
    label="Ask a question:",
    placeholder="E.g. What are gas fees? ...can you simplify that with an analogy?",
)
answer_box = gr.Textbox(label="Answer:")
demo = gr.Interface(fn=CustomChatGPT, inputs=question_box, outputs=answer_box, title="")

demo.launch(inline=True)
|
requirements.txt
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
openai
|
2 |
+
gradio
|
style.css
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
body {
|
2 |
+
background-color: #f0f0f0;
|
3 |
+
}
|
4 |
+
.gradio_submit_button {
|
5 |
+
background-color: #DF3840;
|
6 |
+
color: white;
|
7 |
+
}
|