KingNish committed
Commit 3f99ff3
1 Parent(s): edd1760

Update voice_chat.py

Files changed (1)
  1. voice_chat.py +63 -49
voice_chat.py CHANGED
@@ -23,54 +23,58 @@ def extract_text_from_webpage(html_content):
     visible_text = soup.get_text(strip=True)
     return visible_text
 
-# Perform a Google search and return the results
-def search(term, num_results=3, lang="en", advanced=True, timeout=5, safe="active", ssl_verify=None):
+def search(term, num_results=1, lang="en", advanced=True, sleep_interval=0, timeout=5, safe="active", ssl_verify=None):
     """Performs a Google search and returns the results."""
     escaped_term = urllib.parse.quote_plus(term)
     start = 0
     all_results = []
-    # Limit the number of characters from each webpage to stay under the token limit
-    max_chars_per_page = 3000 # Adjust this value based on your token limit and average webpage length
-
-    with requests.Session() as session:
-        while start < num_results:
-            resp = session.get(
-                url="https://www.google.com/search",
-                headers={"User-Agent":'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62'},
-                params={
-                    "q": term,
-                    "num": num_results - start,
-                    "hl": lang,
-                    "start": start,
-                    "safe": safe,
-                },
-                timeout=timeout,
-                verify=ssl_verify,
-            )
-            resp.raise_for_status()
-            soup = BeautifulSoup(resp.text, "html.parser")
-            result_block = soup.find_all("div", attrs={"class": "g"})
-            if not result_block:
-                start += 1
-                continue
-            for result in result_block:
-                link = result.find("a", href=True)
-                if link:
-                    link = link["href"]
-                    try:
-                        webpage = session.get(link, headers={"User-Agent": get_useragent()})
-                        webpage.raise_for_status()
-                        visible_text = extract_text_from_webpage(webpage.text)
-                        # Truncate text if it's too long
-                        if len(visible_text) > max_chars_per_page:
-                            visible_text = visible_text[:max_chars_per_page] + "..."
-                        all_results.append({"text": visible_text})
-                    except requests.exceptions.RequestException as e:
-                        print(f"Error fetching or processing {link}: {e}")
-                        all_results.append({"text": None})
-                else:
-                    all_results.append({"text": None})
-            start += len(result_block)
+
+    # Fetch results in batches
+    while start < num_results:
+        resp = requests.get(
+            url="https://www.google.com/search",
+            headers={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 Edg/111.0.1661.62"}, # Set random user agent
+            params={
+                "q": term,
+                "num": num_results - start, # Number of results to fetch in this batch
+                "hl": lang,
+                "start": start,
+                "safe": safe,
+            },
+            timeout=timeout,
+            verify=ssl_verify,
+        )
+        resp.raise_for_status() # Raise an exception if request fails
+
+        soup = BeautifulSoup(resp.text, "html.parser")
+        result_block = soup.find_all("div", attrs={"class": "g"})
+
+        # If no results, continue to the next batch
+        if not result_block:
+            start += 1
+            continue
+
+        # Extract link and text from each result
+        for result in result_block:
+            link = result.find("a", href=True)
+            if link:
+                link = link["href"]
+                try:
+                    # Fetch webpage content
+                    webpage = requests.get(link, headers={"User-Agent": get_useragent()})
+                    webpage.raise_for_status()
+                    # Extract visible text from webpage
+                    visible_text = extract_text_from_webpage(webpage.text)
+                    all_results.append({"link": link, "text": visible_text})
+                except requests.exceptions.RequestException as e:
+                    # Handle errors fetching or processing webpage
+                    print(f"Error fetching or processing {link}: {e}")
+                    all_results.append({"link": link, "text": None})
+            else:
+                all_results.append({"link": None, "text": None})
+
+        start += len(result_block) # Update starting index for next batch
+
     return all_results
 
 # Speech Recognition Model Configuration
@@ -84,7 +88,7 @@ tokenizer = spm.SentencePieceProcessor(hf_hub_download(model_name, "tokenizer.sp
 
 # Mistral Model Configuration
 client1 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
-system_instructions1 = "<s>[SYSTEM] Answer as Real OpenGPT 4o, Made by 'KingNish', Keep conversation very short, clear, friendly and concise. The text provided is a request for a specific type of response from you, the virtual assistant. The request asks you to provide friendly responses. The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, your task is to generate responses that appear as if they are created by the user and not an AI-powered assistant. [USER]"
+system_instructions1 = "<s>[SYSTEM] Answer as Real Jarvis JARVIS, Made by 'Tony Stark', Keep conversation very short, clear, friendly and concise. The text provided is a request for a specific type of response from you, the virtual assistant. The request asks you to provide friendly responses as if You are the character Jarvis, made by 'Tony Stark.' The expectation is that I will avoid introductions and start answering the query directly, elaborating on all aspects of the request. As an AI-powered assistant, your task is to generate responses that appear as if they are created by the user and not an AI-powered assistant. [USER]"
 
 def resample(audio_fp32, sr):
     return soxr.resample(audio_fp32, sr, sample_rate)
@@ -116,12 +120,12 @@ def model(text, web_search):
     if web_search is True:
         """Performs a web search, feeds the results to a language model, and returns the answer."""
         web_results = search(text)
-        web2 = ' '.join([f"Text: {res['text']}\n\n" for res in web_results])
-        formatted_prompt = system_instructions1 + text + "[WEB]" + str(web2) + "[OpenGPT 4o]"
+        web2 = ' '.join([f"Link: {res['link']}\nText: {res['text']}\n\n" for res in web_results])
+        formatted_prompt = system_instructions1 + text + "[WEB]" + str(web2) + "[ANSWER]"
         stream = client1.text_generation(formatted_prompt, max_new_tokens=512, stream=True, details=True, return_full_text=False)
         return "".join([response.token.text for response in stream if response.token.text != "</s>"])
     else:
-        formatted_prompt = system_instructions1 + text + "[OpenGPT 4o]"
+        formatted_prompt = system_instructions1 + text + "[JARVIS]"
         stream = client1.text_generation(formatted_prompt, max_new_tokens=512, stream=True, details=True, return_full_text=False)
         return "".join([response.token.text for response in stream if response.token.text != "</s>"])
 
@@ -132,4 +136,14 @@ async def respond(audio, web_search):
     with tempfile.NamedTemporaryFile(delete=False, suffix=".wav") as tmp_file:
         tmp_path = tmp_file.name
         await communicate.save(tmp_path)
-        return tmp_path
+    return tmp_path
+
+with gr.Blocks() as demo:
+    with gr.Row():
+        web_search = gr.Checkbox(label="Web Search", value=False)
+        input = gr.Audio(label="Voice Chat (BETA)", sources="microphone", type="filepath", waveform_options=False)
+        output = gr.Audio(label="JARVIS", type="filepath", interactive=False, autoplay=True, elem_classes="audio")
+    gr.Interface(fn=respond, inputs=[input, web_search], outputs=[output], live=True)
+
+if __name__ == "__main__":
+    demo.queue(max_size=200).launch()
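
For reference, a minimal, hypothetical sketch of how the revised search() output could be consumed, mirroring the web-search branch of model() in this file. The per-page truncation (max_chars_per_page = 3000) from the previous version is re-applied here as an assumption, since the new implementation no longer limits page length; the query string is an arbitrary example.

# Hypothetical usage sketch (not committed code): build the [WEB] prompt from the
# new {"link": ..., "text": ...} results, re-applying the old 3000-character cap.
web_results = search("example query", num_results=1)
max_chars_per_page = 3000  # assumed limit, carried over from the previous version
web2 = ' '.join(
    f"Link: {res['link']}\nText: {(res['text'] or '')[:max_chars_per_page]}\n\n"
    for res in web_results
)
formatted_prompt = system_instructions1 + "example query" + "[WEB]" + web2 + "[ANSWER]"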