awacke1 committed
Commit 3a8955f
1 Parent(s): 50170f4

Update app.py

Files changed (1)
  1. app.py +46 -46
app.py CHANGED
@@ -130,6 +130,49 @@ def reset_conversation():
     st.session_state.conversation = []
     st.session_state.messages = []

+# Function to generate filenames
+def generate_filename(prompt, file_type):
+    central = pytz.timezone('US/Central')
+    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
+    replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
+    safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
+    return f"{safe_date_time}_{safe_prompt}.{file_type}"
+
+# Function to create files
+def create_file(filename, prompt, response, user_name, timestamp):
+    with open(filename, "w", encoding="utf-8") as f:
+        f.write(f"User: {user_name}\nTimestamp: {timestamp}\n\nPrompt:\n{prompt}\n\nResponse:\n{response}")
+
+# Function to extract video frames
+def extract_video_frames(video_path, seconds_per_frame=2):
+    base64Frames = []
+    video = cv2.VideoCapture(video_path)
+    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+    fps = video.get(cv2.CAP_PROP_FPS)
+    frames_to_skip = int(fps * seconds_per_frame)
+    curr_frame = 0
+    while curr_frame < total_frames - 1:
+        video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame)
+        success, frame = video.read()
+        if not success:
+            break
+        _, buffer = cv2.imencode(".jpg", frame)
+        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
+        curr_frame += frames_to_skip
+    video.release()
+    return base64Frames, None
+
+# Function to process audio for video
+def process_audio_for_video(video_input):
+    try:
+        transcription = client.audio.transcriptions.create(
+            model="whisper-1",
+            file=video_input,
+        )
+        return transcription.text
+    except:
+        return ''
+
 # Function to process text with selected model
 def process_text(user_name, text_input, selected_model, temp_values):
     timestamp = datetime.now(pytz.utc).strftime('%Y-%m-%d %H:%M:%S %Z')
@@ -283,7 +326,7 @@ def main_column(column_name):
     process_video(st.session_state.current_user['name'], video_input, text_input)

 # Main Streamlit app
-st.title("Multiuser Chat with Llama 3.1 and GPT-4o")
+st.title("Personalized Real-Time Chat")

 # Sidebar
 with st.sidebar:
@@ -319,50 +362,7 @@ with col1:
 with col2:
     main_column("Column 2")

-# Function to generate filenames
-def generate_filename(prompt, file_type):
-    central = pytz.timezone('US/Central')
-    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
-    replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
-    safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
-    return f"{safe_date_time}_{safe_prompt}.{file_type}"
-
-# Function to create files
-def create_file(filename, prompt, response, user_name, timestamp):
-    with open(filename, "w", encoding="utf-8") as f:
-        f.write(f"User: {user_name}\nTimestamp: {timestamp}\n\nPrompt:\n{prompt}\n\nResponse:\n{response}")
-
-# Function to extract video frames
-def extract_video_frames(video_path, seconds_per_frame=2):
-    base64Frames = []
-    video = cv2.VideoCapture(video_path)
-    total_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
-    fps = video.get(cv2.CAP_PROP_FPS)
-    frames_to_skip = int(fps * seconds_per_frame)
-    curr_frame = 0
-    while curr_frame < total_frames - 1:
-        video.set(cv2.CAP_PROP_POS_FRAMES, curr_frame)
-        success, frame = video.read()
-        if not success:
-            break
-        _, buffer = cv2.imencode(".jpg", frame)
-        base64Frames.append(base64.b64encode(buffer).decode("utf-8"))
-        curr_frame += frames_to_skip
-    video.release()
-    return base64Frames, None
-
-# Function to process audio for video
-def process_audio_for_video(video_input):
-    try:
-        transcription = client.audio.transcriptions.create(
-            model="whisper-1",
-            file=video_input,
-        )
-        return transcription.text
-    except:
-        return ''
-
 # Run the Streamlit app
 if __name__ == "__main__":
-    st.markdown("*Generated content may be inaccurate or false.*")
-    st.markdown("\n...")
+    st.markdown("*by Aaron Wacker*")
+    st.markdown("\n[Aaron Wacker](https://huggingface.co/spaces/awacke1/).")
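
For reference, a minimal standalone sketch of how the filename and file-writing helpers relocated in this commit might be exercised. The prompt, response, and user name below are illustrative only, and the helper bodies are copied verbatim from the diff so the snippet runs without importing app.py (which would launch the Streamlit UI).

from datetime import datetime
import pytz

# Copies of the helpers from this commit, unchanged, for standalone use.
def generate_filename(prompt, file_type):
    central = pytz.timezone('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
    safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"

def create_file(filename, prompt, response, user_name, timestamp):
    with open(filename, "w", encoding="utf-8") as f:
        f.write(f"User: {user_name}\nTimestamp: {timestamp}\n\nPrompt:\n{prompt}\n\nResponse:\n{response}")

# Illustrative values (hypothetical, not taken from the app).
prompt = "Summarize the meeting notes"
response = "Example model response"
timestamp = datetime.now(pytz.utc).strftime('%Y-%m-%d %H:%M:%S %Z')

# Produces something like "0101_1200_Summarize_the_meeting_notes.md"
filename = generate_filename(prompt, "md")
create_file(filename, prompt, response, user_name="demo_user", timestamp=timestamp)
print(f"Wrote {filename}")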