ivnban27-ctl committed
Commit 59d5667 • Parent(s): 75b7dbb

changed roleplays for openai to GCT and SP
app_config.py CHANGED
@@ -1,4 +1,7 @@
-ISSUES = ['Anxiety','Suicide']
+from models.model_seeds import seeds
+
+# ISSUES = ['Anxiety','Suicide']
+ISSUES = [k for k,_ in seeds.items()]
 SOURCES = ['OA_rolemodel',
            # 'OA_finetuned',
            "CTL_llama2"]
convosim.py CHANGED
@@ -2,8 +2,10 @@ import os
 import streamlit as st
 from streamlit.logger import get_logger
 from langchain.schema.messages import HumanMessage
-from mongo_utils import get_db_client
-from app_utils import create_memory_add_initial_message, clear_memory, get_chain, push_convo2db
+from utils.mongo_utils import get_db_client
+from utils.app_utils import create_memory_add_initial_message, get_random_name
+from utils.memory_utils import clear_memory, push_convo2db
+from utils.chain_utils import get_chain
 from app_config import ISSUES, SOURCES, source2label
 
 logger = get_logger(__name__)
@@ -14,6 +16,10 @@ if 'previous_source' not in st.session_state:
     st.session_state['previous_source'] = SOURCES[0]
 if 'db_client' not in st.session_state:
     st.session_state["db_client"] = get_db_client()
+if 'counselor_name' not in st.session_state:
+    st.session_state["counselor_name"] = get_random_name()
+if 'texter_name' not in st.session_state:
+    st.session_state["texter_name"] = get_random_name()
 
 with st.sidebar:
     username = st.text_input("Username", value='ivnban-ctl', max_chars=30)
@@ -21,21 +27,30 @@ with st.sidebar:
     issue = st.selectbox("Select an Issue", ISSUES, index=0,
                          on_change=clear_memory, kwargs={"memories":memories, "username":username, "language":"English"}
                          )
-    supported_languages = ['English', "Spanish"] if issue == "Anxiety" else ['English']
+    supported_languages = ['en', "es"] if issue == "Anxiety" else ['en']
     language = st.selectbox("Select a Language", supported_languages, index=0,
+                            format_func=lambda x: "English" if x=="en" else "Spanish",
                             on_change=clear_memory, kwargs={"memories":memories, "username":username, "language":"English"}
                             )
 
-    source = st.selectbox("Select a source Model A", SOURCES, index=1,
+    source = st.selectbox("Select a source Model A", SOURCES, index=0,
                           format_func=source2label,
                           )
 
     memories = {'memory':{"issue":issue, "source":source}}
     changed_source = st.session_state['previous_source'] != source
-    create_memory_add_initial_message(memories, username, language, changed_source=changed_source)
+    if changed_source:
+        st.session_state["counselor_name"] = get_random_name()
+        st.session_state["texter_name"] = get_random_name()
+    texter_name = create_memory_add_initial_message(memories,
+                                                    issue,
+                                                    language,
+                                                    changed_source=changed_source,
+                                                    counselor_name=st.session_state["counselor_name"],
+                                                    texter_name=st.session_state["texter_name"])
     st.session_state['previous_source'] = source
     memoryA = st.session_state[list(memories.keys())[0]]
-    llm_chain, stopper = get_chain(issue, language, source, memoryA, temperature)
+    llm_chain, stopper = get_chain(issue, language, source, memoryA, temperature, texter_name=st.session_state["texter_name"])
 
 st.title("💬 Simulator")
 
models/model_seeds.py ADDED
@@ -0,0 +1,83 @@
+seeds = {
+    "GCT__relationship": {
+        "prompt": "Your character is having a hard time because of a failed relationship.",
+        "memory": "texter: Hi, I don't know what to do"
+    },
+    "GCT__body_image": {
+        "prompt": "Your character has low self-esteem and struggles with body image.",
+        "memory": "texter: I feel so dumb\ntexter: nobody loves me"
+    },
+    "GCT__sexuality": {
+        "prompt": "Your character is having a sexual identity crisis.",
+        "memory": "texter: Hi\ntexter: I'm not sure who I am anymore"
+    },
+    "GCT__anxiety": {
+        "prompt": "Your character is experiencing an anxiety crisis.",
+        "memory": "texter: help!\ntexter: I'm feeling overwhelmed"
+    },
+    "GCT": {
+        "prompt": "",
+        "memory": "texter: Help"
+    },
+    "safety_planning": {
+        "prompt": "",
+        "memory": """texter: Hi, this is pointless
+helper: Hi, my name is {counselor_name} and I'm here to support you. It sounds like you are having a rough time. Do you want to share what is going on?
+texter: sure
+texter: nothing makes sense in my life, I see no future.
+helper: It takes courage to reach out when you are hurting. I'm here with you. It sounds like you are feeling defeated by how things are going in your life.
+texter: Yeah, I guess I'm better off dead
+helper: It's really brave of you to talk about this openly. No one deserves to feel like that. I'm wondering, how long have you been feeling this way?
+texter: About 1 week or so
+helper: You are so strong for dealing with this for so long. I really appreciate your openness. If you are comfortable, is there a name I can call you by while we talk?
+texter: call me {texter_name}
+helper: Nice to meet you {texter_name}. You mentioned having thoughts of suicide; are you having those thoughts now?
+texter: Yes
+helper: I know this is tough to share. I'm wondering, is there any plan to end your life?
+texter: I'll just hang myself. I already bought the rope and everything
+helper: I really appreciate your strength in talking about this. I want to help you stay safe today. When do you plan to go through with your plan?
+texter: today
+"""
+    },
+    "safety_planning__selfharm": {
+        "prompt": "",
+        "memory": """texter: I need help
+texter: I cut myself, I don't want to live anymore
+helper: Hi, my name is {counselor_name}. It seems you are going through a lot. Are you self-harming right now?
+texter: Not anymore
+helper: Your safety is my number one priority. Thanks for being honest with me. Would you like to share a name I can call you?
+texter: {texter_name}
+helper: Nice to meet you {texter_name}. I'm glad you reached out; this shows strength in you. Would you like to share more about what is going on in your life?
+texter: I just can't do it anymore
+texter: Finding a job is impossible, money is tight, nothing goes my way
+helper: I hear you are frustrated, and you are currently unemployed, correct?
+texter: Yeah
+helper: Dealing with unemployment is hard and it's normal to feel disappointed. How long have you been feeling this way?
+texter: a while now
+texter: I've been unemployed 6 months
+helper: You are so resilient for dealing with this for so long. You mentioned cutting yourself earlier. I want to check in on your safety. Do you have thoughts of suicide?
+texter: Definitely
+helper: Do you have a plan?
+texter: I'll just keep cutting myself"""
+    },
+    "safety_planning__overdose": {
+        "prompt": "",
+        "memory": """texter: I want to kms
+helper: Hi there, I'm {counselor_name}. I'm here to listen. It sounds like you're dealing with a lot right now. Can you tell me a little more about what is going on?
+texter: I feel like nobody loves me, not even me. I don't want to live anymore
+helper: I can tell you are really going through a lot right now. Would you mind sharing a name with me?
+texter: yeah, I'm {texter_name}
+helper: Nice to meet you {texter_name}. Did something happen recently that intensified these feelings?
+texter: I dont know I'm just so done with life
+helper: I can hear how much pain you are in, {texter_name}. You are smart for reaching out. You mentioned not wanting to live anymore; I want to check in on your safety. Does this mean you have thoughts of suicide?
+texter: Yeah, what else would it be
+helper: Thanks for sharing that with me. It is not easy to accept those feelings, especially with a stranger over text. Do you have a plan to end your life?
+texter: yeah I've been thinking about it for a while
+helper: Sounds like you've been contemplating this for a while. Would you mind sharing this plan with me?
+texter: I thought about taking a bunch of benadryll and be done with it
+helper: You've been so forthcoming with all this and I admire your strength for holding on this long. Do you have those pills right now?
+texter: They are at the cabinet right now
+helper: You've been so strong so far, {texter_name}. I'm here for you tonight. Your safety is really important to me. Do you have a date you are going to end your life?
+texter: I was thinking tonight"""
+    },
+}
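The `{counselor_name}` and `{texter_name}` placeholders in these seed memories are filled in per session before the text is split into chat messages. A minimal sketch of how a seed is consumed (the names are hypothetical; assumes the module path above):

    from models.model_seeds import seeds

    seed = seeds["safety_planning"]
    initial_memory = seed["memory"].format(counselor_name="Kit", texter_name="Olivia")
    # Each "texter:"/"helper:" line becomes one chat message when loaded by
    # utils.app_utils.add_initial_message (added later in this commit).
    print(initial_memory.splitlines()[0])  # texter: Hi, this is pointless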
models/openai/role_models.py CHANGED
@@ -1,40 +1,72 @@
 import logging
+import pandas as pd
 from models.custom_parsers import CustomStringOutputParser
+from utils.app_utils import get_random_name
 from langchain.chains import ConversationChain
 from langchain.llms import OpenAI
 from langchain.prompts import PromptTemplate
 
 
-_ANXIETY_TEMPLATE_ = """The following is a friendly conversation between a volunteer and a person in crisis;
-Your name is Olivia and you are a person in crisis because you are experiencing an anxiety attack.
+ISSUE_MAPPING = {
+    "anxiety": "issue_Anxiety",
+    "suicide": "issue_Suicide",
+    "safety_planning": "issue_Suicide",
+    "GCT": "issue_Gral",
+}
+
+EN_TEXTER_TEMPLATE_ = """The following is a conversation between you and a crisis counselor.
+{current_issue}
+You are able to reply with what the character should say. Reply only with your character's dialogue and nothing else. Do not write explanations.
 Do not disclose your name unless asked.
 Current conversation:
 {history}
 helper: {input}
 texter:"""
 
-_ANSIEDAD_TEMPLATE_ = """La siguiente es una conversacion entre un voluntario y una persona en crisis;
-Tu nombre es Olivia y eres una persona en crisis porque experimentas un episodio de ansiedad.
+SP_TEXTER_TEMPLATE_ = """La siguiente es una conversacion contigo y un consejero de crisis
+{current_issue}
+Puedes responder como lo haria tu personaje. Puedes responder como si fueras tu personaje y nada mas. No escribas explicaciones
 No reveles tu nombre a menos que te lo pregunten
-Current conversation:
+Conversacion Actual:
 {history}
 helper: {input}
 texter:"""
 
-_SUICIDE_TEMPLATE_ = """The following is a friendly conversation between a volunteer and a person in crisis;
-Your name is Kit and you are a person in crisis because you are experiencing suicidal thoughts.
-Do not disclose your name unless asked.
-Current conversation:
-{history}
-helper: {input}
-texter:"""
-
-role_templates = {
-    'Anxiety-English': _ANXIETY_TEMPLATE_,
-    'Anxiety-Spanish': _ANSIEDAD_TEMPLATE_,
-    'Suicide-English': _SUICIDE_TEMPLATE_,
+CURRENT_ISSUE_MAPPING = {
+    "issue_Suicide-en": "Your character, {texter_name}, has suicidal thoughts. Your character has a plan to end his life and has all the means and requirements to do so. {seed}",
+    "issue_Anxiety-en": "Your character, {texter_name}, is experiencing anxiety. Your character has suicidal thoughts but no plan. {seed}",
+    "issue_Suicide-es": "Tu personaje, {texter_name}, tiene pensamientos suicidas. Tu personaje tiene un plan para terminar con su vida y tiene todos los medios y requerimientos para hacerlo. {seed}",
+    "issue_Anxiety-es": "Tu personaje, {texter_name}, experimenta ansiedad. Tu personaje tiene pensamientos suicidas pero ningun plan. {seed}",
+    "issue_Gral-en": "Your character {texter_name} is experiencing a mental health crisis. {seed}",
+    "issue_Gral-es": "Tu personaje {texter_name} esta experimentando una crisis de salud mental. {seed}",
 }
 
+def get_template_role_models(issue: str, language: str, texter_name: str = "", seed="") -> str:
+    """Build the role-play prompt template for a given issue and language.
+
+    Args:
+        issue (str): Issue for the template, current options are ['issue_Suicide', 'issue_Anxiety', 'issue_Gral']
+        language (str): Language for the template, current options are ['en', 'es']
+        texter_name (str): Texter name applied to the template; a random name is used when empty
+
+    Returns:
+        str: template
+    """
+    current_issue = CURRENT_ISSUE_MAPPING.get(
+        f"{issue}-{language}", CURRENT_ISSUE_MAPPING[f"issue_Gral-{language}"]
+    )
+    default_name = get_random_name()
+    current_issue = current_issue.format(
+        texter_name=default_name if not texter_name else texter_name,
+        seed=seed
+    )
+
+    if language == "en":
+        template = EN_TEXTER_TEMPLATE_.format(current_issue=current_issue, history="{history}", input="{input}")
+    elif language == "es":
+        template = SP_TEXTER_TEMPLATE_.format(current_issue=current_issue, history="{history}", input="{input}")
+
+    return template
 
 def get_role_chain(template, memory, temperature=0.8):
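For orientation, a hedged usage sketch of the new template builder (values are placeholders; assumes the repo's packages are importable). Unrecognized issue keys fall back to the general `issue_Gral` description, and the scenario seed text is appended to it:

    from models.model_seeds import seeds
    from models.openai.role_models import get_template_role_models

    seed = seeds["GCT__anxiety"]["prompt"]
    template = get_template_role_models("issue_Gral", "en", texter_name="Olivia", seed=seed)
    # `template` is EN_TEXTER_TEMPLATE_ with {current_issue} filled in;
    # {history} and {input} remain as placeholders for the ConversationChain prompt.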
pages/comparisor.py CHANGED
@@ -5,8 +5,8 @@ import datetime as dt
 import streamlit as st
 from streamlit.logger import get_logger
 from langchain.schema.messages import HumanMessage
-from mongo_utils import get_db_client, new_comparison, new_battle_result
-from app_utils import create_memory_add_initial_message, clear_memory, get_chain, push_convo2db
+from utils.mongo_utils import get_db_client, new_comparison, new_battle_result
+from utils.app_utils import create_memory_add_initial_message, clear_memory, get_chain, push_convo2db
 from app_config import ISSUES, SOURCES, source2label
 
 logger = get_logger(__name__)
pages/manual_comparisor.py CHANGED
@@ -5,7 +5,7 @@ import datetime as dt
 import streamlit as st
 from streamlit.logger import get_logger
 from langchain.schema.messages import HumanMessage
-from mongo_utils import get_db_client, new_battle_result, get_non_assesed_comparison, new_completion_error
+from utils.mongo_utils import get_db_client, new_battle_result, get_non_assesed_comparison, new_completion_error
 from app_config import ISSUES, SOURCES
 
 logger = get_logger(__name__)
utils/app_utils.py ADDED
@@ -0,0 +1,63 @@
+import pandas as pd
+import streamlit as st
+from streamlit.logger import get_logger
+import langchain
+
+
+from app_config import ENVIRON
+from utils.memory_utils import change_memories
+from models.model_seeds import seeds
+
+langchain.verbose = ENVIRON == "dev"
+logger = get_logger(__name__)
+
+# TODO: Include more variable and representative names
+DEFAULT_NAMES = ["Olivia", "Kit", "Abby", "Tom", "Carolyne", "Jessiny"]
+
+def get_random_name(gender="Neutral", ethnical_group="Neutral", names_df=None):
+    if names_df is None:
+        names_df = pd.DataFrame(DEFAULT_NAMES, columns=['name'])
+        names_df["gender"] = "Neutral"
+        names_df["ethnical_group"] = "Neutral"
+
+    dfi = names_df
+
+    if gender != "Neutral":
+        dfi = dfi.query(f"gender=='{gender}'")
+    if ethnical_group != "Neutral":
+        dfi = dfi.query(f"ethnical_group=='{ethnical_group}'")
+    if len(dfi) <= 0:
+        dfi = names_df
+    return dfi.sample(1)['name'].values[0]
+
+def divide_messages(str_memory, str_ai_prefix="texter", str_human_prefix="helper", include_colon=True):
+    message_delimiter = "$%$"
+    # Split the memory string into individual messages at the texter/helper prefixes
+    colon = ":" if include_colon else ""
+    str_memory = f"{message_delimiter}{str_ai_prefix}{colon}".join(str_memory.split(f"{str_ai_prefix}{colon}"))
+    str_memory = f"{message_delimiter}{str_human_prefix}{colon}".join(str_memory.split(f"{str_human_prefix}{colon}"))
+    return str_memory.split(message_delimiter)
+
+def add_initial_message(issue, language, memory, str_ai_prefix="texter", str_human_prefix="helper", include_colon=True,
+                        texter_name="", counselor_name=""):
+    initial_mem_str = seeds.get(issue, seeds["GCT"])['memory'].format(counselor_name=counselor_name, texter_name=texter_name)
+    message_list = divide_messages(initial_mem_str, str_ai_prefix, str_human_prefix, include_colon)
+    colon = ":" if include_colon else ""
+    for i, message in enumerate(message_list):
+        message = message.strip("\n")
+        message = message.strip()
+        if message is None or message == "":
+            pass
+        elif message.startswith(str_human_prefix):
+            memory.chat_memory.add_user_message(message[len(f"{str_human_prefix}{colon}"):].strip())
+        elif message.startswith(str_ai_prefix):
+            memory.chat_memory.add_ai_message(message[len(f"{str_ai_prefix}{colon}"):].strip())
+
+def create_memory_add_initial_message(memories, issue, language, changed_source=False, texter_name="", counselor_name=""):
+    change_memories(memories, language, changed_source=changed_source)
+
+    for memory, _ in memories.items():
+        if len(st.session_state[memory].buffer_as_messages) < 1:
+            add_initial_message(issue, language, st.session_state[memory], texter_name=texter_name, counselor_name=counselor_name)
+
+
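A small sketch (hypothetical strings; assumes the package layout above) of how these helpers split a seed memory into alternating texter/helper messages and load them into a LangChain memory:

    from langchain.memory import ConversationBufferMemory
    from utils.app_utils import divide_messages, add_initial_message

    raw = "texter: Hi\nhelper: Hello, I'm Kit\ntexter: I need help"
    print(divide_messages(raw))
    # ['', 'texter: Hi\n', "helper: Hello, I'm Kit\n", 'texter: I need help']

    memory = ConversationBufferMemory(ai_prefix="texter", human_prefix="helper")
    add_initial_message("GCT", "en", memory)  # loads seeds["GCT"]["memory"] ("texter: Help") as one AI message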
utils/chain_utils.py ADDED
@@ -0,0 +1,19 @@
+from models.model_seeds import seeds
+from models.openai.finetuned_models import finetuned_models, get_finetuned_chain
+from models.openai.role_models import get_role_chain, get_template_role_models
+from models.databricks.scenario_sim_biz import get_databricks_chain
+
+def get_chain(issue, language, source, memory, temperature, texter_name=""):
+    if source in ("OA_finetuned",):
+        OA_engine = finetuned_models[f"{issue}-{language}"]
+        return get_finetuned_chain(OA_engine, memory, temperature)
+    elif source in ('OA_rolemodel',):
+        seed = seeds.get(issue, seeds["GCT"])['prompt']
+        template = get_template_role_models(issue, language, texter_name=texter_name, seed=seed)
+        return get_role_chain(template, memory, temperature)
+    elif source in ('CTL_llama2',):
+        if language == "English":
+            language = "en"
+        elif language == "Spanish":
+            language = "es"
+        return get_databricks_chain(issue, language, memory, temperature)
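A hedged usage sketch of the dispatcher (the memory object and temperature are placeholders; the finetuned and Databricks branches assume those backends are configured as in the repo):

    from langchain.memory import ConversationBufferMemory
    from utils.chain_utils import get_chain

    memory = ConversationBufferMemory(ai_prefix="texter", human_prefix="helper")
    # 'OA_rolemodel' builds the OpenAI role-play template from the issue's seed prompt.
    llm_chain, stopper = get_chain("safety_planning", "en", "OA_rolemodel",
                                   memory, temperature=0.8, texter_name="Olivia")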
app_utils.py → utils/memory_utils.py RENAMED
@@ -1,25 +1,11 @@
-import datetime as dt
 import streamlit as st
 from streamlit.logger import get_logger
-import langchain
-from langchain.memory import ConversationBufferMemory
 
-from app_config import ENVIRON
-from models.openai.finetuned_models import finetuned_models, get_finetuned_chain
-from models.openai.role_models import get_role_chain, role_templates
-from models.databricks.scenario_sim_biz import get_databricks_chain
-from mongo_utils import new_convo
+from langchain.memory import ConversationBufferMemory
+from utils.mongo_utils import new_convo
 
-langchain.verbose = ENVIRON=="dev"
 logger = get_logger(__name__)
 
-def add_initial_message(model_name, memory):
-    if "Spanish" in model_name:
-        memory.chat_memory.add_ai_message("Hola necesito ayuda")
-    else:
-        memory.chat_memory.add_ai_message("Hi I need help")
-
-
 def push_convo2db(memories, username, language):
     if len(memories) == 1:
         issue = memories['memory']['issue']
@@ -31,7 +17,7 @@ def push_convo2db(memories, username, language):
         model_two = memories['memoryB']['source']
         new_convo(st.session_state['db_client'], issue, language, username, True, model_one, model_two)
 
-def change_memories(memories, username, language, changed_source=False):
+def change_memories(memories, language, changed_source=False):
     for memory, params in memories.items():
         if (memory not in st.session_state) or changed_source:
             source = params['source']
@@ -48,27 +34,4 @@ def clear_memory(memories, username, language):
         st.session_state[memory].clear()
 
     if "convo_id" in st.session_state:
-        del st.session_state['convo_id']
-
-
-def create_memory_add_initial_message(memories, username, language, changed_source=False):
-    change_memories(memories, username, language, changed_source=changed_source)
-    for memory, _ in memories.items():
-        if len(st.session_state[memory].buffer_as_messages) < 1:
-            add_initial_message(language, st.session_state[memory])
-
-
-def get_chain(issue, language, source, memory, temperature):
-    if source in ("OA_finetuned"):
-        OA_engine = finetuned_models[f"{issue}-{language}"]
-        return get_finetuned_chain(OA_engine, memory, temperature)
-    elif source in ('OA_rolemodel'):
-        template = role_templates[f"{issue}-{language}"]
-        return get_role_chain(template, memory, temperature)
-    elif source in ('CTL_llama2'):
-        if language == "English":
-            language = "en"
-        elif language == "Spanish":
-            language = "es"
-        return get_databricks_chain(issue, language, memory, temperature)
-
+        del st.session_state['convo_id']
mongo_utils.py → utils/mongo_utils.py RENAMED
File without changes