#semantic_analysis.py
import streamlit as st
import spacy
import networkx as nx
import matplotlib.pyplot as plt
from collections import Counter
# The spaCy model is not loaded globally; callers pass an nlp instance to perform_semantic_analysis
# Define colors for grammatical categories
POS_COLORS = {
    'ADJ': '#FFA07A',    # Light Salmon
    'ADP': '#98FB98',    # Pale Green
    'ADV': '#87CEFA',    # Light Sky Blue
    'AUX': '#DDA0DD',    # Plum
    'CCONJ': '#F0E68C',  # Khaki
    'DET': '#FFB6C1',    # Light Pink
    'INTJ': '#FF6347',   # Tomato
    'NOUN': '#90EE90',   # Light Green
    'NUM': '#FAFAD2',    # Light Goldenrod Yellow
    'PART': '#D3D3D3',   # Light Gray
    'PRON': '#FFA500',   # Orange
    'PROPN': '#20B2AA',  # Light Sea Green
    'SCONJ': '#DEB887',  # Burlywood
    'SYM': '#7B68EE',    # Medium Slate Blue
    'VERB': '#FF69B4',   # Hot Pink
    'X': '#A9A9A9',      # Dark Gray
}
POS_TRANSLATIONS = {
    'es': {
        'ADJ': 'Adjetivo',
        'ADP': 'Adposición',
        'ADV': 'Adverbio',
        'AUX': 'Auxiliar',
        'CCONJ': 'Conjunción Coordinante',
        'DET': 'Determinante',
        'INTJ': 'Interjección',
        'NOUN': 'Sustantivo',
        'NUM': 'Número',
        'PART': 'Partícula',
        'PRON': 'Pronombre',
        'PROPN': 'Nombre Propio',
        'SCONJ': 'Conjunción Subordinante',
        'SYM': 'Símbolo',
        'VERB': 'Verbo',
        'X': 'Otro',
    },
    'en': {
        'ADJ': 'Adjective',
        'ADP': 'Adposition',
        'ADV': 'Adverb',
        'AUX': 'Auxiliary',
        'CCONJ': 'Coordinating Conjunction',
        'DET': 'Determiner',
        'INTJ': 'Interjection',
        'NOUN': 'Noun',
        'NUM': 'Number',
        'PART': 'Particle',
        'PRON': 'Pronoun',
        'PROPN': 'Proper Noun',
        'SCONJ': 'Subordinating Conjunction',
        'SYM': 'Symbol',
        'VERB': 'Verb',
        'X': 'Other',
    },
    'fr': {
        'ADJ': 'Adjectif',
        'ADP': 'Adposition',
        'ADV': 'Adverbe',
        'AUX': 'Auxiliaire',
        'CCONJ': 'Conjonction de Coordination',
        'DET': 'Déterminant',
        'INTJ': 'Interjection',
        'NOUN': 'Nom',
        'NUM': 'Nombre',
        'PART': 'Particule',
        'PRON': 'Pronom',
        'PROPN': 'Nom Propre',
        'SCONJ': 'Conjonction de Subordination',
        'SYM': 'Symbole',
        'VERB': 'Verbe',
        'X': 'Autre',
    }
}
########################################################################################################################################
def count_pos(doc):
    """Count part-of-speech tags in the doc, ignoring punctuation."""
    return Counter(token.pos_ for token in doc if token.pos_ != 'PUNCT')
def extract_entities(doc):
    """Group named entities into people, concepts, places, and dates."""
    entities = {
        "Personas": [],
        "Conceptos": [],
        "Lugares": [],
        "Fechas": []
    }
    for ent in doc.ents:
        if ent.label_ in ("PER", "PERSON"):  # "PER" in the es/fr models, "PERSON" in the en models
            entities["Personas"].append(ent.text)
        elif ent.label_ in ("LOC", "GPE"):
            entities["Lugares"].append(ent.text)
        elif ent.label_ == "DATE":
            entities["Fechas"].append(ent.text)
        else:
            entities["Conceptos"].append(ent.text)
    return entities
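# Illustrative example only (the entity texts below are hypothetical, not from this module):
# for a Spanish doc, extract_entities(doc) might return something like
#   {"Personas": ["Gabriel García Márquez"], "Conceptos": ["realismo mágico"],
#    "Lugares": ["Aracataca"], "Fechas": ["1967"]}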
def visualize_context_graph(doc, lang):
    """Draw a graph linking each person to the entities that co-occur in their sentences."""
    G = nx.Graph()
    entities = extract_entities(doc)
    # Add nodes
    for category, items in entities.items():
        for item in items:
            G.add_node(item, category=category)
    # Add edges: connect each person to the other entities mentioned in the same sentence
    for sent in doc.sents:
        sent_entities = [ent for ent in sent.ents if ent.text in G.nodes()]
        person = next((ent for ent in sent_entities if ent.label_ in ("PER", "PERSON")), None)
        if person:
            for ent in sent_entities:
                if ent != person:
                    G.add_edge(person.text, ent.text)
    # Visualize
    fig = plt.figure(figsize=(20, 15))
    pos = nx.spring_layout(G, k=0.5, iterations=50)
    color_map = {"Personas": "lightblue", "Conceptos": "lightgreen", "Lugares": "lightcoral", "Fechas": "lightyellow"}
    node_colors = [color_map[G.nodes[node]['category']] for node in G.nodes()]
    nx.draw(G, pos, node_color=node_colors, with_labels=True, node_size=3000, font_size=8, font_weight='bold')
    # Add a legend
    legend_elements = [plt.Rectangle((0, 0), 1, 1, fc=color, edgecolor='none') for color in color_map.values()]
    plt.legend(legend_elements, color_map.keys(), loc='upper left', bbox_to_anchor=(1, 1))
    plt.title("Análisis del Contexto" if lang == 'es' else "Context Analysis" if lang == 'en' else "Analyse du Contexte", fontsize=20)
    plt.axis('off')
    return fig  # return the Figure object so each plot can be rendered independently
def create_semantic_graph(doc, lang):
    """Build a POS-colored dependency graph over all non-punctuation tokens (lang is accepted but unused here)."""
    G = nx.Graph()
    pos_counts = count_pos(doc)
    for token in doc:
        if token.pos_ != 'PUNCT':
            G.add_node(token.text,
                       pos=token.pos_,
                       color=POS_COLORS.get(token.pos_, '#CCCCCC'),  # default gray for unknown tags
                       size=pos_counts.get(token.pos_, 1) * 100)  # minimum size when there is no count
    for token in doc:
        if token.dep_ != "ROOT" and token.head.text in G.nodes and token.text in G.nodes:
            G.add_edge(token.head.text, token.text, label=token.dep_)
    return G, pos_counts
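# create_semantic_graph is not called by perform_semantic_analysis below. The sketch that follows
# shows one way its output could be drawn using the node attributes it sets ('color' and 'size');
# draw_full_semantic_graph is a hypothetical helper, not part of the original module.
def draw_full_semantic_graph(doc, lang):
    """Sketch: render the full POS-colored graph produced by create_semantic_graph."""
    G, _ = create_semantic_graph(doc, lang)
    fig = plt.figure(figsize=(20, 15))
    layout = nx.spring_layout(G, k=0.7, iterations=50)
    node_colors = [G.nodes[n]['color'] for n in G.nodes()]
    node_sizes = [G.nodes[n]['size'] for n in G.nodes()]
    nx.draw(G, layout, node_color=node_colors, node_size=node_sizes,
            with_labels=True, font_size=8, edge_color='gray')
    plt.axis('off')
    return fig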
def visualize_semantic_relations(doc, lang):
    """Draw dependency relations among the 20 most frequent words in the doc."""
    G = nx.DiGraph()  # directed, since dependency arcs run from head to dependent and arrows are drawn
    word_freq = Counter(token.text.lower() for token in doc if token.pos_ not in ['PUNCT', 'SPACE'])
    top_words = [word for word, _ in word_freq.most_common(20)]  # top 20 most frequent words
    for token in doc:
        if token.text.lower() in top_words:
            G.add_node(token.text, pos=token.pos_)
    for token in doc:
        if token.text.lower() in top_words and token.head.text.lower() in top_words:
            G.add_edge(token.head.text, token.text, label=token.dep_)
    fig = plt.figure(figsize=(24, 18))
    pos = nx.spring_layout(G, k=0.9, iterations=50)
    # Fall back to gray for nodes created implicitly by add_edge (they carry no 'pos' attribute)
    node_colors = [POS_COLORS.get(G.nodes[node].get('pos', 'X'), '#CCCCCC') for node in G.nodes()]
    nx.draw(G, pos, node_color=node_colors, with_labels=True,
            font_size=10, font_weight='bold', arrows=True, arrowsize=20, width=2, edge_color='gray')
    edge_labels = nx.get_edge_attributes(G, 'label')
    nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels, font_size=8)
    plt.title("Relaciones Semánticas Relevantes" if lang == 'es' else "Relevant Semantic Relations" if lang == 'en' else "Relations Sémantiques Pertinentes",
              fontsize=20, fontweight='bold')
    plt.axis('off')
    legend_elements = [plt.Rectangle((0, 0), 1, 1, facecolor=POS_COLORS.get(pos_tag, '#CCCCCC'), edgecolor='none',
                                     label=POS_TRANSLATIONS[lang].get(pos_tag, pos_tag))
                       for pos_tag in set(nx.get_node_attributes(G, 'pos').values())]
    plt.legend(handles=legend_elements, loc='center left', bbox_to_anchor=(1, 0.5), fontsize=12)
    return fig  # return the Figure object so the caller can render it directly
def perform_semantic_analysis(text, nlp, lang):
    """Run the full semantic analysis: context graph, relations graph, and entity lists."""
    doc = nlp(text)
    context_graph = visualize_context_graph(doc, lang)
    relations_graph = visualize_semantic_relations(doc, lang)
    # Extract entities so they can also be displayed as a list
    entities = extract_entities(doc)
    return context_graph, relations_graph, entities
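# Minimal usage sketch. Assumptions not part of this module: spaCy's "es_core_news_sm" model is
# installed, and the sample sentence and output filenames are placeholders.
if __name__ == "__main__":
    nlp_es = spacy.load("es_core_news_sm")
    sample = "Gabriel García Márquez nació en Aracataca en 1927 y escribió Cien años de soledad."
    context_fig, relations_fig, found_entities = perform_semantic_analysis(sample, nlp_es, 'es')
    print(found_entities)
    # The returned objects are matplotlib Figures; in the Streamlit app they would typically be
    # passed to st.pyplot(...). Here they are simply written to disk for inspection.
    context_fig.savefig("context_graph.png", bbox_inches='tight')
    relations_fig.savefig("semantic_relations.png", bbox_inches='tight')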