# recallritual/app.py
import hashlib
import json
import os
import pathlib
import random

import gradio as gr

def load_content(file_path):
    """Load question/answer items from a JSONL file (one JSON object per line)."""
    if not file_path.exists():
        raise FileNotFoundError(f"Content file not found: {file_path}")
    with open(file_path, 'r', encoding='utf-8') as file:
        content = [json.loads(line) for line in file if line.strip()]
    for item in content:
        # Derive a stable ID from the text itself so audio files can be
        # matched to items across runs.
        item["id"] = hashlib.md5((item["question"] + item["answer"]).encode()).hexdigest()
        item["views"] = item.get("views", 0)
    return content
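
# A minimal content.jsonl sketch (hypothetical data; "question" and "answer"
# are required by load_content, "views" is optional and defaults to 0):
#   {"question": "What does JSONL stand for?", "answer": "JSON Lines"}
#   {"question": "Which hash derives the item ID?", "answer": "MD5", "views": 2}
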
def select_next_question(content, seen_questions):
    """Prefer an unseen question; once all are seen, favor the least-viewed ones."""
    unseen_questions = [item for item in content if item["id"] not in seen_questions]
    if unseen_questions:
        return random.choice(unseen_questions)
    # Everything has been seen: pick among the items with the fewest views.
    min_views = min(item["views"] for item in content)
    least_viewed = [item for item in content if item["views"] == min_views]
    return random.choice(least_viewed)
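
# Behavior sketch (hypothetical items A, B, C): while seen_questions == {A["id"]},
# only B or C can be returned; once all three IDs are seen, the pick falls back
# to whichever items currently have the fewest views.
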
class InterfaceCreator:
    def __init__(self, content, audio_dir):
        self.content = content
        self.seen_questions = set()  # IDs already shown in this session
        self.audio_dir = audio_dir

    def get_audio_path(self, item_id, audio_type):
        # audio_type is either 'question' or 'answer'.
        return os.path.join(self.audio_dir, f"{item_id}_{audio_type}.mp3")
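
    # Expected audio layout, inferred from get_audio_path (every clip is
    # optional; missing files are skipped with a console warning):
    #   audio/<md5 of question+answer>_question.mp3
    #   audio/<md5 of question+answer>_answer.mp3
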
    def _find_audio(self, item_id, audio_type):
        # Resolve the clip for this item, returning None when the file is
        # missing so the gr.Audio component is simply cleared.
        path = self.get_audio_path(item_id, audio_type)
        if not os.path.exists(path):
            print(f"Warning: Audio file not found: {path}")
            return None
        print(f"Audio file found: {path}")
        return path

    def update_interface(self, current_item=None):
        # Two steps per card: show the question, then the answer. The returned
        # tuple fills (button label, question text, answer text, audio, state).
        if current_item is None or current_item.get('state') == 'answer':
            # Fresh page load, or the answer was just shown: advance to a new question.
            new_item = select_next_question(self.content, self.seen_questions)
            new_item['state'] = 'question'
            new_item['views'] += 1
            self.seen_questions.add(new_item["id"])
            question_audio = self._find_audio(new_item['id'], 'question')
            return ("Show Answer ⏎", new_item['question'], "", question_audio, new_item)
        # A question is on screen: reveal its answer.
        current_item['state'] = 'answer'
        answer_audio = self._find_audio(current_item['id'], 'answer')
        return ("Next Question ⏎", current_item['question'], current_item['answer'], answer_audio, current_item)
    def create_interface(self):
        # CSS flattens the Q/A textboxes into plain transparent text and hides
        # the audio player and the Gradio footer.
        custom_css = """
        .center-text { text-align: center; }
        #question-text, #answer-text { border: none; background: transparent; font-size: 20px; }
        #question-text textarea, #answer-text textarea { border: none; background: transparent; font-size: 20px; resize: none; overflow: hidden; min-height: 100px; max-height: 400px; }
        #audio-output { display: none !important; }
        #audio-output audio { display: none !important; }
        footer { display: none !important; }
        """
        shortcut_js = """
        <script>
        // Auto-grow textareas to fit their content.
        function autoResize(textarea) {
            textarea.style.height = 'auto';
            textarea.style.height = textarea.scrollHeight + 'px';
        }
        // Pressing Enter anywhere outside a textarea clicks the primary button.
        document.addEventListener('keydown', (e) => {
            if (e.key === "Enter" && e.target.tagName.toLowerCase() !== "textarea") {
                document.getElementById("primary-button").click();
            }
        });
        document.addEventListener('DOMContentLoaded', () => {
            // Gradio renders its components after DOMContentLoaded, so the hidden
            // audio element may not exist yet; guard every access to it.
            const audioElement = document.querySelector('#audio-output audio');
            let audioQueue = [];
            function playNextAudio() {
                if (!audioElement || audioQueue.length === 0) return;
                const nextAudio = audioQueue.shift();
                console.log("Attempting to play audio:", nextAudio);
                if (nextAudio) {
                    audioElement.src = typeof nextAudio === 'string'
                        ? nextAudio
                        : URL.createObjectURL(new Blob([nextAudio], {type: 'audio/mpeg'}));
                    audioElement.play().catch(e => console.error("Error playing audio:", e));
                } else {
                    console.log("No audio to play");
                }
            }
            if (audioElement) {
                audioElement.addEventListener('ended', playNextAudio);
            }
            function setupGradioConfig() {
                if (window.gradio_config) {
                    window.gradio_config.custom_interfaces = window.gradio_config.custom_interfaces || {};
                    window.gradio_config.custom_interfaces["audio"] = (data) => { audioQueue = data; playNextAudio(); };
                }
            }
            setupGradioConfig();
            if (window.gradio_config) { window.gradio_config.artificialEventLoop = setupGradioConfig; }
            document.querySelectorAll('#question-text textarea, #answer-text textarea').forEach(textarea => {
                textarea.addEventListener('input', () => autoResize(textarea));
                autoResize(textarea);
            });
            // Use a MutationObserver to wire up textareas Gradio adds later.
            new MutationObserver((mutations) => {
                mutations.forEach((mutation) => {
                    if (mutation.type === 'childList') {
                        mutation.addedNodes.forEach((node) => {
                            if (node.nodeType === Node.ELEMENT_NODE && node.tagName === 'TEXTAREA') {
                                autoResize(node);
                                node.addEventListener('input', () => autoResize(node));
                            }
                        });
                    }
                });
            }).observe(document.body, { childList: true, subtree: true });
        });
        </script>
        """
        with gr.Blocks(css=custom_css, head=shortcut_js) as demo:
            current_item = gr.State(None)
            primary_button = gr.Button("Show Answer ⏎", variant="primary", elem_id="primary-button")
            question_text = gr.Textbox(elem_id="question-text", show_label=False, label="Question", lines=5, max_lines=20, interactive=False)
            answer_text = gr.Textbox(elem_id="answer-text", show_label=False, label="Answer", lines=5, max_lines=20, interactive=False)
            # Keep the player in the DOM for the script above; the CSS hides it.
            audio_output = gr.Audio(elem_id="audio-output", visible=True, autoplay=True)
            primary_button.click(
                self.update_interface,
                inputs=[current_item],
                outputs=[primary_button, question_text, answer_text, audio_output, current_item],
            )
            # Show the first question as soon as the page loads.
            demo.load(self.update_interface, outputs=[primary_button, question_text, answer_text, audio_output, current_item])
        return demo

if __name__ == "__main__":
    BASE_DIR = pathlib.Path.cwd()
    CONTENT_FILE = BASE_DIR / "content.jsonl"
    AUDIO_DIR = BASE_DIR / "audio"

    content = load_content(CONTENT_FILE)
    interface_creator = InterfaceCreator(content, AUDIO_DIR)
    demo = interface_creator.create_interface()
    demo.launch()
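
# To run locally (assuming content.jsonl and an audio/ directory sit in the
# current working directory):
#   python app.py
# Gradio then serves the app, by default at http://127.0.0.1:7860.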