KeenWoo committed on
Commit 10a67b7 · verified · 1 Parent(s): d7c3452

Upload 2 files

Files changed (2)
  1. alz_companion/agent.py +338 -0
  2. alz_companion/prompts.py +231 -0
alz_companion/agent.py ADDED
@@ -0,0 +1,338 @@
+
+ from __future__ import annotations
+ import os
+ import json
+ import base64
+ import time
+ import tempfile
+ import re
+
+ from typing import List, Dict, Any, Optional
+
+ try:
+     from openai import OpenAI
+ except Exception:
+     OpenAI = None
+
+ from langchain.schema import Document
+ from langchain_community.vectorstores import FAISS
+ from langchain_community.embeddings import HuggingFaceEmbeddings
+
+ try:
+     from gtts import gTTS
+ except Exception:
+     gTTS = None
+
+ from .prompts import (
+     SYSTEM_TEMPLATE, ANSWER_TEMPLATE_CALM, ANSWER_TEMPLATE_ADQ,
+     SAFETY_GUARDRAILS, RISK_FOOTER, render_emotion_guidelines,
+     NLU_ROUTER_PROMPT, SPECIALIST_CLASSIFIER_PROMPT,
+     ROUTER_PROMPT,
+     ANSWER_TEMPLATE_FACTUAL,
+     ANSWER_TEMPLATE_GENERAL_KNOWLEDGE,
+     ANSWER_TEMPLATE_GENERAL,
+     QUERY_EXPANSION_PROMPT
+ )
+
+ # -----------------------------
+ # Multimodal Processing Functions
+ # -----------------------------
+
+ def _openai_client() -> Optional[OpenAI]:
+     api_key = os.getenv("OPENAI_API_KEY", "").strip()
+     return OpenAI(api_key=api_key) if api_key and OpenAI else None
+
+ def describe_image(image_path: str) -> str:
+     client = _openai_client()
+     if not client:
+         return "(Image description failed: OpenAI API key not configured.)"
+     try:
+         extension = os.path.splitext(image_path)[1].lower()
+         mime_type = f"image/{'jpeg' if extension in ['.jpg', '.jpeg'] else extension.strip('.')}"
+         with open(image_path, "rb") as image_file:
+             base64_image = base64.b64encode(image_file.read()).decode('utf-8')
+         response = client.chat.completions.create(
+             model="gpt-4o",
+             messages=[
+                 {
+                     "role": "user",
+                     "content": [
+                         {"type": "text", "text": "Describe this image concisely for a memory journal. Focus on people, places, and key objects. Example: 'A photo of John and Mary smiling on a bench at the park.'"},
+                         {"type": "image_url", "image_url": {"url": f"data:{mime_type};base64,{base64_image}"}}
+                     ],
+                 }
+             ], max_tokens=100)
+         return response.choices[0].message.content or "No description available."
+     except Exception as e:
+         return f"[Image description error: {e}]"
+
+ # -----------------------------
+ # NLU Classification Function (Dynamic Version)
+ # -----------------------------
+
+ def detect_tags_from_query(
+     query: str,
+     nlu_vectorstore: FAISS,
+     behavior_options: list,
+     emotion_options: list,
+     topic_options: list,
+     context_options: list,
+     settings: dict = None
+ ) -> Dict[str, Any]:
+     """Uses a dynamic two-step NLU process: Route -> Retrieve Examples -> Classify."""
+
+     # --- STEP 1: Route the query to determine the primary goal ---
+     router_prompt = NLU_ROUTER_PROMPT.format(query=query)
+     primary_goal_raw = call_llm([{"role": "user", "content": router_prompt}], temperature=0.0).strip().lower()
+
+     # --- FIX START: Use separate variables for the filter (lowercase) and the prompt (Title Case) ---
+     goal_for_filter = "practical_planning" if "practical" in primary_goal_raw else "emotional_support"
+     goal_for_prompt = "Practical Planning" if "practical" in primary_goal_raw else "Emotional Support"
+     # --- FIX END ---
+
+     if settings and settings.get("debug_mode"):
+         print(f"\n--- NLU Router ---\nGoal: {goal_for_prompt} (Filter: '{goal_for_filter}')\n------------------\n")
+
+     # --- STEP 2: Retrieve relevant examples from the NLU vector store ---
+     retriever = nlu_vectorstore.as_retriever(
+         search_kwargs={"k": 2, "filter": {"primary_goal": goal_for_filter}}  # <-- Use the correct lowercase filter
+     )
+     retrieved_docs = retriever.invoke(query)
+
+     # Format the retrieved examples for the prompt
+     selected_examples = "\n".join(
+         f"User Query: \"{doc.page_content}\"\n{json.dumps(doc.metadata['classification'], indent=4)}"
+         for doc in retrieved_docs
+     )
+     if not selected_examples:
+         selected_examples = "(No relevant examples found)"
+         if settings and settings.get("debug_mode"):
+             print("WARNING: NLU retriever found no examples for this query.")
+
+
+     # --- STEP 3: Use the Specialist Classifier with retrieved examples ---
+     behavior_str = ", ".join(f'"{opt}"' for opt in behavior_options if opt != "None")
+     emotion_str = ", ".join(f'"{opt}"' for opt in emotion_options if opt != "None")
+     topic_str = ", ".join(f'"{opt}"' for opt in topic_options if opt != "None")
+     context_str = ", ".join(f'"{opt}"' for opt in context_options if opt != "None")
+
+     prompt = SPECIALIST_CLASSIFIER_PROMPT.format(
+         primary_goal=goal_for_prompt,  # Use Title Case for the prompt text
+         examples=selected_examples,
+         behavior_options=behavior_str,
+         emotion_options=emotion_str,
+         topic_options=topic_str,
+         context_options=context_str,
+         query=query
+     )
+
+     messages = [{"role": "system", "content": "You are a helpful NLU classification assistant."}, {"role": "user", "content": prompt}]
+     response_str = call_llm(messages, temperature=0.1)
+
+     if settings and settings.get("debug_mode"):
+         print(f"\n--- NLU Specialist Full Response ---\n{response_str}\n----------------------------------\n")
+
+     # --- STEP 4: Parse the final result ---
+     result_dict = {"detected_behaviors": [], "detected_emotion": "None", "detected_topic": "None", "detected_contexts": []}
+     try:
+         start_brace = response_str.find('{')
+         end_brace = response_str.rfind('}')
+         if start_brace != -1 and end_brace > start_brace:
+             json_str = response_str[start_brace : end_brace + 1]
+             result = json.loads(json_str)
+             result_dict["detected_behaviors"] = [b for b in result.get("detected_behaviors", []) if b in behavior_options]
+             result_dict["detected_emotion"] = result.get("detected_emotion", "None")
+             result_dict["detected_topic"] = result.get("detected_topic", "None")
+             result_dict["detected_contexts"] = [c for c in result.get("detected_contexts", []) if c in context_options]
+         return result_dict
+     except (json.JSONDecodeError, AttributeError) as e:
+         print(f"ERROR parsing NLU Specialist JSON: {e}")
+         return result_dict
+
+ # -----------------------------
+ # Embeddings & VectorStore
+ # -----------------------------
+
+ def _default_embeddings():
+     model_name = os.getenv("EMBEDDINGS_MODEL", "sentence-transformers/all-MiniLM-L6-v2")
+     return HuggingFaceEmbeddings(model_name=model_name)
+
+ def build_or_load_vectorstore(docs: List[Document], index_path: str, is_personal: bool = False) -> FAISS:
+     os.makedirs(os.path.dirname(index_path), exist_ok=True)
+     if os.path.isdir(index_path) and os.path.exists(os.path.join(index_path, "index.faiss")):
+         try:
+             return FAISS.load_local(index_path, _default_embeddings(), allow_dangerous_deserialization=True)
+         except Exception: pass
+     if is_personal and not docs:
+         docs = [Document(page_content="(This is the start of the personal memory journal.)", metadata={"source": "placeholder"})]
+     vs = FAISS.from_documents(docs, _default_embeddings())
+     vs.save_local(index_path)
+     return vs
+
+ def texts_from_jsonl(path: str) -> List[Document]:
+     out: List[Document] = []
+     try:
+         with open(path, "r", encoding="utf-8") as f:
+             for i, line in enumerate(f):
+                 obj = json.loads(line.strip())
+                 txt = obj.get("text") or ""
+                 if not txt.strip(): continue
+                 md = {"source": os.path.basename(path), "chunk": i}
+                 for k in ("behaviors", "emotion", "topic_tags", "context_tags"):
+                     if k in obj and obj[k]: md[k] = obj[k]
+                 out.append(Document(page_content=txt, metadata=md))
+     except Exception: return []
+     return out
+
+ def bootstrap_vectorstore(sample_paths: List[str] | None = None, index_path: str = "data/faiss_index") -> FAISS:
+     docs: List[Document] = []
+     for p in (sample_paths or []):
+         try:
+             if p.lower().endswith(".jsonl"):
+                 docs.extend(texts_from_jsonl(p))
+             else:
+                 with open(p, "r", encoding="utf-8", errors="ignore") as fh:
+                     docs.append(Document(page_content=fh.read(), metadata={"source": os.path.basename(p)}))
+         except Exception: continue
+     if not docs:
+         docs = [Document(page_content="(empty index)", metadata={"source": "placeholder"})]
+     return build_or_load_vectorstore(docs, index_path=index_path)
+
+ # -----------------------------
+ # LLM Call
+ # -----------------------------
+ def call_llm(messages: List[Dict[str, str]], temperature: float = 0.6, stop: Optional[List[str]] = None) -> str:
+     client = _openai_client()
+     model = os.getenv("OPENAI_MODEL", "gpt-4o-mini")
+     if not client:
+         return "(Offline Mode: OpenAI API key not configured.)"
+     try:
+         api_args = {"model": model, "messages": messages, "temperature": float(temperature if temperature is not None else 0.6)}
+         if stop: api_args["stop"] = stop
+         resp = client.chat.completions.create(**api_args)
+         return (resp.choices[0].message.content or "").strip()
+     except Exception as e:
+         return f"[LLM API Error: {e}]"
+
+ # -----------------------------
+ # Prompting & RAG Chain
+ # -----------------------------
+
+ def make_rag_chain(
+     vs_general: FAISS,
+     vs_personal: FAISS,
+     *,
+     role: str = "patient",
+     temperature: float = 0.6,
+     language: str = "English",
+     patient_name: str = "the patient",
+     caregiver_name: str = "the caregiver",
+     tone: str = "warm",
+ ):
+     def _format_docs(docs: List[Document], default_msg: str) -> str:
+         if not docs: return default_msg
+         unique_docs = {doc.page_content: doc for doc in docs}.values()
+         return "\n".join([f"- {d.page_content.strip()}" for d in unique_docs])
+
+     def _answer_fn(query: str, chat_history: List[Dict[str, str]], scenario_tag: Optional[str] = None, emotion_tag: Optional[str] = None, topic_tag: Optional[str] = None, context_tags: Optional[List[str]] = None) -> Dict[str, Any]:
+         router_messages = [{"role": "user", "content": ROUTER_PROMPT.format(query=query)}]
+         query_type = call_llm(router_messages, temperature=0.0).strip().lower()
+         print(f"Query classified as: {query_type}")
+
+         system_message = SYSTEM_TEMPLATE.format(tone=tone, language=language, patient_name=patient_name or "the patient", caregiver_name=caregiver_name or "the caregiver", guardrails=SAFETY_GUARDRAILS)
+         messages = [{"role": "system", "content": system_message}, *chat_history]
+
+         if "general_knowledge_question" in query_type:
+             user_prompt = ANSWER_TEMPLATE_GENERAL_KNOWLEDGE.format(question=query, language=language)
+             messages.append({"role": "user", "content": user_prompt})
+             return {"answer": call_llm(messages, temperature=temperature), "sources": ["General Knowledge"]}
+
+         elif "factual_question" in query_type:
+             expansion_prompt = QUERY_EXPANSION_PROMPT.format(question=query)
+             expansion_response = call_llm([{"role": "user", "content": expansion_prompt}], temperature=0.1)
+             try:
+                 expanded_queries = json.loads(expansion_response.strip().replace("```json", "").replace("```", ""))
+                 search_queries = [query] + expanded_queries
+             except json.JSONDecodeError:
+                 search_queries = [query]
+
+             all_docs = []
+             for q in search_queries:
+                 all_docs.extend(vs_personal.similarity_search(q, k=2))
+                 all_docs.extend(vs_general.similarity_search(q, k=2))
+             context = _format_docs(all_docs, "(No relevant information found.)")
+             user_prompt = ANSWER_TEMPLATE_FACTUAL.format(context=context, question=query, language=language)
+             messages.append({"role": "user", "content": user_prompt})
+             return {"answer": call_llm(messages, temperature=temperature), "sources": list(set(d.metadata.get("source", "unknown") for d in all_docs))}
+
+         elif "general_conversation" in query_type:
+             user_prompt = ANSWER_TEMPLATE_GENERAL.format(question=query, language=language)
+             messages.append({"role": "user", "content": user_prompt})
+             return {"answer": call_llm(messages, temperature=temperature), "sources": []}
+
+         else: # Default to caregiving logic
+             search_filter = {}
+             if scenario_tag: search_filter["behaviors"] = scenario_tag.lower()
+             if emotion_tag: search_filter["emotion"] = emotion_tag.lower()
+             if topic_tag: search_filter["topic_tags"] = topic_tag.lower()
+             if context_tags: search_filter["context_tags"] = {"in": [tag.lower() for tag in context_tags]}
+
+             personal_docs = vs_personal.similarity_search(query, k=3)
+             general_docs = vs_general.similarity_search(query, k=3)
+             if search_filter:
+                 personal_docs.extend(vs_personal.similarity_search(query, k=3, filter=search_filter))
+                 general_docs.extend(vs_general.similarity_search(query, k=3, filter=search_filter))
+
+             all_docs_care = list({doc.page_content: doc for doc in personal_docs + general_docs}.values())
+             personal_context = _format_docs([d for d in all_docs_care if d in personal_docs], "(No relevant personal memories found.)")
+             general_context = _format_docs([d for d in all_docs_care if d in general_docs], "(No general guidance found.)")
+
+             first_emotion = next((d.metadata.get("emotion") for d in all_docs_care if d.metadata.get("emotion")), None)
+             emotions_context = render_emotion_guidelines(first_emotion or emotion_tag)
+
+             template = ANSWER_TEMPLATE_ADQ if any([scenario_tag, emotion_tag, first_emotion]) else ANSWER_TEMPLATE_CALM
+             if template == ANSWER_TEMPLATE_ADQ:
+                 user_prompt = template.format(general_context=general_context, personal_context=personal_context, question=query, scenario_tag=scenario_tag, emotions_context=emotions_context, role=role, language=language)
+             else:
+                 combined_context = f"General Guidance:\n{general_context}\n\nPersonal Memories:\n{personal_context}"
+                 user_prompt = template.format(context=combined_context, question=query, language=language)
+
+             messages.append({"role": "user", "content": user_prompt})
+             answer = call_llm(messages, temperature=temperature)
+
+             if scenario_tag and scenario_tag.lower() in ["exit_seeking", "wandering"]:
+                 answer += f"\n\n---\n{RISK_FOOTER}"
+
+             return {"answer": answer, "sources": list(set(d.metadata.get("source", "unknown") for d in all_docs_care))}
+     return _answer_fn
+
+ def answer_query(chain, question: str, **kwargs) -> Dict[str, Any]:
+     if not callable(chain): return {"answer": "[Error: RAG chain is not callable]", "sources": []}
+     try:
+         return chain(question, **kwargs)
+     except Exception as e:
+         print(f"ERROR in answer_query: {e}")
+         return {"answer": f"[Error executing chain: {e}]", "sources": []}
+
+ # -----------------------------
+ # TTS & Transcription
+ # -----------------------------
+ def synthesize_tts(text: str, lang: str = "en"):
+     if not text or gTTS is None: return None
+     try:
+         with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as fp:
+             tts = gTTS(text=text, lang=(lang or "en"))
+             tts.save(fp.name)
+             return fp.name
+     except Exception:
+         return None
+
+ def transcribe_audio(filepath: str, lang: str = "en"):
+     client = _openai_client()
+     if not client: return "[Transcription failed: API key not configured]"
+     api_args = {"model": "whisper-1"}
+     if lang and lang != "auto": api_args["language"] = lang
+     with open(filepath, "rb") as audio_file:
+         transcription = client.audio.transcriptions.create(file=audio_file, **api_args)
+     return transcription.text
+
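Reviewer note: a minimal usage sketch of how the pieces in agent.py are expected to fit together. The file paths, names, sample query, and tag values below are illustrative assumptions, not part of this commit.

# Hypothetical wiring; paths, names, and the sample query are invented for illustration.
from alz_companion.agent import (
    bootstrap_vectorstore, build_or_load_vectorstore, make_rag_chain, answer_query
)

vs_general = bootstrap_vectorstore(["data/care_guides.jsonl"], index_path="data/faiss_general")   # assumed sample file
vs_personal = build_or_load_vectorstore([], index_path="data/faiss_personal", is_personal=True)   # starts as an empty journal

chain = make_rag_chain(vs_general, vs_personal, role="caregiver", language="English",
                       patient_name="Anthony", caregiver_name="Anne", tone="warm")
result = answer_query(chain, "Dad keeps asking where his watch is. What should I do?",
                      chat_history=[], scenario_tag="repetitive_questioning")
print(result["answer"])
print(result["sources"])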
alz_companion/prompts.py ADDED
@@ -0,0 +1,231 @@
+ """
+ Prompts for the Alzheimer’s AI Companion.
+ """
+
+ # ------------------------ Behaviour‑level tags ------------------------
+ BEHAVIOUR_TAGS = {
+     # Tags from "The Father"
+     "repetitive_questioning": ["validation", "gentle_redirection", "offer_distraction"],
+     "confusion": ["reassurance", "time_place_orientation", "photo_anchors"],
+     "wandering": ["walk_along_support", "simple_landmarks", "visual_cues", "safe_wandering_space"],
+     "agitation": ["de-escalating_tone", "validate_feelings", "reduce_stimulation", "simple_choices"],
+     "false_accusations": ["reassure_no_blame", "avoid_arguing", "redirect_activity"],
+     "address_memory_loss": ["encourage_ID_bracelet_or_GPS", "place_contact_info_in_wallet", "inform_trusted_neighbors", "avoid_quizzing_on_address"],
+     "hallucinations_delusions": ["avoid_arguing_or_correcting", "validate_the_underlying_emotion", "offer_reassurance_of_safety", "gently_redirect_to_real_activity", "check_for_physical_triggers"],
+
+     # Tags from "Still Alice" (and others for future use)
+     "exit_seeking": ["validation", "calm_presence", "safe_wandering_space", "environmental_cues"],
+     "aphasia": ["patience", "simple_language", "nonverbal_cues", "validation"],
+     "withdrawal": ["gentle_invitation", "calm_presence", "offer_familiar_comforts", "no_pressure"],
+     "affection": ["reciprocate_warmth", "positive_reinforcement", "simple_shared_activity"],
+     "sleep_disturbance": ["establish_calm_bedtime_routine", "limit_daytime_naps", "check_for_discomfort_or_pain"],
+     "anxiety": ["calm_reassurance", "simple_breathing_exercise", "reduce_environmental_stimuli"],
+     "depression_sadness": ["validate_feelings_of_sadness", "encourage_simple_pleasant_activity", "ensure_social_connection"],
+     "orientation_check": ["gentle_orientation_cues", "use_familiar_landmarks", "avoid_quizzing"],
+
+     # Tags from "Away from Her"
+     "misidentification": ["gently_correct_with_context", "use_photos_as_anchors", "respond_to_underlying_emotion", "avoid_insistent_correction"],
+
+     # Other useful tags
+     "sundowning_restlessness": ["predictable_routine", "soft_lighting", "low_stimulation", "familiar_music"],
+     "object_misplacement": ["nonconfrontational_search", "fixed_storage_spots"],
+
+     # --- New Tags from Test Fixtures ---
+     "validation": [],
+     "gentle_reorientation": [],
+     "de-escalation": [],
+     "distraction": [],
+     "spaced_cueing": [],
+     "reassurance": [],
+     "psychoeducation": [],
+     "goal_breakdown": [],
+     "routine_structuring": [],
+     "reminiscence_prompting": [],
+     "reframing": [],
+     "distress_tolerance": [],
+     "caregiver_communication_template": [],
+     "personalised_music_activation": [],
+     "memory_probe": [],
+     "safety_brief": [],
+     "follow_up_prompt": []
+ }
+
+ # ------------------------ Emotion styles & helpers ------------------------
+ EMOTION_STYLES = {
+     "confusion": {"tone": "calm, orienting, concrete", "playbook": ["Offer a simple time/place orientation cue (who/where/when).", "Reference one familiar anchor (photo/object/person).", "Use short sentences and one step at a time."]},
+     "fear": {"tone": "reassuring, safety-forward, gentle", "playbook": ["Acknowledge fear without contradiction.", "Provide a clear safety cue (e.g., 'You’re safe here with me').", "Reduce novelty and stimulation; suggest one safe action."]},
+     "anger": {"tone": "de-escalating, validating, low-arousal", "playbook": ["Validate the feeling; avoid arguing/correcting.", "Keep voice low and sentences short.", "Offer a simple choice to restore control (e.g., 'tea or water?')."]},
+     "sadness": {"tone": "warm, empathetic, gentle reminiscence", "playbook": ["Acknowledge loss/longing.", "Invite one comforting memory or familiar song.", "Keep pace slow; avoid tasking."]},
+     "warmth": {"tone": "affirming, appreciative", "playbook": ["Reflect gratitude and positive connection.", "Reinforce what’s going well.", "Keep it light; don’t overload with new info."]},
+     "joy": {"tone": "supportive, celebratory (but not overstimulating)", "playbook": ["Share the joy briefly; match energy gently.", "Offer a simple, pleasant follow-up activity.", "Avoid adding complex tasks."]},
+     "calm": {"tone": "matter-of-fact, concise, steady", "playbook": ["Keep instructions simple.", "Maintain steady pace.", "No extra soothing needed."]},
+ }
+
+ def render_emotion_guidelines(emotion: str | None) -> str:
+     e = (emotion or "").strip().lower()
+     if e not in EMOTION_STYLES:
+         return "Emotion: (auto)\nDesired tone: calm, clear.\nWhen replying, reassure if distress is apparent; prioritise validation and simple choices."
+     style = EMOTION_STYLES[e]
+     bullet = "\n".join([f"- {x}" for x in style["playbook"]])
+     return f"Emotion: {e}\nDesired tone: {style['tone']}\nWhen replying, follow:\n{bullet}"
+
+ # ------------------------ NLU Classification (Dynamic Pipeline) ------------------------
+
+ # --- STEP 1: Router for Primary Goal ---
+ NLU_ROUTER_PROMPT = """You are an expert NLU router. Your task is to classify the user's primary goal into one of two categories:
+ 1. `practical_planning`: The user is seeking a plan, strategy, "how-to" advice, or a solution to a problem.
+ 2. `emotional_support`: The user is expressing feelings, seeking comfort, validation, or reassurance.
+
+ User Query: "{query}"
+
+ Respond with ONLY a single category name from the list above.
+ Category: """
+
+ # --- STEP 2: Specialist Classifier (Examples are now injected dynamically) ---
+ SPECIALIST_CLASSIFIER_PROMPT = """You are an expert NLU engine. Your task is to analyze the user's query to deeply understand their underlying intent and classify it correctly. You will be given a few examples that are highly relevant to the user's query.
+
+ --- INSTRUCTIONS ---
+ First, in a <thinking> block, you must reason step-by-step about the user's query by following these points:
+ - **Literal Meaning:** What is the user literally asking or stating?
+ - **Underlying Situation:** What is the deeper emotional state or situation being described?
+ - **User's Primary Goal:** You have been told the user's goal is `{primary_goal}`. Briefly confirm if the query aligns with this goal.
+ - **Tag Selection:** Based on the primary goal and the provided examples, explain which tags from the provided lists are the most appropriate and why.
+
+ Second, after your reasoning, provide a single, valid JSON object with the final classification.
+
+ --- PROVIDED TAGS ---
+ Behaviors: {behavior_options}
+ Emotions: {emotion_options}
+ Topics: {topic_options}
+ Contexts: {context_options}
+
+ --- RELEVANT EXAMPLES ---
+ {examples}
+ ---
+
+ User Query: "{query}"
+
+ <thinking>
+ """
+
+ # ------------------------ Guardrails ------------------------
+ SAFETY_GUARDRAILS = """Never provide medical diagnoses or dosing. If a situation implies imminent risk (e.g., wandering/elopement, severe agitation, choking, falls), signpost immediate support from onsite staff or emergency services. Use respectful, person‑centred language. Keep guidance concrete and stepwise."""
+
+ # ------------------------ System & Answer Templates ------------------------
+ SYSTEM_TEMPLATE = """You are an Alzheimer’s caregiving companion. Address the patient as {patient_name} and the caregiver as {caregiver_name}. Ground every suggestion in retrieved evidence when possible. If unsure, say so plainly.
+ {guardrails}
+ --- IMPORTANT RULE ---
+ You MUST write your entire response in {language} ONLY. This is a strict instruction. Do not use any other language, even if the user or the retrieved context uses a different language. Your final output must be in {language}."""
+
+ ANSWER_TEMPLATE_CALM = """Context:
+ {context}
+
+ ---
+ Question from user: {question}
+
+ ---
+ Instructions:
+ Based on the context, write a gentle and supportive response in a single, natural-sounding paragraph.
+ Your response should:
+ 1. Start by briefly and calmly acknowledging the user's situation or feeling.
+ 2. Weave 2-3 practical, compassionate suggestions from the context into your paragraph. Do not use a numbered or bulleted list.
+ 3. Conclude with a short, reassuring phrase.
+ 4. You MUST use the retrieved context to directly address the user's specific **Question**.
+ Your response in {language}:"""
+
+ # For scenarios tagged with a specific behavior (e.g., agitation, confusion)
+ ANSWER_TEMPLATE_ADQ = """--- General Guidance from Knowledge Base ---
+ {general_context}
+
+ --- Relevant Personal Memories ---
+ {personal_context}
+
+ ---
+ Care scenario: {scenario_tag}
+ Response Guidelines:
+ {emotions_context}
+ Question from user: {question}
+
+ ---
+ Instructions:
+ Based on ALL the information above, write a **concise, warm, and validating** response for the {role} in a single, natural-sounding paragraph. **Keep the total response to 2-4 sentences.**
+ If possible, weave details from the 'Relevant Personal Memories' into your suggestions to make the response feel more personal and familiar.
+ Pay close attention to the Response Guidelines to tailor your tone.
+ Your response should follow this pattern:
+ 1. Start by validating the user's feeling or concern with a unique, empathetic opening. DO NOT USE THE SAME OPENING PHRASE REPEATEDLY. Choose from different styles of openers, such as:
+ - Acknowledging the difficulty: "That sounds like a very challenging situation..."
+ - Expressing understanding: "I can see why that would be worrying..."
+ - Stating a shared goal: "Let's walk through how we can handle that..."
+ - Directly validating the feeling: "It's completely understandable to feel frustrated when..."
+ 2. Gently offer **1-2 of the most important practical steps**, combining general guidance with personal memories where appropriate. Do not use a list.
+ 3. If the scenario involves risk (like exit_seeking), subtly include a safety cue.
+ 4. End with a compassionate, de-escalation phrase.
+ Your response in {language}:"""
+
+ RISK_FOOTER = """If safety is a concern right now, please seek immediate assistance from onsite staff or local emergency services."""
+
+ # ------------------------ Router & Specialized Templates ------------------------
+
+ QUERY_EXPANSION_PROMPT = """You are a helpful AI assistant. Your task is to rephrase a user's question into 3 different, semantically similar questions to improve document retrieval.
+ Provide the rephrased questions as a JSON list of strings.
+
+ User Question: "{question}"
+
+ JSON List:
+ """
+
+ # Template for routing/classifying the user's intent
+ ROUTER_PROMPT = """You are an expert NLU router. Your task is to classify the user's query into one of four categories:
+ 1. `caregiving_scenario`: The user is describing a situation, asking for advice, or expressing a concern related to Alzheimer's or caregiving.
+ 2. `factual_question`: The user is asking a direct question about a personal memory, person, or event that would be stored in the memory journal.
+ 3. `general_knowledge_question`: The user is asking a general knowledge question about the world, facts, or topics not related to personal memories or caregiving.
+ 4. `general_conversation`: The user is making a general conversational remark, like a greeting, a thank you, or a simple statement that does not require a knowledge base lookup.
+
+ User Query: "{query}"
+
+ Respond with ONLY a single category name from the list above.
+ Category: """
+
+ ANSWER_TEMPLATE_FACTUAL = """Context:
+ {context}
+
+ ---
+ Question from user: {question}
+
+ ---
+ Instructions:
+ Based on the provided context, directly and concisely answer the user's question.
+ - If the context contains the answer, state it clearly and naturally. Keep your response to a maximum of 3 sentences.
+ - If the context does not contain the answer, respond in a warm and friendly tone that you couldn't find a memory of that topic and gently ask if the user would like to talk more about it or add it as a new memory.
+ - ABSOLUTELY DO NOT invent, create, or hallucinate any stories, characters, or details. Your knowledge is limited to the provided context ONLY.
+
+ Your response MUST be in {language}:"""
+
+ ANSWER_TEMPLATE_GENERAL_KNOWLEDGE = """You are a factual answering engine.
+ Your task is to directly answer the user's general knowledge question based on your training data.
+
+ Instructions:
+ - Be factual and concise. Go straight to the answer.
+ - Do NOT include apologies or disclaimers about your knowledge cutoff date.
+ User's Question: "{question}"
+
+ Your factual response in {language}:"""
+
+
+ ANSWER_TEMPLATE_GENERAL = """You are a warm and friendly AI companion. The user has just said: "{question}".
+ Respond in a brief, natural, and conversational way. Do not try to provide caregiving advice unless the user asks for it.
+ Your response MUST be in {language}:"""
+
+
+ # ------------------------ Convenience exports ------------------------
+ __all__ = [
+     "SYSTEM_TEMPLATE", "ANSWER_TEMPLATE_CALM", "ANSWER_TEMPLATE_ADQ",
+     "SAFETY_GUARDRAILS", "RISK_FOOTER", "BEHAVIOUR_TAGS", "EMOTION_STYLES",
+     "render_emotion_guidelines",
+     "NLU_ROUTER_PROMPT", "SPECIALIST_CLASSIFIER_PROMPT",
+     "QUERY_EXPANSION_PROMPT",
+     "ROUTER_PROMPT",
+     "ANSWER_TEMPLATE_FACTUAL",
+     "ANSWER_TEMPLATE_GENERAL_KNOWLEDGE",
+     "ANSWER_TEMPLATE_GENERAL"
+ ]
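Reviewer note: detect_tags_from_query in agent.py assumes an NLU example vector store whose documents carry a lowercase primary_goal value and a classification dict in their metadata (the retriever filters on primary_goal and formats doc.metadata['classification'] into the specialist prompt). A minimal sketch of building such a store under those assumptions; the example query, labels, and index path below are invented for illustration.

# Hypothetical NLU example store; query text, labels, and path are illustrative only.
from langchain.schema import Document
from alz_companion.agent import build_or_load_vectorstore

nlu_examples = [
    Document(
        page_content="He keeps asking when lunch is, over and over. How do I handle it?",
        metadata={
            "primary_goal": "practical_planning",  # lowercase, to match the retriever filter
            "classification": {
                "detected_behaviors": ["repetitive_questioning"],
                "detected_emotion": "None",
                "detected_topic": "None",
                "detected_contexts": [],
            },
        },
    ),
]
nlu_vs = build_or_load_vectorstore(nlu_examples, index_path="data/faiss_nlu_examples")  # assumed path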