import json
import os
from typing import Optional

from dotenv import load_dotenv

from agno.agent import Agent, RunResponse
from agno.embedder.sentence_transformer import SentenceTransformerEmbedder
from agno.knowledge.pdf import PDFKnowledgeBase, PDFReader
from agno.memory.v2 import Memory
from agno.memory.v2.db.sqlite import SqliteMemoryDb
from agno.models.groq import Groq
from agno.storage.agent.sqlite import SqliteAgentStorage
from agno.vectordb.pgvector import PgVector, SearchType

load_dotenv()
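
# The vector store connection string is read from the environment (via .env).
# Illustrative value only -- point it at your own pgvector-enabled Postgres instance:
#   DOCKER_DB_URL=postgresql+psycopg://ai:ai@localhost:5532/ai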

def init_support_agents(reload_knowledge=False, recreate_knowledge=False):
    # NOTE: reload_knowledge is currently unused; only recreate_knowledge is passed
    # to kb.load() below.
    db_url = os.getenv("DOCKER_DB_URL")

    # PDF knowledge base stored in pgvector, using hybrid (keyword + vector) search
    # with a multilingual sentence-transformer embedder.
    kb = PDFKnowledgeBase(
        path="tmp/knowledge-base/",
        reader=PDFReader(chunk=True),
        vector_db=PgVector(
            table_name="kb_documents",
            db_url=db_url,
            search_type=SearchType.hybrid,
            embedder=SentenceTransformerEmbedder(
                id="sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2"
            ),
        ),
    )

    kb.load(recreate=recreate_knowledge)

    # Shared session storage for the agents; memory is set up for persistent user
    # memories but is not attached to any agent yet.
    storage = SqliteAgentStorage(table_name="agent_sessions", db_file="tmp/agent_sessions.db")
    memory = Memory(db=SqliteMemoryDb(table_name="user_memories", db_file="tmp/user_memory.db"))

    # Agent 1: turns the raw user message into a strict JSON context object.
    context_agent = Agent(
        model=Groq(id="meta-llama/llama-4-maverick-17b-128e-instruct"),
        storage=storage,
        instructions=[
            """
            You are a structured context extractor.
            Given a user message, output a strict JSON object with the following fields:

            {
              "user_goal": string,              # what the user is trying to achieve
              "item_model": string | null,      # product or item model, if any
              "platform": string | null,        # device, system, or platform
              "emotional_state": string | null, # inferred emotion or sentiment
              "prior_steps": string | null      # steps already taken by the user
            }

            - Always return valid JSON only. No explanations, no markdown.
            - Use null for missing fields.
            - Be conservative in your assumptions.
            """
        ],
    )

    # Agent 2: retrieves the most relevant knowledge-base snippets for the extracted context.
    knowledge_agent = Agent(
        model=Groq(id="meta-llama/llama-4-maverick-17b-128e-instruct"),
        knowledge=kb,
        search_knowledge=True,
        add_context=True,
        instructions=[
            """You are a knowledge retriever agent.
            Given a JSON context, search the knowledge base for the top 3 most relevant snippets.

            Output a plain-text list like this:

            - "[Snippet 1 content]" (Document: <doc_id or title>)
            - "[Snippet 2 content]" (Document: <doc_id or title>)
            - "[Snippet 3 content]" (Document: <doc_id or title>)

            Guidelines:
            - Do not speculate or infer beyond the retrieved content.
            - Prefer procedural or troubleshooting instructions if the user_goal implies an action.
            - Snippets should be self-contained and relevant to the user_goal, platform, or item_model.
            """
        ],
    )

    # Agent 3: writes the final reply (in Italian) from the extracted context,
    # the retrieved snippets, and the conversation history.
    conversation_agent = Agent(
        model=Groq(id="meta-llama/llama-4-maverick-17b-128e-instruct"),
        storage=storage,
        instructions=[
            """
            You are a professional AI-powered customer support assistant. Your goal is to solve the user's issue directly, clearly, and thoroughly using the provided context and knowledge base.

            Guidelines:
            - Always provide **step-by-step help** whenever possible.
            - Use the **knowledge base snippets** to inform your reply. Rephrase them in user-friendly language.
            - Do **not** tell the user to check a manual, a website, or any other external source unless the information is truly not available in the snippets.
            - If specific product details are missing from the context, ask for clarification; do not assume.
            - End with a friendly question to continue support, such as:
              - "Did that help resolve your issue?"
              - "Would you like me to guide you through that step?"
              - "Is there anything else you'd like to check?"

            Example structure:
            1. Brief recap of the actions already tried, based on the last messages (two lines at most).
            2. Address the issue with specifics from the knowledge base.
            3. Offer additional help.

            Do NOT include markdown, citations, or code unless explicitly required.
            Do NOT be generic. You MUST be as specific as possible.

            If the user has already tried more than 5 or 6 actions, tell them that support has been notified of the problem and will contact them, then ask whether they want to keep troubleshooting in the meantime.

            All user messages are in ITALIAN. You must reply only in ITALIAN.
            Never add English text; English is FORBIDDEN.
            """
        ],
    )

    return context_agent, knowledge_agent, conversation_agent
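
# Optional convenience (illustrative sketch only, not used by the flow below):
# init_support_agents() re-loads the knowledge base every time it is called, so a
# deployment could cache the agents once per process. The helper below is
# hypothetical and not part of the original module.
#
#   from functools import lru_cache
#
#   @lru_cache(maxsize=1)
#   def get_cached_support_agents():
#       return init_support_agents()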

def get_customer_agents_response(user_message: str, user_id: str = "user_1", session_id: Optional[str] = None, history=None):
    # Re-initializes all three agents (and re-loads the knowledge base) on every call;
    # see the optional cached initializer sketched above.
    ctx_agent, kb_agent, convo_agent = init_support_agents()

    # Step 1: extract structured context from the user message.
    ctx_run: RunResponse = ctx_agent.run(user_message, user_id=user_id, session_id=session_id)
    ctx_text = ctx_run.content.strip()
    try:
        ctx = json.loads(ctx_text)
    except json.JSONDecodeError:
        # Fall back to an empty context if the model did not return valid JSON.
        ctx = {}

    # Step 2: retrieve the most relevant knowledge-base snippets for that context.
    kb_prompt = json.dumps(ctx)
    kb_run: RunResponse = kb_agent.run(kb_prompt, user_id=user_id, session_id=session_id)
    kb_text = kb_run.content.strip()

    # Step 3: collect the last few turns of conversation history.
    history_lines = []
    if history:
        for msg in history[-4:]:
            role = "Utente" if msg["role"] == "user" else "Assistente"
            history_lines.append(f"{role}: {msg['content'].strip()}")
    history_str = "\n".join(history_lines)

    # Step 4: build the final prompt for the conversation agent. It is kept in Italian
    # because the assistant must reply in Italian; it combines the extracted context,
    # the KB snippets, the recent history, and the new user message.
    convo_prompt = f"""
    Sei un assistente virtuale professionale.

    Tieni conto del contesto, della cronologia della conversazione e degli snippet della knowledge base.

    CONTESTO ESTRATTO:
    {json.dumps(ctx, indent=2, ensure_ascii=False)}

    SNIPPET DELLA KNOWLEDGE BASE:
    {kb_text}

    STORIA DELLA CONVERSAZIONE:
    {history_str}

    NUOVA DOMANDA DELL'UTENTE:
    Utente: {user_message}

    Rispondi in modo utile e dettagliato. Non fare riferimento a fonti esterne. Rispondi solo in ITALIANO.
    """

    convo_run: RunResponse = convo_agent.run(convo_prompt, user_id=user_id, session_id=session_id)
    return convo_run.content.strip(), convo_run.session_id
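

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only). Assumes DOCKER_DB_URL and GROQ_API_KEY
    # are set and that PDFs exist under tmp/knowledge-base/; the sample message and
    # history below are hypothetical.
    sample_history = [
        {"role": "user", "content": "La mia stampante non si collega al WiFi."},
        {"role": "assistant", "content": "Hai già provato a riavviare la stampante e il router?"},
    ]
    reply, session_id = get_customer_agents_response(
        "Sì, ho riavviato entrambi ma continua a non collegarsi.",
        user_id="user_1",
        history=sample_history,
    )
    print(f"[session {session_id}]\n{reply}")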