from typing import Annotated, TypedDict, Optional
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages
from langgraph.checkpoint.memory import MemorySaver
from langchain_openai import ChatOpenAI
from langchain_core.messages import SystemMessage, HumanMessage
from supermemory import Supermemory
from dotenv import load_dotenv
load_dotenv()
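
# Assumes OPENAI_API_KEY (for ChatOpenAI) and SUPERMEMORY_API_KEY (for the
# Supermemory client) are set in the environment or a local .env file.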
class SupportAgent:
    def __init__(self):
        self.llm = ChatOpenAI(model="gpt-4o", temperature=0.3)
        self.memory = Supermemory()
        self.app = self._build_graph()

    def _build_graph(self):
        class State(TypedDict):
            messages: Annotated[list, add_messages]
            user_id: str
            context: str
            category: Optional[str]

        def retrieve_context(state: State):
            """Fetch the user's profile and similar past tickets from Supermemory."""
            user_id = state["user_id"]
            query = state["messages"][-1].content
            result = self.memory.profile(
                container_tag=user_id,  # one memory container per customer
                q=query,
                threshold=0.5
            )
            static = result.profile.static or []
            dynamic = result.profile.dynamic or []
            memories = result.search_results.results if result.search_results else []
            # chr(10) is "\n"; backslashes are not allowed inside f-string
            # expressions before Python 3.12.
            context = f"""
## User Profile
{chr(10).join(f"- {fact}" for fact in static) if static else "New user, no history."}
## Current Context
{chr(10).join(f"- {ctx}" for ctx in dynamic) if dynamic else "No recent activity."}
## Related Past Tickets
{chr(10).join(f"- {m.memory}" for m in memories[:3]) if memories else "No similar issues found."}
"""
            return {"context": context}

        def categorize(state: State):
            """Route the ticket with a simple keyword heuristic.

            A cheap first pass; an LLM classifier could replace it if the
            keyword rules prove too coarse.
            """
            query = state["messages"][-1].content.lower()
            if any(word in query for word in ["billing", "payment", "charge", "invoice"]):
                return {"category": "billing"}
            elif any(word in query for word in ["bug", "error", "broken", "crash"]):
                return {"category": "technical"}
            else:
                return {"category": "general"}

        def respond(state: State):
            """Generate a response using context."""
            category = state.get("category", "general")
            context = state.get("context", "")
            system_prompt = f"""You are a support agent. Category: {category}
{context}
Guidelines:
- Match explanation depth to the user's technical level
- Reference past interactions when relevant
- Be direct and helpful"""
            system = SystemMessage(content=system_prompt)
            response = self.llm.invoke([system] + state["messages"])
            return {"messages": [response]}

        def store_interaction(state: State):
            """Persist the ticket so future sessions can retrieve it."""
            # After respond(), the last message is the AI reply and the one
            # before it is the user's request.
            user_msg = state["messages"][-2].content
            ai_msg = state["messages"][-1].content
            category = state.get("category", "general")
            self.memory.add(
                content=f"Support ticket ({category}): {user_msg}\nResolution: {ai_msg[:300]}",
                container_tag=state["user_id"],
                metadata={"type": "support_ticket", "category": category}
            )
            return {}

        # Build the graph: a linear pipeline, retrieve -> categorize -> respond -> store
        graph = StateGraph(State)
        graph.add_node("retrieve", retrieve_context)
        graph.add_node("categorize", categorize)
        graph.add_node("respond", respond)
        graph.add_node("store", store_interaction)
        graph.add_edge(START, "retrieve")
        graph.add_edge("retrieve", "categorize")
        graph.add_edge("categorize", "respond")
        graph.add_edge("respond", "store")
        graph.add_edge("store", END)

        # MemorySaver keeps per-thread conversation state in process memory;
        # it is non-persistent, so a durable checkpointer would be needed to
        # survive restarts.
        checkpointer = MemorySaver()
        return graph.compile(checkpointer=checkpointer)

    def handle(self, user_id: str, message: str, thread_id: str) -> str:
        """Process a support request."""
        config = {"configurable": {"thread_id": thread_id}}
        result = self.app.invoke(
            {"messages": [HumanMessage(content=message)], "user_id": user_id},
            config=config
        )
        return result["messages"][-1].content
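
    # thread_id scopes short-term conversation state (MemorySaver checkpoints);
    # user_id scopes long-term memory (the Supermemory container). Reusing a
    # thread_id continues a conversation; a new thread_id starts fresh while
    # keeping the user's stored history.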

# Usage
if __name__ == "__main__":
    agent = SupportAgent()

    # First interaction
    response = agent.handle(
        user_id="customer_alice",
        message="The API is returning 429 errors when I make requests",
        thread_id="ticket_001"
    )
    print(response)

    # Follow-up on the same thread (the checkpointer preserves the conversation)
    response = agent.handle(
        user_id="customer_alice",
        message="I'm only making 10 requests per minute though",
        thread_id="ticket_001"
    )
    print(response)
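
    # A later ticket on a new thread: the MemorySaver state does not carry
    # over, but the Supermemory profile for "customer_alice" should still
    # surface the earlier 429 ticket as related history. (Illustrative
    # follow-up; the message below is hypothetical.)
    response = agent.handle(
        user_id="customer_alice",
        message="I'm hitting rate limit errors again",
        thread_id="ticket_002"
    )
    print(response)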