 import logging
 import os
-from typing import Literal, Optional
+from typing import Literal, Optional, List
 
+from langchain_core.messages import HumanMessage, SystemMessage
 from langchain_openai import AzureChatOpenAI
 from langchain_core.output_parsers import PydanticOutputParser
-from langchain_core.prompts import ChatPromptTemplate
-from langgraph.graph import START, END, StateGraph
+from langgraph.graph import START, END, StateGraph, MessagesState
 from langgraph.types import interrupt, Command
 from pydantic import BaseModel, Field
 
 from uipath import UiPath
 
+from uipath.models import CreateAction
 logger = logging.getLogger(__name__)
 
 uipath = UiPath()
 
 class GraphInput(BaseModel):
     message: str
     ticket_id: str
+    assignee: Optional[str] = None
 
 class GraphOutput(BaseModel):
     label: str
     confidence: float
 
-class GraphState(BaseModel):
+class GraphState(MessagesState):
     message: str
     ticket_id: str
+    assignee: Optional[str]
     label: Optional[str] = None
     confidence: Optional[float] = None
-
+    last_predicted_category: Optional[str]
+    human_approval: Optional[bool] = None
 
 class TicketClassification(BaseModel):
     label: Literal["security", "error", "system", "billing", "performance"] = Field(
@@ -40,12 +44,7 @@ class TicketClassification(BaseModel):
 
 
 output_parser = PydanticOutputParser(pydantic_object=TicketClassification)
-
-prompt = ChatPromptTemplate.from_messages(
-    [
-        (
-            "system",
-            """You are a support ticket classifier. Classify tickets into exactly one category and provide a confidence score.
+system_message = """You are a support ticket classifier. Classify tickets into exactly one category and provide a confidence score.
 
 {format_instructions}
 
@@ -56,12 +55,20 @@ class TicketClassification(BaseModel):
 - billing: Payment and subscription related issues
 - performance: Speed and resource usage concerns
 
-Respond with the classification in the requested JSON format.""",
-        ),
-        ("user", "{ticket_text}"),
-    ]
-)
-
+Respond with the classification in the requested JSON format."""
+
+def prepare_input(graph_input: GraphInput) -> GraphState:
+    return GraphState(
+        message=graph_input.message,
+        ticket_id=graph_input.ticket_id,
+        assignee=graph_input.assignee,
+        messages=[
+            SystemMessage(content=system_message.format(format_instructions=output_parser.get_format_instructions())),
+            HumanMessage(content=graph_input.message)  # Add the initial human message
+        ],
+        last_predicted_category=None,
+        human_approval=None,
+    )
 
 def get_azure_openai_api_key() -> str:
     """Get Azure OpenAI API key from environment or UiPath."""
@@ -78,60 +85,91 @@ def get_azure_openai_api_key() -> str:
 
     return api_key
 
+def decide_next_node(state: GraphState) -> Literal["classify", "notify_team"]:
+    if state["human_approval"] is True:
+        return "notify_team"
+
+    return "classify"
 
-async def classify(state: GraphState) -> GraphState:
+async def classify(state: GraphState) -> Command:
     """Classify the support ticket using LLM."""
     llm = AzureChatOpenAI(
         azure_deployment="gpt-4o-mini",
         api_key=get_azure_openai_api_key(),
         azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
         api_version="2024-10-21"
     )
-    _prompt = prompt.partial(
-        format_instructions=output_parser.get_format_instructions()
-    )
-    chain = _prompt | llm | output_parser
+
+    if state.get("last_predicted_category", None):
+        predicted_category = state["last_predicted_category"]
+        state["messages"].append(HumanMessage(content=f"The ticket is 100% not part of the category '{predicted_category}'. Choose another one."))
+    chain = llm | output_parser
 
     try:
-        result = await chain.ainvoke({"ticket_text": state.message})
-        print(result)
-        state.label = result.label
-        state.confidence = result.confidence
+        result = await chain.ainvoke(state["messages"])
         logger.info(
             f"Ticket classified with label: {result.label} confidence score: {result.confidence}"
         )
-        return state
+        return Command(
+            update={
+                "confidence": result.confidence,
+                "label": result.label,
+                "last_predicted_category": result.label,
+                "messages": state["messages"],
+            }
+        )
     except Exception as e:
         logger.error(f"Classification failed: {str(e)}")
-        state.label = "error"
-        state.confidence = 0.0
-        return state
+        return Command(
+            update={
+                "label": "error",
+                "confidence": 0.0,
+            }
+        )
 
-async def wait_for_human(state: GraphState) -> GraphState:
+async def wait_for_human(state: GraphState) -> Command:
     logger.info("Wait for human approval")
-    feedback = interrupt(f"Label: {state.label} Confidence: {state.confidence}")
-
-    if isinstance(feedback, bool) and feedback is True:
-        return Command(goto="notify_team")
-    else:
-        return Command(goto=END)
+    ticket_id = state["ticket_id"]
+    ticket_message = state["messages"][1].content
+    label = state["label"]
+    confidence = state["confidence"]
+    action_data = interrupt(CreateAction(name="escalation_agent_app",
+                                         title="Action Required: Review classification",
+                                         data={
+                                             "AgentOutput": (
+                                                 f"This is how I classified the ticket: '{ticket_id}',"
+                                                 f" with message '{ticket_message}' \n"
+                                                 f"Label: '{label}'"
+                                                 f" Confidence: '{confidence}'"
+                                             ),
+                                             "AgentName": "ticket-classification"},
+                                         app_version=1,
+                                         assignee=state.get("assignee", None),
+                                         ))
+
+    return Command(
+        update={
+            "human_approval": isinstance(action_data["Answer"], bool) and action_data["Answer"] is True
+        }
+    )
 
-async def notify_team(state: GraphState) -> GraphState:
+async def notify_team(state: GraphState) -> GraphOutput:
     logger.info("Send team email notification")
-    print(state)
-    return state
+    return GraphOutput(label=state["label"], confidence=state["confidence"])
 
 """Process a support ticket through the workflow."""
 
 builder = StateGraph(GraphState, input=GraphInput, output=GraphOutput)
 
+builder.add_node("prepare_input", prepare_input)
 builder.add_node("classify", classify)
-builder.add_node("human_approval", wait_for_human)
+builder.add_node("human_approval_node", wait_for_human)
 builder.add_node("notify_team", notify_team)
 
-builder.add_edge(START, "classify")
-builder.add_edge("classify", "human_approval")
-builder.add_edge("human_approval", "notify_team")
+builder.add_edge(START, "prepare_input")
+builder.add_edge("prepare_input", "classify")
+builder.add_edge("classify", "human_approval_node")
+builder.add_conditional_edges("human_approval_node", decide_next_node)
 builder.add_edge("notify_team", END)
 
 
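For local testing, here is a minimal sketch of how this graph could be compiled and resumed. It is an assumption-laden illustration, not part of the commit: the thread_id, ticket text, and the {"Answer": True} resume payload are made-up values, and when the agent runs under the UiPath runtime the interrupt carrying the CreateAction is surfaced as an Action Center task and resumed by the platform instead of by this wrapper.

# Local-testing sketch only (assumed names/values); the UiPath runtime normally
# drives the interrupt/resume cycle for the CreateAction escalation.
import asyncio
from langgraph.checkpoint.memory import MemorySaver

graph = builder.compile(checkpointer=MemorySaver())  # a checkpointer is required to pause at interrupt() and resume

async def run_local():
    config = {"configurable": {"thread_id": "ticket-demo"}}  # hypothetical thread id
    # First invocation stops inside wait_for_human at interrupt(CreateAction(...)).
    await graph.ainvoke(
        {"message": "I was charged twice this month", "ticket_id": "T-123"},  # sample GraphInput
        config=config,
    )
    # Simulate the reviewer's answer and resume; wait_for_human reads action_data["Answer"].
    result = await graph.ainvoke(Command(resume={"Answer": True}), config=config)
    print(result)  # label/confidence per GraphOutput

asyncio.run(run_local())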