 import logging
 import os
-from typing import Literal, Optional
+from typing import Literal, Optional, List

+from langchain_core.messages import HumanMessage, SystemMessage
 from langchain_openai import AzureChatOpenAI
 from langchain_core.output_parsers import PydanticOutputParser
-from langchain_core.prompts import ChatPromptTemplate
-from langgraph.graph import START, END, StateGraph
+from langgraph.graph import START, END, StateGraph, MessagesState
 from langgraph.types import interrupt, Command
 from pydantic import BaseModel, Field

 from uipath_sdk import UiPathSDK
-
+from uipath_sdk._models import CreateAction
 logger = logging.getLogger(__name__)

 uipath = UiPathSDK()

 class GraphInput(BaseModel):
     message: str
     ticket_id: str
+    assignee: Optional[str] = None

 class GraphOutput(BaseModel):
     label: str
     confidence: float

-class GraphState(BaseModel):
+class GraphState(MessagesState):
     message: str
     ticket_id: str
+    assignee: Optional[str]
     label: Optional[str] = None
     confidence: Optional[float] = None
-
+    last_predicted_category: Optional[str]
+    human_approval: Optional[bool] = None

 class TicketClassification(BaseModel):
     label: Literal["security", "error", "system", "billing", "performance"] = Field(
@@ -40,12 +43,7 @@ class TicketClassification(BaseModel):


 output_parser = PydanticOutputParser(pydantic_object=TicketClassification)
-
-prompt = ChatPromptTemplate.from_messages(
-    [
-        (
-            "system",
-            """You are a support ticket classifier. Classify tickets into exactly one category and provide a confidence score.
+system_message = """You are a support ticket classifier. Classify tickets into exactly one category and provide a confidence score.

 {format_instructions}

@@ -56,12 +54,20 @@ class TicketClassification(BaseModel):
 - billing: Payment and subscription related issues
 - performance: Speed and resource usage concerns

-Respond with the classification in the requested JSON format.""",
-        ),
-        ("user", "{ticket_text}"),
-    ]
-)
-
+Respond with the classification in the requested JSON format."""
+
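+# Entry node: build the initial graph state, seeding the conversation with the system prompt and the raw ticket text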
+def prepare_input(graph_input: GraphInput) -> GraphState:
+    return GraphState(
+        message=graph_input.message,
+        ticket_id=graph_input.ticket_id,
+        assignee=graph_input.assignee,
+        messages=[
+            SystemMessage(content=system_message.format(format_instructions=output_parser.get_format_instructions())),
+            HumanMessage(content=graph_input.message)  # Add the initial human message
+        ],
+        last_predicted_category=None,
+        human_approval=None,
+    )

 def get_azure_openai_api_key() -> str:
     """Get Azure OpenAI API key from environment or UiPath."""

     return api_key

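+# Router used after human review: approved tickets go on to notify_team, rejected ones loop back for re-classification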
+def decide_next_node(state: GraphState) -> Literal["classify", "notify_team"]:
+    if state["human_approval"] is True:
+        return "notify_team"
+
+    return "classify"

-async def classify(state: GraphState) -> GraphState:
+async def classify(state: GraphState) -> Command:
     """Classify the support ticket using LLM."""
     llm = AzureChatOpenAI(
         azure_deployment="gpt-4o-mini",
         api_key=get_azure_openai_api_key(),
         azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"),
         api_version="2024-10-21"
     )
-    _prompt = prompt.partial(
-        format_instructions=output_parser.get_format_instructions()
-    )
-    chain = _prompt | llm | output_parser
+
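+    # A reviewer rejected the previous label, so steer the model away from it and classify again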
+    if state.get("last_predicted_category", None):
+        predicted_category = state["last_predicted_category"]
+        state["messages"].append(HumanMessage(content=f"The ticket is 100% not part of the category '{predicted_category}'. Choose another one."))
+    chain = llm | output_parser

     try:
-        result = await chain.ainvoke({"ticket_text": state.message})
-        print(result)
-        state.label = result.label
-        state.confidence = result.confidence
+        result = await chain.ainvoke(state["messages"])
         logger.info(
             f"Ticket classified with label: {result.label} confidence score: {result.confidence}"
         )
-        return state
+        return Command(
+            update={
+                "confidence": result.confidence,
+                "label": result.label,
+                "last_predicted_category": result.label,
+                "messages": state["messages"],
+            }
+        )
     except Exception as e:
         logger.error(f"Classification failed: {str(e)}")
-        state.label = "error"
-        state.confidence = 0.0
-        return state
+        return Command(
+            update={
+                "label": "error",
+                "confidence": 0.0,
+            }
+        )

-async def wait_for_human(state: GraphState) -> GraphState:
+async def wait_for_human(state: GraphState) -> Command:
     logger.info("Wait for human approval")
-    feedback = interrupt(f"Label: {state.label} Confidence: {state.confidence}")
-
-    if isinstance(feedback, bool) and feedback is True:
-        return Command(goto="notify_team")
-    else:
-        return Command(goto=END)
+    ticket_id = state["ticket_id"]
+    ticket_message = state["messages"][1].content
+    label = state["label"]
+    confidence = state["confidence"]
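+    # interrupt() pauses the run and hands the CreateAction payload to the UiPath runtime,
+    # which raises a task for a human reviewer; the run resumes with the response in action_data.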
+    action_data = interrupt(CreateAction(name="escalation_agent_app",
+                                         title="Action Required: Review classification",
+                                         data={
+                                             "AgentOutput": (
+                                                 f"This is how I classified the ticket: '{ticket_id}',"
+                                                 f" with message '{ticket_message}' \n"
+                                                 f"Label: '{label}'"
+                                                 f" Confidence: '{confidence}'"
+                                             ),
+                                             "AgentName": "ticket-classification"},
+                                         app_version=1,
+                                         assignee=state.get("assignee", None),
+                                         ))
+
+    return Command(
+        update={
+            "human_approval": isinstance(action_data["Answer"], bool) and action_data["Answer"] is True
+        }
+    )

-async def notify_team(state: GraphState) -> GraphState:
+async def notify_team(state: GraphState) -> GraphOutput:
     logger.info("Send team email notification")
-    print(state)
-    return state
+    return GraphOutput(label=state["label"], confidence=state["confidence"])

 """Process a support ticket through the workflow."""

 builder = StateGraph(GraphState, input=GraphInput, output=GraphOutput)

+builder.add_node("prepare_input", prepare_input)
 builder.add_node("classify", classify)
-builder.add_node("human_approval", wait_for_human)
+builder.add_node("human_approval_node", wait_for_human)
 builder.add_node("notify_team", notify_team)

-builder.add_edge(START, "classify")
-builder.add_edge("classify", "human_approval")
-builder.add_edge("human_approval", "notify_team")
+builder.add_edge(START, "prepare_input")
+builder.add_edge("prepare_input", "classify")
+builder.add_edge("classify", "human_approval_node")
+builder.add_conditional_edges("human_approval_node", decide_next_node)
 builder.add_edge("notify_team", END)


@@ -140,3 +177,4 @@ async def notify_team(state: GraphState) -> GraphState:
 memory = MemorySaver()

 graph = builder.compile(checkpointer=memory)
+
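
For a quick local check of this change, the interrupt can be satisfied without UiPath by resuming the run with LangGraph's Command(resume=...). A minimal sketch, assuming the module above is saved as main.py; the thread id, sample ticket text, and simulated {"Answer": True} payload are illustrative only, and the classify node still needs valid Azure OpenAI credentials (in production the UiPath runtime completes the action and resumes the graph):

import asyncio

from langgraph.types import Command

from main import graph  # hypothetical module name for the file in this diff


async def run_once() -> None:
    # Any unique thread id works; MemorySaver keys the paused run by it.
    config = {"configurable": {"thread_id": "ticket-42"}}

    # First invocation runs prepare_input and classify, then pauses at interrupt().
    await graph.ainvoke(
        {"message": "Invoice was charged twice this month", "ticket_id": "42", "assignee": None},
        config,
    )

    # Simulate the reviewer approving the label; the graph resumes inside wait_for_human,
    # routes through decide_next_node to notify_team, and returns the GraphOutput fields.
    result = await graph.ainvoke(Command(resume={"Answer": True}), config)
    print(result)


asyncio.run(run_once())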