@@ -73,35 +73,41 @@ def _get_instance(cls, *, evaluator_config: EvaluatorConfig) -> "LLMAsJudgeBoole
7373
def evaluate(self, data: Dict) -> bool:
    """
    Run the boolean LLM-as-a-judge pipeline synchronously.

    The input is mapped to a prompt, sent through the LLM inference
    mapper, parsed as JSON, and finally checked against the configured
    criteria to yield a boolean verdict.

    Args:
        data (BooleanLLMJudgeInput): Input data containing context for evaluation.

    Returns:
        bool: Evaluation result as a boolean decision.

    Raises:
        ValueError: If the criteria checker produces a non-boolean value.
        Exception: Any mapper failure is logged with full pipeline context,
            then re-raised unchanged.
    """
    prompt_message = response = parsed = verdict = None

    try:
        prompt_message = self.text_prompt_mapper.map(data)
        response = self.llm_inference_mapper.map([prompt_message])
        parsed = self.json_mapper.map(response.content)
        verdict = self.criteria_checker.map(parsed)

        # Guard against a checker that returns e.g. a string or None.
        if not isinstance(verdict, bool):
            raise ValueError("Decision is not a boolean value")
    except Exception as err:
        # Dump every intermediate stage so a failed run is reproducible.
        Log.error(
            "[SYNC EVALUATION ERROR]\n"
            "------------------------------\n"
            f"[INPUT DATA]: {data}\n\n"
            f"[PROMPT]: {self.evaluator_config.evaluator_params.prompt}\n\n"
            f"[FORMATTED MESSAGE]: {prompt_message}\n\n"
            f"[LLM RESPONSE]: {response}\n\n"
            f"[JSON RESPONSE]: {parsed}\n\n"
            f"[DECISION]: {verdict}\n\n"
            f"[ERROR]: {err}"
        )
        raise
    else:
        return verdict
async def aevaluate(self, data: Dict) -> bool:
    """
    Asynchronous version of `evaluate` that processes the input data.

    Each pipeline stage (prompt mapping, LLM inference, JSON parsing,
    criteria checking) is awaited via the mappers' `amap` counterparts.

    Args:
        data (BooleanLLMJudgeInput): Input data containing context for evaluation.

    Returns:
        bool: Evaluation result as a boolean decision.

    Raises:
        ValueError: If the criteria checker produces a non-boolean value.
        Exception: Any mapper failure is logged with full pipeline context,
            then re-raised unchanged.
    """
    prompt_message = response = parsed = verdict = None

    try:
        prompt_message = await self.text_prompt_mapper.amap(data)
        response = await self.llm_inference_mapper.amap([prompt_message])
        parsed = await self.json_mapper.amap(response.content)
        verdict = await self.criteria_checker.amap(parsed)

        # Guard against a checker that returns e.g. a string or None.
        if not isinstance(verdict, bool):
            raise ValueError("Decision is not a boolean value")
    except Exception as err:
        # Dump every intermediate stage so a failed run is reproducible.
        Log.error(
            "[ASYNC EVALUATION ERROR]\n"
            "--------------------------------\n"
            f"[INPUT DATA]: {data}\n\n"
            f"[PROMPT]: {self.evaluator_config.evaluator_params.prompt}\n\n"
            f"[FORMATTED MESSAGE]: {prompt_message}\n\n"
            f"[LLM RESPONSE]: {response}\n\n"
            f"[JSON RESPONSE]: {parsed}\n\n"
            f"[DECISION]: {verdict}\n\n"
            f"[ERROR]: {err}"
        )
        raise
    else:
        return verdict
0 commit comments