Commit adb6da9

Author: Siba Rajendran (committed)
Commit message: Bug Fixes
1 parent 6a6fc93 · commit adb6da9

File tree

3 files changed: +35, -27 lines changed

src/fmcore/llm/mixins/llm_mixins.py
Lines changed: 1 addition & 1 deletion

@@ -15,4 +15,4 @@ class LLMConfigMixin(MutableTyped, Mixin):
         llm_config (Optional[LLMConfig]): The LLM configuration object.
     """
 
-    llm_config: Union[LLMConfig, DistributedLLMConfig] = Field(union_mode='left_to_right')
+    llm_config: Union[LLMConfig, DistributedLLMConfig] = Field(union_mode="left_to_right")
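
For context on the field touched here (and again in optimizer_mixins.py below): Pydantic v2's Field(union_mode="left_to_right") validates union members in declaration order and keeps the first one that succeeds, instead of the default "smart" matching. A minimal sketch of that behavior, using hypothetical SimpleConfig/ClusterConfig stand-ins rather than the real LLMConfig/DistributedLLMConfig:

from typing import Union

from pydantic import BaseModel, ConfigDict, Field


class SimpleConfig(BaseModel):
    # Hypothetical stand-in for LLMConfig; extra="forbid" so an unexpected key
    # makes validation fall through to the next union member.
    model_config = ConfigDict(extra="forbid")
    model_id: str


class ClusterConfig(BaseModel):
    # Hypothetical stand-in for DistributedLLMConfig.
    model_config = ConfigDict(extra="forbid")
    model_id: str
    num_workers: int


class Holder(BaseModel):
    # Members are tried left to right; the first successful validation wins.
    config: Union[SimpleConfig, ClusterConfig] = Field(union_mode="left_to_right")


print(type(Holder(config={"model_id": "m1"}).config).__name__)
# SimpleConfig
print(type(Holder(config={"model_id": "m1", "num_workers": 4}).config).__name__)
# ClusterConfig -- SimpleConfig rejects num_workers, so the next member is tried

Because the first successful member wins, declaration order matters whenever the union members' fields overlap.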

src/fmcore/prompt_tuner/evaluator/llm_as_a_judge_boolean/llm_as_a_judge_boolean_evaluator.py
Lines changed: 32 additions & 24 deletions

@@ -73,35 +73,41 @@ def _get_instance(cls, *, evaluator_config: EvaluatorConfig) -> "LLMAsJudgeBoole
 
     def evaluate(self, data: Dict) -> bool:
         """
-        Processes the input data by using the llm_as_a_judge_boolean_mapper to evaluate the context.
+        Processes the input data using the llm_as_a_judge_boolean_mapper to evaluate the context.
 
         Args:
             data (BooleanLLMJudgeInput): Input data containing context for evaluation.
 
         Returns:
-            BooleanLLMJudgeOutput: Evaluation result as a boolean decision.
+            bool: Evaluation result as a boolean decision.
         """
-        formatted_message = None
-        llm_response = None
-        json_response = None
-        decision = None
+        formatted_message = llm_response = json_response = decision = None
 
         try:
             formatted_message = self.text_prompt_mapper.map(data)
             llm_response = self.llm_inference_mapper.map([formatted_message])
             json_response = self.json_mapper.map(llm_response.content)
             decision = self.criteria_checker.map(json_response)
-            return decision
+
+            if not isinstance(decision, bool):
+                raise ValueError("Decision is not a boolean value")
+
         except Exception as e:
             Log.error(
-                "Exception during aevaluate:\n"
-                f"formatted_message: {formatted_message}\n"
-                f"llm_response: {llm_response}\n"
-                f"json_response: {json_response}\n"
-                f"decision: {decision}"
+                "[SYNC EVALUATION ERROR]\n"
+                "------------------------------\n"
+                f"[INPUT DATA]: {data}\n\n"
+                f"[PROMPT]: {self.evaluator_config.evaluator_params.prompt}\n\n"
+                f"[FORMATTED MESSAGE]: {formatted_message}\n\n"
+                f"[LLM RESPONSE]: {llm_response}\n\n"
+                f"[JSON RESPONSE]: {json_response}\n\n"
+                f"[DECISION]: {decision}\n\n"
+                f"[ERROR]: {e}"
             )
             raise
 
+        return decision
+
     async def aevaluate(self, data: Dict) -> bool:
         """
         Asynchronous version of `evaluate` that processes the input data.
@@ -110,29 +116,31 @@ async def aevaluate(self, data: Dict) -> bool:
             data (BooleanLLMJudgeInput): Input data containing context for evaluation.
 
         Returns:
-            BooleanLLMJudgeOutput: Evaluation result as a boolean decision.
+            bool: Evaluation result as a boolean decision.
         """
-
-        formatted_message = None
-        llm_response = None
-        json_response = None
-        decision = None
+        formatted_message = llm_response = json_response = decision = None
 
         try:
             formatted_message = await self.text_prompt_mapper.amap(data)
             llm_response = await self.llm_inference_mapper.amap([formatted_message])
             json_response = await self.json_mapper.amap(llm_response.content)
             decision = await self.criteria_checker.amap(json_response)
+
             if not isinstance(decision, bool):
                 raise ValueError("Decision is not a boolean value")
+
         except Exception as e:
             Log.error(
-                "Exception during aevaluate:\n"
-                f"formatted_message: {formatted_message}\n"
-                f"llm_response: {llm_response}\n"
-                f"json_response: {json_response}\n"
-                f"decision: {decision}"
+                "[ASYNC EVALUATION ERROR]\n"
+                "--------------------------------\n"
+                f"[INPUT DATA]: {data}\n\n"
+                f"[PROMPT]: {self.evaluator_config.evaluator_params.prompt}\n\n"
+                f"[FORMATTED MESSAGE]: {formatted_message}\n\n"
+                f"[LLM RESPONSE]: {llm_response}\n\n"
+                f"[JSON RESPONSE]: {json_response}\n\n"
+                f"[DECISION]: {decision}\n\n"
+                f"[ERROR]: {e}"
            )
-            raise e
+            raise
 
         return decision
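
The restructuring above follows a common pattern: keep every intermediate result in scope so the except block can log the full pipeline state, validate the decision before it leaves the function, re-raise with a bare `raise`, and move the `return` out of the try block. A standalone sketch of the same shape, with hypothetical stand-ins replacing the mapper chain (the real mappers are not shown in this diff):

import logging
from typing import Any, Dict

log = logging.getLogger(__name__)


def evaluate(data: Dict[str, Any]) -> bool:
    # Initialize every intermediate up front so the error log can show exactly
    # how far the pipeline got before it failed.
    formatted_message = llm_response = json_response = decision = None

    try:
        formatted_message = f"prompt built from {data}"  # stand-in for text_prompt_mapper.map
        llm_response = '{"decision": true}'              # stand-in for llm_inference_mapper.map
        json_response = {"decision": True}               # stand-in for json_mapper.map
        decision = json_response["decision"]             # stand-in for criteria_checker.map

        if not isinstance(decision, bool):
            raise ValueError("Decision is not a boolean value")
    except Exception:
        # Dump the whole pipeline state, then re-raise with a bare `raise`
        # so the original exception and traceback propagate unchanged.
        log.error(
            "[EVALUATION ERROR]\n"
            f"[INPUT DATA]: {data}\n"
            f"[FORMATTED MESSAGE]: {formatted_message}\n"
            f"[LLM RESPONSE]: {llm_response}\n"
            f"[JSON RESPONSE]: {json_response}\n"
            f"[DECISION]: {decision}"
        )
        raise

    # The return sits outside the try block, so only a validated bool escapes.
    return decision


if __name__ == "__main__":
    print(evaluate({"question": "Is the sky blue?"}))  # True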

src/fmcore/prompt_tuner/types/mixins/optimizer_mixins.py
Lines changed: 2 additions & 2 deletions

@@ -16,7 +16,7 @@ class StudentConfigMixin(MutableTyped, Mixin):
         student_config (Optional[LLMConfig]): The LLM configuration object for student model
     """
 
-    student_config: Union[LLMConfig, DistributedLLMConfig] = Field(union_mode='left_to_right')
+    student_config: Union[LLMConfig, DistributedLLMConfig] = Field(union_mode="left_to_right")
 
 
 class TeacherConfigMixin(MutableTyped, Mixin):
@@ -27,7 +27,7 @@ class TeacherConfigMixin(MutableTyped, Mixin):
         teacher_config (Optional[LLMConfig]): The LLM configuration object for teacher model
     """
 
-    teacher_config: Union[LLMConfig, DistributedLLMConfig] = Field(union_mode='left_to_right')
+    teacher_config: Union[LLMConfig, DistributedLLMConfig] = Field(union_mode="left_to_right")
 
 
 class EvaluatorConfigMixin(MutableTyped, Mixin):
