Commit 6a6fc93

Author: Siba Rajendran
Message: Bug Fixes
(cherry picked from commit 4d0fc2e)
Parent: a1ae458

6 files changed (+61 additions, -25 deletions)

src/fmcore/llm/mixins/llm_mixins.py

Lines changed: 3 additions & 1 deletion

@@ -1,5 +1,7 @@
 from typing import Optional, Union
 
+from pydantic import Field
+
 from fmcore.llm.types.llm_types import LLMConfig, DistributedLLMConfig
 from fmcore.types.mixins_types import Mixin
 from fmcore.types.typed import MutableTyped
@@ -13,4 +15,4 @@ class LLMConfigMixin(MutableTyped, Mixin):
         llm_config (Optional[LLMConfig]): The LLM configuration object.
     """
 
-    llm_config: Union[LLMConfig, DistributedLLMConfig]
+    llm_config: Union[LLMConfig, DistributedLLMConfig] = Field(union_mode='left_to_right')
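
Note on the union_mode fix: pydantic v2 validates Union fields in "smart" mode by default, scoring every member and picking the best match, which can coerce a payload into an unintended branch. Field(union_mode='left_to_right') instead tries members in declaration order and stops at the first success, which here makes LLMConfig the preferred interpretation. A minimal sketch of the behavior, assuming pydantic v2 (Cat/Dog/Owner are illustrative names, not part of fmcore):

from typing import Union

from pydantic import BaseModel, Field

class Cat(BaseModel):
    meows: int

class Dog(BaseModel):
    barks: int

class Owner(BaseModel):
    # Validate against Cat first, then Dog; stop at the first success.
    pet: Union[Cat, Dog] = Field(union_mode='left_to_right')

print(Owner(pet={'meows': 1}).pet)  # Cat(meows=1)

The same fix is applied to student_config and teacher_config in optimizer_mixins.py below.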

src/fmcore/llm/mixins/provider_mixins.py

Lines changed: 2 additions & 2 deletions

@@ -16,8 +16,8 @@ class AWSAccountMixin(MutableTyped, Mixin):
         region (str): The AWS region where the account operates. Defaults to 'us-east-1'.
     """
 
-    role_arn: str
-    region: str = Field(default=AWSRegion.US_EAST_1.value)
+    role_arn: Optional[str] = Field(default=None)
+    region: Optional[str] = Field(default="us-east-1")
 
 
 class APIKeyServiceMixin(MutableTyped, Mixin):
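
With both fields now optional and defaulted, the mixin validates even when no AWS settings are supplied. A small sketch of the resulting behavior, using a plain pydantic BaseModel as a stand-in for MutableTyped and Mixin:

from typing import Optional

from pydantic import BaseModel, Field

# Stand-in for AWSAccountMixin; BaseModel replaces MutableTyped/Mixin
# purely for illustration.
class AWSAccount(BaseModel):
    role_arn: Optional[str] = Field(default=None)
    region: Optional[str] = Field(default="us-east-1")

acct = AWSAccount()  # previously a validation error: role_arn was required
print(acct.role_arn, acct.region)  # None us-east-1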

src/fmcore/prompt_tuner/dspy/optimizer_wrapper/miprov2/miprov2_optimizer_types.py

Lines changed: 6 additions & 2 deletions

@@ -1,5 +1,7 @@
 from typing import Optional
 
+from pydantic import Field
+
 from fmcore.prompt_tuner.types.enums.optimizer_enums import OptimizerMetricType, DSPyOptimizerType
 from fmcore.prompt_tuner.types.mixins.optimizer_mixins import (
     StudentConfigMixin,
@@ -22,8 +24,10 @@ class MIPROv2OptimizerParams(BaseOptimizerParams):
     """
 
     optimizer_metric: str = OptimizerMetricType.ACCURACY
-    auto: Optional[str] = "light"
-    num_candidates: int = 7
+    auto: Optional[str] = Field(default="light")
+    num_candidates: Optional[int] = Field(default=7)
+    max_errors: Optional[int] = Field(default=10)
+    minibatch: Optional[bool] = Field(default=False)
 
 
 class MIPROv2OptimizerConfig(
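
The two new knobs line up with options dspy's MIPROv2 exposes (how many errors to tolerate during evaluation, and whether to evaluate candidates on minibatches). Since every field now carries a Field default, a bare construction is valid; a hedged usage sketch, assuming the class behaves like a standard pydantic model:

from fmcore.prompt_tuner.dspy.optimizer_wrapper.miprov2.miprov2_optimizer_types import (
    MIPROv2OptimizerParams,
)

params = MIPROv2OptimizerParams()  # every field falls back to its default
assert params.auto == "light"
assert params.num_candidates == 7
assert params.max_errors == 10
assert params.minibatch is False

# Overrides still validate against the declared Optional types.
params = MIPROv2OptimizerParams(minibatch=True, max_errors=25)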

src/fmcore/prompt_tuner/dspy/utils/dspy_utils.py

Lines changed: 2 additions & 7 deletions

@@ -168,13 +168,8 @@ def evaluate_func(example: dspy.Example, prediction: dspy.Prediction, trace=None
             "output": prediction.toDict(),
         }
 
-        try:
-            # We are using this hack because dspy doesn't support async
-            decision = AsyncUtils.execute(evaluator.aevaluate(data=row))
-        except Exception as e:
-            # Defaulting to false incase of failures
-            Log.info(f"Error {e} during evaluating {row}")
-            decision = False
+        # We are using this hack because dspy doesn't support async
+        decision = AsyncUtils.execute(evaluator.aevaluate(data=row))
 
         return decision
 
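This change removes the silent decision = False fallback, so evaluation failures now propagate to the optimizer instead of being scored as incorrect (the evaluator itself logs and re-raises, as the next file in this commit shows). The surviving "hack" is a sync-over-async bridge: dspy invokes metric functions synchronously, so the coroutine returned by aevaluate has to be driven to completion in place. A sketch of what an AsyncUtils.execute-style helper typically does; the actual fmcore implementation may differ:

import asyncio
from concurrent.futures import ThreadPoolExecutor

def execute(coro):
    """Run a coroutine to completion from synchronous code."""
    try:
        asyncio.get_running_loop()
    except RuntimeError:
        # No event loop running in this thread: start one.
        return asyncio.run(coro)
    # A loop is already running; blocking on it would deadlock.
    # Run the coroutine on a fresh loop in a worker thread instead.
    with ThreadPoolExecutor(max_workers=1) as pool:
        return pool.submit(asyncio.run, coro).result()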
src/fmcore/prompt_tuner/evaluator/llm_as_a_judge_boolean/llm_as_a_judge_boolean_evaluator.py

Lines changed: 44 additions & 11 deletions

@@ -11,6 +11,7 @@
 from fmcore.mapper.llm_response_json_mapper import LLMResponseJsonMapper
 from fmcore.mapper.criteria_checker_mapper import CriteriaCheckerMapper
 from fmcore.mapper.llm_inference_mapper import LLMInferenceMapper
+from fmcore.utils.logging_utils import Log
 
 
 class LLMAsJudgeBooleanEvaluator(BaseEvaluator[Dict, bool]):
@@ -80,12 +81,26 @@ def evaluate(self, data: Dict) -> bool:
         Returns:
             BooleanLLMJudgeOutput: Evaluation result as a boolean decision.
         """
-        # Format the context into messages using the template
-        formatted_message: BaseMessage = self.text_prompt_mapper.map(data)
-        llm_response: BaseMessage = self.llm_inference_mapper.map([formatted_message])
-        json_response: Dict = self.json_mapper.map(llm_response.content)
-        decision: bool = self.criteria_checker.map(json_response)
-        return decision
+        formatted_message = None
+        llm_response = None
+        json_response = None
+        decision = None
+
+        try:
+            formatted_message = self.text_prompt_mapper.map(data)
+            llm_response = self.llm_inference_mapper.map([formatted_message])
+            json_response = self.json_mapper.map(llm_response.content)
+            decision = self.criteria_checker.map(json_response)
+            return decision
+        except Exception as e:
+            Log.error(
+                "Exception during aevaluate:\n"
+                f"formatted_message: {formatted_message}\n"
+                f"llm_response: {llm_response}\n"
+                f"json_response: {json_response}\n"
+                f"decision: {decision}"
+            )
+            raise
 
     async def aevaluate(self, data: Dict) -> bool:
         """
@@ -97,9 +112,27 @@ async def aevaluate(self, data: Dict) -> bool:
         Returns:
             BooleanLLMJudgeOutput: Evaluation result as a boolean decision.
         """
-        # Format the context into messages using the template
-        formatted_message: BaseMessage = await self.text_prompt_mapper.amap(data)
-        llm_response: BaseMessage = await self.llm_inference_mapper.amap([formatted_message])
-        json_response: Dict = await self.json_mapper.amap(llm_response.content)
-        decision: bool = await self.criteria_checker.amap(json_response)
+
+        formatted_message = None
+        llm_response = None
+        json_response = None
+        decision = None
+
+        try:
+            formatted_message = await self.text_prompt_mapper.amap(data)
+            llm_response = await self.llm_inference_mapper.amap([formatted_message])
+            json_response = await self.json_mapper.amap(llm_response.content)
+            decision = await self.criteria_checker.amap(json_response)
+            if not isinstance(decision, bool):
+                raise ValueError("Decision is not a boolean value")
+        except Exception as e:
+            Log.error(
+                "Exception during aevaluate:\n"
+                f"formatted_message: {formatted_message}\n"
+                f"llm_response: {llm_response}\n"
+                f"json_response: {json_response}\n"
+                f"decision: {decision}"
+            )
+            raise e
+
         return decision
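
Both methods now follow the same fail-loudly pattern: pre-initialize every pipeline stage's output to None, run the stages inside one try, and on failure log all intermediate values before re-raising, so the caller sees the original exception and the log shows exactly which stage produced what. A generic sketch of the pattern (run_pipeline and the stage names are illustrative, not fmcore APIs):

from typing import Any, Callable, Sequence, Tuple

def run_pipeline(stages: Sequence[Tuple[str, Callable[[Any], Any]]], data: Any) -> Any:
    # Track each stage's output so a failure anywhere can be diagnosed.
    results = {name: None for name, _ in stages}
    try:
        for name, fn in stages:
            data = fn(data)
            results[name] = data
        return data
    except Exception:
        print(f"Pipeline failed with partial results: {results}")
        raise  # bare raise preserves the original traceback

Bare raise (used in evaluate) and raise e (used in aevaluate) both propagate the original exception in Python 3; bare raise is the more idiomatic form since it leaves the traceback untouched.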

src/fmcore/prompt_tuner/types/mixins/optimizer_mixins.py

Lines changed: 4 additions & 2 deletions

@@ -1,5 +1,7 @@
 from typing import Optional, Union
 
+from pydantic import Field
+
 from fmcore.llm.types.llm_types import LLMConfig, DistributedLLMConfig
 from fmcore.prompt_tuner.evaluator.types.evaluator_types import EvaluatorConfig
 from fmcore.types.mixins_types import Mixin
@@ -14,7 +16,7 @@ class StudentConfigMixin(MutableTyped, Mixin):
         student_config (Optional[LLMConfig]): The LLM configuration object for student model
     """
 
-    student_config: Union[LLMConfig, DistributedLLMConfig]
+    student_config: Union[LLMConfig, DistributedLLMConfig] = Field(union_mode='left_to_right')
 
 
 class TeacherConfigMixin(MutableTyped, Mixin):
@@ -25,7 +27,7 @@ class TeacherConfigMixin(MutableTyped, Mixin):
         teacher_config (Optional[LLMConfig]): The LLM configuration object for teacher model
     """
 
-    teacher_config: Union[LLMConfig, DistributedLLMConfig]
+    teacher_config: Union[LLMConfig, DistributedLLMConfig] = Field(union_mode='left_to_right')
 
 
 class EvaluatorConfigMixin(MutableTyped, Mixin):
