From 715cc5b71b996eecde2d97bad71a617274739dcc Mon Sep 17 00:00:00 2001 From: A Vertex SDK engineer Date: Tue, 23 Dec 2025 11:07:24 -0800 Subject: [PATCH] feat: GenAI SDK client - Enabling Few-shot Prompt Optimization by passing either "OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS" or "OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE" to the `optimize_prompt` method together with example dataframe PiperOrigin-RevId: 848237033 --- ...mizer_async_optimize_prompt_return_type.py | 70 +++++++++ ...t_optimizer_optimize_prompt_return_type.py | 77 +++++++-- .../vertexai/genai/test_prompt_optimizer.py | 84 ++++++++++ vertexai/_genai/_prompt_optimizer_utils.py | 146 +++++++++++++++++- vertexai/_genai/prompt_optimizer.py | 84 +++++++++- vertexai/_genai/types/__init__.py | 16 +- vertexai/_genai/types/common.py | 93 ++++------- vertexai/_genai/types/prompt_optimizer.py | 107 +++++++++++++ 8 files changed, 575 insertions(+), 102 deletions(-) create mode 100644 vertexai/_genai/types/prompt_optimizer.py diff --git a/tests/unit/vertexai/genai/replays/test_prompt_optimizer_async_optimize_prompt_return_type.py b/tests/unit/vertexai/genai/replays/test_prompt_optimizer_async_optimize_prompt_return_type.py index 86fd5f36ae..a4fd53937c 100644 --- a/tests/unit/vertexai/genai/replays/test_prompt_optimizer_async_optimize_prompt_return_type.py +++ b/tests/unit/vertexai/genai/replays/test_prompt_optimizer_async_optimize_prompt_return_type.py @@ -16,6 +16,7 @@ from tests.unit.vertexai.genai.replays import pytest_helper from vertexai._genai import types +import pandas as pd import pytest @@ -32,6 +33,75 @@ async def test_optimize_prompt(client): assert response.raw_text_response +@pytest.mark.asyncio +async def test_optimize_prompt_w_optimization_target(client): + """Tests the optimize request parameters method with optimization target.""" + test_prompt = "Generate system instructions for analyzing medical articles" + response = await client.aio.prompt_optimizer.optimize_prompt( + prompt=test_prompt, + 
config=types.OptimizeConfig( + optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_GEMINI_NANO, + ), + ) + assert isinstance(response, types.OptimizeResponse) + assert response.raw_text_response + + +@pytest.mark.asyncio +async def test_optimize_prompt_w_few_shot_optimization_target(client): + """Tests the optimize request parameters method with few shot optimization target.""" + test_prompt = "Generate system instructions for analyzing medical articles" + df = pd.DataFrame( + { + "prompt": ["prompt1", "prompt2"], + "model_response": ["response1", "response2"], + "target_response": ["target1", "target2"], + } + ) + response = await client.aio.prompt_optimizer.optimize_prompt( + prompt=test_prompt, + config=types.OptimizeConfig( + optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE, + examples_dataframe=df, + ), + ) + assert isinstance(response, types.OptimizeResponse) + assert response.raw_text_response + assert isinstance(response.raw_text_response, str) + if response.parsed_response: + assert isinstance( + response.parsed_response, types.prompt_optimizer.ParsedResponseFewShot + ) + + +@pytest.mark.asyncio +async def test_optimize_prompt_w_few_shot_optimization_rubrics(client): + """Tests the optimize request parameters method with few shot optimization target.""" + test_prompt = "Generate system instructions for analyzing medical articles" + df = pd.DataFrame( + { + "prompt": ["prompt1", "prompt2"], + "model_response": ["response1", "response2"], + "rubrics": ["rubric1", "rubric2"], + "rubrics_evaluations": ["[True, True]", "[True, False]"], + } + ) + response = await client.aio.prompt_optimizer.optimize_prompt( + prompt=test_prompt, + config=types.OptimizeConfig( + optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS, + examples_dataframe=df, + ), + ) + assert isinstance(response, types.OptimizeResponse) + assert response.raw_text_response + assert isinstance(response.raw_text_response, str) + 
if response.parsed_response: + assert isinstance( + response.parsed_response, types.prompt_optimizer.ParsedResponseFewShot + ) + + pytestmark = pytest_helper.setup( file=__file__, globals_for_file=globals(), diff --git a/tests/unit/vertexai/genai/replays/test_prompt_optimizer_optimize_prompt_return_type.py b/tests/unit/vertexai/genai/replays/test_prompt_optimizer_optimize_prompt_return_type.py index 6e1a9170d3..c6799e3afc 100644 --- a/tests/unit/vertexai/genai/replays/test_prompt_optimizer_optimize_prompt_return_type.py +++ b/tests/unit/vertexai/genai/replays/test_prompt_optimizer_optimize_prompt_return_type.py @@ -16,6 +16,7 @@ from tests.unit.vertexai.genai.replays import pytest_helper from vertexai._genai import types +import pandas as pd def test_optimize_prompt(client): @@ -27,18 +28,70 @@ def test_optimize_prompt(client): assert response.raw_text_response -# def test_optimize_prompt_w_optimization_target(client): -# """Tests the optimize request parameters method with optimization target.""" -# from google.genai import types as genai_types -# test_prompt = "Generate system instructions for analyzing medical articles" -# response = client.prompt_optimizer.optimize_prompt( -# prompt=test_prompt, -# config=types.OptimizeConfig( -# optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_GEMINI_NANO, -# ), -# ) -# assert isinstance(response, types.OptimizeResponse) -# assert response.raw_text_response +def test_optimize_prompt_w_optimization_target(client): + """Tests the optimize request parameters method with optimization target.""" + test_prompt = "Generate system instructions for analyzing medical articles" + response = client.prompt_optimizer.optimize_prompt( + prompt=test_prompt, + config=types.OptimizeConfig( + optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_GEMINI_NANO, + ), + ) + assert isinstance(response, types.OptimizeResponse) + assert response.raw_text_response + + +def test_optimize_prompt_w_few_shot_optimization_target(client): + 
"""Tests the optimize request parameters method with few shot optimization target.""" + test_prompt = "Generate system instructions for analyzing medical articles" + df = pd.DataFrame( + { + "prompt": ["prompt1", "prompt2"], + "model_response": ["response1", "response2"], + "target_response": ["target1", "target2"], + } + ) + response = client.prompt_optimizer.optimize_prompt( + prompt=test_prompt, + config=types.OptimizeConfig( + optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE, + examples_dataframe=df, + ), + ) + assert isinstance(response, types.OptimizeResponse) + assert response.raw_text_response + assert isinstance(response.raw_text_response, str) + if response.parsed_response: + assert isinstance( + response.parsed_response, types.prompt_optimizer.ParsedResponseFewShot + ) + + +def test_optimize_prompt_w_few_shot_optimization_rubrics(client): + """Tests the optimize request parameters method with few shot optimization target.""" + test_prompt = "Generate system instructions for analyzing medical articles" + df = pd.DataFrame( + { + "prompt": ["prompt1", "prompt2"], + "model_response": ["response1", "response2"], + "rubrics": ["rubric1", "rubric2"], + "rubrics_evaluations": ["[True, True]", "[True, False]"], + } + ) + response = client.prompt_optimizer.optimize_prompt( + prompt=test_prompt, + config=types.OptimizeConfig( + optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS, + examples_dataframe=df, + ), + ) + assert isinstance(response, types.OptimizeResponse) + assert response.raw_text_response + assert isinstance(response.raw_text_response, str) + if response.parsed_response: + assert isinstance( + response.parsed_response, types.prompt_optimizer.ParsedResponseFewShot + ) pytestmark = pytest_helper.setup( diff --git a/tests/unit/vertexai/genai/test_prompt_optimizer.py b/tests/unit/vertexai/genai/test_prompt_optimizer.py index c735657d88..0317c54778 100644 --- 
a/tests/unit/vertexai/genai/test_prompt_optimizer.py +++ b/tests/unit/vertexai/genai/test_prompt_optimizer.py @@ -21,6 +21,7 @@ from vertexai._genai import prompt_optimizer from vertexai._genai import types from google.genai import client +import pandas as pd import pytest @@ -91,6 +92,34 @@ def test_prompt_optimizer_optimize_prompt( mock_client.assert_called_once() mock_custom_optimize_prompt.assert_called_once() + @mock.patch.object(prompt_optimizer.PromptOptimizer, "_custom_optimize_prompt") + def test_prompt_optimizer_optimize_few_shot(self, mock_custom_optimize_prompt): + """Test that prompt_optimizer.optimize method for few shot optimizer.""" + df = pd.DataFrame( + { + "prompt": ["prompt1", "prompt2"], + "model_response": ["response1", "response2"], + "target_response": ["target1", "target2"], + } + ) + test_client = vertexai.Client(project=_TEST_PROJECT, location=_TEST_LOCATION) + test_config = types.OptimizeConfig( + optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE, + examples_dataframe=df, + ) + test_client.prompt_optimizer.optimize_prompt( + prompt="test_prompt", + config=test_config, + ) + mock_custom_optimize_prompt.assert_called_once() + mock_kwargs = mock_custom_optimize_prompt.call_args.kwargs + assert ( + mock_kwargs["config"].optimization_target == test_config.optimization_target + ) + pd.testing.assert_frame_equal( + mock_kwargs["config"].examples_dataframe, test_config.examples_dataframe + ) + @mock.patch.object(prompt_optimizer.PromptOptimizer, "_custom_optimize_prompt") def test_prompt_optimizer_optimize_prompt_with_optimization_target( self, mock_custom_optimize_prompt @@ -138,4 +167,59 @@ async def test_async_prompt_optimizer_optimize_prompt_with_optimization_target( config=config, ) + @pytest.mark.asyncio + @mock.patch.object(prompt_optimizer.AsyncPromptOptimizer, "_custom_optimize_prompt") + async def test_async_prompt_optimizer_optimize_prompt_few_shot_target_response( + self, 
mock_custom_optimize_prompt + ): + """Test that async prompt_optimizer.optimize_prompt calls optimize_prompt with few shot target response.""" + test_client = vertexai.Client(project=_TEST_PROJECT, location=_TEST_LOCATION) + df = pd.DataFrame( + { + "prompt": ["prompt1", "prompt2"], + "model_response": ["response1", "response2"], + "target_response": ["target1", "target2"], + } + ) + config = types.OptimizeConfig( + optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE, + examples_dataframe=df, + ) + await test_client.aio.prompt_optimizer.optimize_prompt( + prompt="test_prompt", + config=config, + ) + mock_custom_optimize_prompt.assert_called_once_with( + content=mock.ANY, + config=config, + ) + + @pytest.mark.asyncio + @mock.patch.object(prompt_optimizer.AsyncPromptOptimizer, "_custom_optimize_prompt") + async def test_async_prompt_optimizer_optimize_prompt_few_shot_rubrics( + self, mock_custom_optimize_prompt + ): + """Test that async prompt_optimizer.optimize_prompt calls optimize_prompt with few shot rubrics.""" + test_client = vertexai.Client(project=_TEST_PROJECT, location=_TEST_LOCATION) + df = pd.DataFrame( + { + "prompt": ["prompt1", "prompt2"], + "model_response": ["response1", "response2"], + "rubrics": ["rubric1", "rubric2"], + "rubrics_evaluations": ["[True, True]", "[True, False]"], + } + ) + config = types.OptimizeConfig( + optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS, + examples_dataframe=df, + ) + await test_client.aio.prompt_optimizer.optimize_prompt( + prompt="test_prompt", + config=config, + ) + mock_custom_optimize_prompt.assert_called_once_with( + content=mock.ANY, + config=config, + ) + # # TODO(b/415060797): add more tests for prompt_optimizer.optimize diff --git a/vertexai/_genai/_prompt_optimizer_utils.py b/vertexai/_genai/_prompt_optimizer_utils.py index 2d6271b9ee..3aa6ac5648 100644 --- a/vertexai/_genai/_prompt_optimizer_utils.py +++ 
b/vertexai/_genai/_prompt_optimizer_utils.py @@ -15,8 +15,130 @@ """Utility functions for prompt optimizer.""" import json +from typing import Any, Optional, Union + +from pydantic import ValidationError + from . import types +try: + import pandas as pd # pylint: disable=g-import-not-at-top + + PandasDataFrame = pd.DataFrame +except ImportError: + pd = None + PandasDataFrame = Any + + +def _construct_input_prompt( + example_df: PandasDataFrame, + *, + prompt_col_name: str, + model_response_col_name: str, + rubrics_col_name: str, + rubrics_evaluations_col_name: str, + target_response_col_name: str, + system_instruction: Optional[str] = None, +) -> str: + """Construct the input prompt for the few shot prompt optimizer.""" + + all_prompts = [] + for row in example_df.to_dict(orient="records"): + example_data = { + "prompt": row[prompt_col_name], + "model_response": row[model_response_col_name], + } + if rubrics_col_name: + example_data["rubrics"] = row[rubrics_col_name] + if rubrics_evaluations_col_name: + example_data["rubrics_evaluations"] = row[rubrics_evaluations_col_name] + if target_response_col_name: + example_data["target_response"] = row[target_response_col_name] + + json_str = json.dumps(example_data, indent=2) + all_prompts.append(f"```JSON\n{json_str}\n```") + + all_prompts_str = "\n\n".join(all_prompts) + + if system_instruction is None: + system_instruction = "" + + return "\n".join( + [ + "Original System Instructions:\n", + system_instruction, + "Examples:\n", + all_prompts_str, + "\nNew Output:\n", + ] + ) + + +def _get_few_shot_prompt( + system_instruction: str, + config: types.OptimizeConfig, +) -> str: + """Builds the few shot prompt.""" + + if "prompt" not in config.examples_dataframe.columns: + raise ValueError("'prompt' is required in the examples_dataframe.") + prompt_col_name = "prompt" + + if "model_response" not in config.examples_dataframe.columns: + raise ValueError("'model_response' is required in the examples_dataframe.") + 
model_response_col_name = "model_response" + + target_response_col_name = "" + rubrics_col_name = "" + rubrics_evaluations_col_name = "" + + if ( + config.optimization_target + == types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE + ): + if "target_response" not in config.examples_dataframe.columns: + raise ValueError("'target_response' is required in the examples_dataframe.") + target_response_col_name = "target_response" + if "rubrics" in config.examples_dataframe.columns: + raise ValueError( + "Only 'target_response' should be provided " + "for OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE " + "but 'rubrics' was provided." + ) + + elif ( + config.optimization_target + == types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS + ): + if not {"rubrics", "rubrics_evaluations"}.issubset( + config.examples_dataframe.columns + ): + raise ValueError( + "'rubrics' and 'rubrics_evaluations' are required in the " + "examples_dataframe when OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS is set." + ) + + rubrics_col_name = "rubrics" + rubrics_evaluations_col_name = "rubrics_evaluations" + if "target_response" in config.examples_dataframe.columns: + raise ValueError( + "Only 'rubrics' and 'rubrics_evaluations' should be provided " + "for OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS " + "but target_response was provided." 
+ ) + else: + raise ValueError("One of 'target_response' or 'rubrics' must be provided.") + + return _construct_input_prompt( + config.examples_dataframe, + prompt_col_name=prompt_col_name, + model_response_col_name=model_response_col_name, + rubrics_col_name=rubrics_col_name, + rubrics_evaluations_col_name=rubrics_evaluations_col_name, + target_response_col_name=target_response_col_name, + system_instruction=system_instruction, + ) + def _get_service_account( config: types.PromptOptimizerConfigOrDict, @@ -25,15 +147,17 @@ def _get_service_account( if isinstance(config, dict): config = types.PromptOptimizerConfig.model_validate(config) - if config.service_account and config.service_account_project_number: + if ( + config.service_account and config.service_account_project_number + ): # pytype: disable=attribute-error raise ValueError( "Only one of service_account or " "service_account_project_number can be provided." ) - elif config.service_account: - return config.service_account - elif config.service_account_project_number: - return f"{config.service_account_project_number}-compute@developer.gserviceaccount.com" + elif config.service_account: # pytype: disable=attribute-error + return config.service_account # pytype: disable=attribute-error + elif config.service_account_project_number: # pytype: disable=attribute-error + return f"{config.service_account_project_number}-compute@developer.gserviceaccount.com" # pytype: disable=attribute-error else: raise ValueError( "Either service_account or service_account_project_number " "is required." 
@@ -70,7 +194,15 @@ def _clean_and_parse_optimized_prompt(output_str: str): ) from e -def _parse(output_str: str) -> types.ParsedResponse: +def _parse( + output_str: str, +) -> Union[ + types.prompt_optimizer.ParsedResponse, + types.prompt_optimizer.ParsedResponseFewShot, +]: """Parses the output string from the prompt optimizer endpoint.""" parsed_out = _clean_and_parse_optimized_prompt(output_str) - return types.ParsedResponse(**parsed_out) + try: + return types.prompt_optimizer.ParsedResponse(**parsed_out) + except ValidationError: + return types.prompt_optimizer.ParsedResponseFewShot(**parsed_out) diff --git a/vertexai/_genai/prompt_optimizer.py b/vertexai/_genai/prompt_optimizer.py index d2bcb22be8..3ba4f0c28f 100644 --- a/vertexai/_genai/prompt_optimizer.py +++ b/vertexai/_genai/prompt_optimizer.py @@ -500,7 +500,7 @@ def optimize_prompt( self, *, prompt: str, - config: Optional[types.OptimizeConfig] = None, + config: Optional[types.OptimizeConfigOrDict] = None, ) -> types.OptimizeResponse: """Makes an API request to _optimize_prompt and returns the parsed response. @@ -517,15 +517,47 @@ def optimize_prompt( types.OptimizeConfig( optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_GEMINI_NANO ) + For few-shot optimization, provide: + + optim_target = types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS + or + optim_target = types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE + types.OptimizeConfig( + optimization_target=optim_target, + examples_dataframe=dataframe + ) + OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS indicates that the few-shot + examples include specific scoring rubrics and their corresponding + evaluations. + OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE indicates that the few-shot + examples include a ground-truth target response. Returns: The parsed response from the API request. 
""" - prompt = genai_types.Content(parts=[genai_types.Part(text=prompt)], role="user") + if isinstance(config, dict): + config = types.OptimizeConfig(**config) + + optimization_target: Optional[types.OptimizeTarget] = None + if config is not None: + optimization_target = config.optimization_target + + final_prompt = prompt + if ( + optimization_target + == types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS + or optimization_target + == types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE + ): + final_prompt = _prompt_optimizer_utils._get_few_shot_prompt(prompt, config) + # TODO: b/435653980 - replace the custom method with a generated method. + config_for_api = config.model_copy() if config else None return self._custom_optimize_prompt( - content=prompt, - config=config, + content=genai_types.Content( + parts=[genai_types.Part(text=final_prompt)], role="user" + ), + config=config_for_api, ) def _custom_optimize_prompt( @@ -540,6 +572,10 @@ def _custom_optimize_prompt( Then gathers the response, concatenates into one string and returns the parsed response. 
""" + if isinstance(config, dict): + config.pop("examples_dataframe", None) + elif config and hasattr(config, "examples_dataframe"): + del config.examples_dataframe parameter_model = types._OptimizeRequestParameters( content=content, @@ -887,6 +923,10 @@ async def _custom_optimize_prompt( config: Optional[types.OptimizeConfigOrDict] = None, ) -> types.OptimizeResponse: """Optimize a single prompt.""" + if isinstance(config, dict): + config.pop("examples_dataframe", None) + elif config and hasattr(config, "examples_dataframe"): + del config.examples_dataframe parameter_model = types._OptimizeRequestParameters( content=content, @@ -953,7 +993,7 @@ async def optimize_prompt( self, *, prompt: str, - config: Optional[types.OptimizeConfig] = None, + config: Optional[types.OptimizeConfigOrDict] = None, ) -> types.OptimizeResponse: """Makes an async request to _optimize_prompt and returns an optimized prompt. @@ -969,13 +1009,41 @@ async def optimize_prompt( types.OptimizeConfig( optimization_target=types.OptimizeTarget.OPTIMIZATION_TARGET_GEMINI_NANO ) + For few-shot optimization, provide: + optim_target = types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS # or types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE + types.OptimizeConfig( + optimization_target=optim_target, + examples_dataframe=dataframe + ) + OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS indicates that the few-shot + examples include specific scoring rubrics and their corresponding + evaluations. + OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE indicates that the few-shot + examples include a ground-truth target response. Returns: The parsed response from the API request. 
""" + if isinstance(config, dict): + config = types.OptimizeConfig(**config) + + optimization_target: Optional[types.OptimizeTarget] = None + if config is not None: + optimization_target = config.optimization_target + + final_prompt = prompt + if ( + optimization_target + == types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS + or optimization_target + == types.OptimizeTarget.OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE + ): + final_prompt = _prompt_optimizer_utils._get_few_shot_prompt(prompt, config) - prompt = genai_types.Content(parts=[genai_types.Part(text=prompt)], role="user") # TODO: b/435653980 - replace the custom method with a generated method. + config_for_api = config.model_copy() if config else None return await self._custom_optimize_prompt( - content=prompt, - config=config, + content=genai_types.Content( + parts=[genai_types.Part(text=final_prompt)], role="user" + ), + config=config_for_api, ) diff --git a/vertexai/_genai/types/__init__.py b/vertexai/_genai/types/__init__.py index 86b88b8994..5486b1640f 100644 --- a/vertexai/_genai/types/__init__.py +++ b/vertexai/_genai/types/__init__.py @@ -21,6 +21,7 @@ from . import agent_engines from . import evals +from . 
import prompt_optimizer from .common import _AppendAgentEngineSessionEventRequestParameters from .common import _AssembleDatasetParameters from .common import _AssessDatasetParameters @@ -126,9 +127,6 @@ from .common import AppendAgentEngineSessionEventResponse from .common import AppendAgentEngineSessionEventResponseDict from .common import AppendAgentEngineSessionEventResponseOrDict -from .common import ApplicableGuideline -from .common import ApplicableGuidelineDict -from .common import ApplicableGuidelineOrDict from .common import AssembleDataset from .common import AssembleDatasetConfig from .common import AssembleDatasetConfigDict @@ -635,9 +633,7 @@ from .common import PairwiseMetricResult from .common import PairwiseMetricResultDict from .common import PairwiseMetricResultOrDict -from .common import ParsedResponse -from .common import ParsedResponseDict -from .common import ParsedResponseOrDict +from .common import ParsedResponseUnion from .common import PointwiseMetricInput from .common import PointwiseMetricInputDict from .common import PointwiseMetricInputOrDict @@ -1824,12 +1820,6 @@ "PromptOptimizerConfig", "PromptOptimizerConfigDict", "PromptOptimizerConfigOrDict", - "ApplicableGuideline", - "ApplicableGuidelineDict", - "ApplicableGuidelineOrDict", - "ParsedResponse", - "ParsedResponseDict", - "ParsedResponseOrDict", "OptimizeResponse", "OptimizeResponseDict", "OptimizeResponseOrDict", @@ -1949,6 +1939,7 @@ "Message", "MessageDict", "Importance", + "ParsedResponseUnion", "_CreateEvaluationItemParameters", "_CreateEvaluationRunParameters", "_CreateEvaluationSetParameters", @@ -2015,6 +2006,7 @@ "_UpdateDatasetParameters", "evals", "agent_engines", + "prompt_optimizer", "PrebuiltMetric", "RubricMetric", ] diff --git a/vertexai/_genai/types/common.py b/vertexai/_genai/types/common.py index fd54f5a8ff..bbe24d4095 100644 --- a/vertexai/_genai/types/common.py +++ b/vertexai/_genai/types/common.py @@ -45,6 +45,7 @@ ) from typing_extensions import TypedDict 
from . import evals as evals_types +from . import prompt_optimizer as prompt_optimizer_types def camel_to_snake(camel_case_string: str) -> str: @@ -332,10 +333,16 @@ class EvaluationRunState(_common.CaseInSensitiveEnum): class OptimizeTarget(_common.CaseInSensitiveEnum): - """None""" + """Specifies the method for calling the optimize_prompt.""" OPTIMIZATION_TARGET_GEMINI_NANO = "OPTIMIZATION_TARGET_GEMINI_NANO" """The data driven prompt optimizer designer for prompts from Android core API.""" + OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS = "OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS" + """The prompt optimizer based on user provided examples with rubrics.""" + OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE = ( + "OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE" + ) + """The prompt optimizer based on user provided examples with target responses.""" class GenerateMemoriesResponseGeneratedMemoryAction(_common.CaseInSensitiveEnum): @@ -3786,7 +3793,12 @@ class OptimizeConfig(_common.BaseModel): default=None, description="""Used to override HTTP request options.""" ) optimization_target: Optional[OptimizeTarget] = Field( - default=None, description="""""" + default=None, + description="""The optimization target for the prompt optimizer. It must be one of the OptimizeTarget enum values: OPTIMIZATION_TARGET_GEMINI_NANO for the prompts from Android core API, OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS for the few-shot prompt optimizer with rubrics, OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE for the few-shot prompt optimizer with target responses.""", + ) + examples_dataframe: Optional[pd.DataFrame] = Field( + default=None, + description="""The examples dataframe for the few-shot prompt optimizer. It must contain "prompt" and "model_response" columns. 
Depending on which optimization target is used, it also needs to contain "rubrics" and "rubrics_evaluations" or "target_response" columns.""", ) @@ -3797,7 +3809,10 @@ class OptimizeConfigDict(TypedDict, total=False): """Used to override HTTP request options.""" optimization_target: Optional[OptimizeTarget] - """""" + """The optimization target for the prompt optimizer. It must be one of the OptimizeTarget enum values: OPTIMIZATION_TARGET_GEMINI_NANO for the prompts from Android core API, OPTIMIZATION_TARGET_FEW_SHOT_RUBRICS for the few-shot prompt optimizer with rubrics, OPTIMIZATION_TARGET_FEW_SHOT_TARGET_RESPONSE for the few-shot prompt optimizer with target responses.""" + + examples_dataframe: Optional[pd.DataFrame] + """The examples dataframe for the few-shot prompt optimizer. It must contain "prompt" and "model_response" columns. Depending on which optimization target is used, it also needs to contain "rubrics" and "rubrics_evaluations" or "target_response" columns.""" OptimizeConfigOrDict = Union[OptimizeConfig, OptimizeConfigDict] @@ -12628,69 +12643,13 @@ class PromptOptimizerConfigDict(TypedDict, total=False): PromptOptimizerConfigOrDict = Union[PromptOptimizerConfig, PromptOptimizerConfigDict] -class ApplicableGuideline(_common.BaseModel): - """Applicable guideline for the optimize_prompt method.""" - - applicable_guideline: Optional[str] = Field(default=None, description="""""") - suggested_improvement: Optional[str] = Field(default=None, description="""""") - text_before_change: Optional[str] = Field(default=None, description="""""") - text_after_change: Optional[str] = Field(default=None, description="""""") - - -class ApplicableGuidelineDict(TypedDict, total=False): - """Applicable guideline for the optimize_prompt method.""" - - applicable_guideline: Optional[str] - """""" - - suggested_improvement: Optional[str] - """""" - - text_before_change: Optional[str] - """""" - - text_after_change: Optional[str] - """""" - - -ApplicableGuidelineOrDict = 
Union[ApplicableGuideline, ApplicableGuidelineDict] - - -class ParsedResponse(_common.BaseModel): - """Response for the optimize_prompt method.""" - - optimization_type: Optional[str] = Field(default=None, description="""""") - applicable_guidelines: Optional[list[ApplicableGuideline]] = Field( - default=None, description="""""" - ) - original_prompt: Optional[str] = Field(default=None, description="""""") - suggested_prompt: Optional[str] = Field(default=None, description="""""") - - -class ParsedResponseDict(TypedDict, total=False): - """Response for the optimize_prompt method.""" - - optimization_type: Optional[str] - """""" - - applicable_guidelines: Optional[list[ApplicableGuidelineDict]] - """""" - - original_prompt: Optional[str] - """""" - - suggested_prompt: Optional[str] - """""" - - -ParsedResponseOrDict = Union[ParsedResponse, ParsedResponseDict] - - class OptimizeResponse(_common.BaseModel): """Response for the optimize_prompt method.""" raw_text_response: Optional[str] = Field(default=None, description="""""") - parsed_response: Optional[ParsedResponse] = Field(default=None, description="""""") + parsed_response: Optional["ParsedResponseUnion"] = Field( + default=None, description="""""" + ) class OptimizeResponseDict(TypedDict, total=False): @@ -12699,7 +12658,7 @@ class OptimizeResponseDict(TypedDict, total=False): raw_text_response: Optional[str] """""" - parsed_response: Optional[ParsedResponseDict] + parsed_response: Optional["ParsedResponseUnionDict"] """""" @@ -14183,6 +14142,14 @@ def assemble_contents(self) -> list[genai_types.Content]: PromptDataDict = SchemaPromptSpecPromptMessageDict PromptDataOrDict = Union[PromptData, PromptDataDict] +ParsedResponseUnion = Union[ + prompt_optimizer_types.ParsedResponse, prompt_optimizer_types.ParsedResponseFewShot +] +ParsedResponseUnionDict = Union[ + prompt_optimizer_types.ParsedResponseDict, + prompt_optimizer_types.ParsedResponseFewShotDict, +] + class PromptDict(TypedDict, total=False): 
"""Represents a prompt.""" diff --git a/vertexai/_genai/types/prompt_optimizer.py b/vertexai/_genai/types/prompt_optimizer.py new file mode 100644 index 0000000000..52c6c3058f --- /dev/null +++ b/vertexai/_genai/types/prompt_optimizer.py @@ -0,0 +1,107 @@ +# Copyright 2025 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Code generated by the Google Gen AI SDK generator DO NOT EDIT. + +from typing import Optional, Union +from google.genai import _common +from pydantic import Field +from typing_extensions import TypedDict + + +class ApplicableGuideline(_common.BaseModel): + """Applicable guideline for the optimize_prompt method.""" + + applicable_guideline: Optional[str] = Field(default=None, description="""""") + suggested_improvement: Optional[str] = Field(default=None, description="""""") + text_before_change: Optional[str] = Field(default=None, description="""""") + text_after_change: Optional[str] = Field(default=None, description="""""") + + +class ApplicableGuidelineDict(TypedDict, total=False): + """Applicable guideline for the optimize_prompt method.""" + + applicable_guideline: Optional[str] + """""" + + suggested_improvement: Optional[str] + """""" + + text_before_change: Optional[str] + """""" + + text_after_change: Optional[str] + """""" + + +ApplicableGuidelineOrDict = Union[ApplicableGuideline, ApplicableGuidelineDict] + + +class ParsedResponse(_common.BaseModel): + """Response for the optimize_prompt method.""" + + optimization_type: 
Optional[str] = Field(default=None, description="""""") + applicable_guidelines: Optional[list[ApplicableGuideline]] = Field( + default=None, description="""""" + ) + original_prompt: Optional[str] = Field(default=None, description="""""") + suggested_prompt: Optional[str] = Field(default=None, description="""""") + + +class ParsedResponseDict(TypedDict, total=False): + """Response for the optimize_prompt method.""" + + optimization_type: Optional[str] + """""" + + applicable_guidelines: Optional[list[ApplicableGuidelineDict]] + """""" + + original_prompt: Optional[str] + """""" + + suggested_prompt: Optional[str] + """""" + + +ParsedResponseOrDict = Union[ParsedResponse, ParsedResponseDict] + + +class ParsedResponseFewShot(_common.BaseModel): + """Response for the optimize_prompt method.""" + + suggested_modifications: Optional[list[ApplicableGuideline]] = Field( + default=None, description="""""" + ) + original_system_instructions: Optional[str] = Field( + default=None, description="""""" + ) + new_system_instructions: Optional[str] = Field(default=None, description="""""") + + +class ParsedResponseFewShotDict(TypedDict, total=False): + """Response for the optimize_prompt method.""" + + suggested_modifications: Optional[list[ApplicableGuidelineDict]] + """""" + + original_system_instructions: Optional[str] + """""" + + new_system_instructions: Optional[str] + """""" + + +ParsedResponseFewShotOrDict = Union[ParsedResponseFewShot, ParsedResponseFewShotDict]