9 changes: 9 additions & 0 deletions tests/unit/architecture/test_vertexai_import.py
@@ -100,6 +100,15 @@ def test_vertexai_import():
assert "pandas" not in modules_after_genai_client_import
assert "pydantic" in modules_after_genai_client_import

# The types module should not import _evals_metric_loaders until
# PrebuiltMetric or RubricMetric are accessed.
from vertexai._genai import types # noqa: F401

assert (
"google.cloud.aiplatform.vertexai._genai._evals_metric_loaders"
not in sys.modules
)

# Tests the evals module is lazy loaded.
from vertexai._genai import evals as _ # noqa: F401,F811

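The assertion above leans on the fact that sys.modules only records modules that have actually been imported. A rough standalone sketch of the same expectation (module path taken from the assertion above; the "expected" outcomes assume the lazy-loading change in this PR):

import sys

from vertexai._genai import types

loader_mod = "google.cloud.aiplatform.vertexai._genai._evals_metric_loaders"
# Importing the types package alone should not pull in the loaders module.
print(loader_mod in sys.modules)  # expected: False

# First attribute access should trigger the lazy import via the package __getattr__.
_ = types.RubricMetric
print(loader_mod in sys.modules)  # expected: True after access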
16 changes: 14 additions & 2 deletions vertexai/_genai/types/__init__.py
@@ -16,6 +16,9 @@
# Code generated by the Google Gen AI SDK generator DO NOT EDIT.
# flake8: noqa: F401

import importlib
import typing

from . import agent_engines
from . import evals
from .common import _AppendAgentEngineSessionEventRequestParameters
@@ -756,7 +759,6 @@
from .common import RubricGroup
from .common import RubricGroupDict
from .common import RubricGroupOrDict
from .common import RubricMetric
from .common import RubricOrDict
from .common import RubricVerdict
from .common import RubricVerdictDict
@@ -1813,7 +1815,6 @@
"PromptDataDict",
"PromptDataOrDict",
"LLMMetric",
"RubricMetric",
"MetricPromptBuilder",
"_CreateEvaluationItemParameters",
"_CreateEvaluationRunParameters",
@@ -1878,4 +1879,15 @@
"_UpdateDatasetParameters",
"evals",
"agent_engines",
"PrebuiltMetric",
"RubricMetric",
]


def __getattr__(name: str) -> typing.Any:
    if name == "PrebuiltMetric" or name == "RubricMetric":
        module = importlib.import_module(".._evals_metric_loaders", __package__)
        prebuilt_metric_obj = getattr(module, name)
        globals()[name] = prebuilt_metric_obj
        return prebuilt_metric_obj
    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
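The new module-level __getattr__ follows the PEP 562 pattern: the first access to PrebuiltMetric or RubricMetric imports _evals_metric_loaders and caches the attribute in the package globals, so later lookups bypass __getattr__ entirely. A minimal usage sketch, assuming an installed SDK in which _evals_metric_loaders exposes these names:

from vertexai._genai import types

print("RubricMetric" in vars(types))  # False: nothing materialized yet

metric_cls = types.RubricMetric  # first access runs __getattr__ and imports the loaders
same_cls = types.RubricMetric    # now found directly in the module dict; __getattr__ is not called again
assert metric_cls is same_cls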
12 changes: 0 additions & 12 deletions vertexai/_genai/types/common.py
@@ -16,7 +16,6 @@
# Code generated by the Google Gen AI SDK generator DO NOT EDIT.

import datetime
import importlib
import json
import logging
import os
@@ -47,17 +46,6 @@
from typing_extensions import TypedDict
from . import evals as evals_types

__all__ = ["PrebuiltMetric", "RubricMetric"]  # noqa: F822


def __getattr__(name: str) -> typing.Any:
    if name == "PrebuiltMetric" or name == "RubricMetric":
        module = importlib.import_module(".._evals_metric_loaders", __package__)
        prebuilt_metric_obj = getattr(module, name)
        globals()[name] = prebuilt_metric_obj
        return prebuilt_metric_obj
    raise AttributeError(f"module '{__name__}' has no attribute '{name}'")


def _camel_to_snake(camel_case_string: str) -> str:
    snake_case_string = re.sub(r"(?<!^)([A-Z])", r"_\1", camel_case_string)
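For the unchanged context line above, only the regex step is visible in this hunk (the remainder of the helper is cut off); that step inserts an underscore before every capital letter except a leading one. A small illustrative sketch of just that step:

import re

print(re.sub(r"(?<!^)([A-Z])", r"_\1", "RubricMetric"))  # Rubric_Metric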