
Commit ac5a5e4

quad2524 authored and copybara-github committed

Copybara import of the project:

-- 24dff9c by Alan <argnarf@gmail.com>:

fix: update litellm to >=1.83.0 to resolve security vulnerability

-- 8cda891 by Alan <argnarf@gmail.com>:

fix linting errors

-- fb1efd0 by Alan <argnarf@gmail.com>:

Removed version pinning from PR

COPYBARA_INTEGRATE_REVIEW=#6599 from quad2524:issue-6598-litellm-version 18c9d68
PiperOrigin-RevId: 903440345
1 parent 31943db commit ac5a5e4

2 files changed

Lines changed: 20 additions & 11 deletions

tests/unit/vertexai/genai/test_evals.py

Lines changed: 12 additions & 10 deletions
@@ -3624,8 +3624,7 @@ def test_run_inference_with_litellm_string_prompt_format(
         ) as mock_litellm, mock.patch(
             "vertexai._genai._evals_common._call_litellm_completion"
         ) as mock_call_litellm_completion:
-            # fmt: on
-            mock_litellm.utils.get_valid_models.return_value = ["gpt-4o"]
+            mock_litellm.get_llm_provider.return_value = ("gpt-4o", "openai", None , None)
             prompt_df = pd.DataFrame([{"prompt": "What is LiteLLM?"}])
             expected_messages = [{"role": "user", "content": "What is LiteLLM?"}]


@@ -3676,17 +3675,18 @@ def test_run_inference_with_litellm_openai_request_format(
         mock_api_client_fixture,
     ):
         """Tests inference with LiteLLM where the row contains a chat completion request body."""
-        # fmt: off
         with (
-            mock.patch(
-                "vertexai._genai._evals_common.litellm"
-            ) as mock_litellm,
+            mock.patch("vertexai._genai._evals_common.litellm") as mock_litellm,
             mock.patch(
                 "vertexai._genai._evals_common._call_litellm_completion"
             ) as mock_call_litellm_completion,
         ):
-            # fmt: on
-            mock_litellm.utils.get_valid_models.return_value = ["gpt-4o"]
+            mock_litellm.get_llm_provider.return_value = (
+                "gpt-4o",
+                "openai",
+                None,
+                None,
+            )
             prompt_df = pd.DataFrame(
                 [
                     {
@@ -3755,7 +3755,9 @@ def test_run_inference_with_unsupported_model_string(
         with mock.patch(
             "vertexai._genai._evals_common.litellm"
         ) as mock_litellm_package:
-            mock_litellm_package.utils.get_valid_models.return_value = []
+            mock_litellm_package.get_llm_provider.side_effect = ValueError(
+                "unsupported model"
+            )
             evals_module = evals.Evals(api_client_=mock_api_client_fixture)
             prompt_df = pd.DataFrame([{"prompt": "test"}])


@@ -3822,7 +3824,7 @@ def test_run_inference_with_litellm_parsing(
         # fmt: off
         with mock.patch("vertexai._genai._evals_common.litellm") as mock_litellm:
             # fmt: on
-            mock_litellm.utils.get_valid_models.return_value = ["gpt-4o"]
+            mock_litellm.get_llm_provider.return_value = ("gpt-4o", "openai", None , None)
             inference_result = self.client.evals.run_inference(
                 model="gpt-4o",
                 src=mock_df,
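
Note on the new stubbing pattern (a minimal, self-contained sketch using unittest.mock; the model name and the printed message are illustrative, not the repo's fixtures): the updated tests no longer stub litellm.utils.get_valid_models with a list of model names. They instead stub litellm.get_llm_provider, resolving a supported model to a four-element tuple ("gpt-4o", "openai", None, None) and raising ValueError for an unsupported one.

from unittest import mock

mock_litellm = mock.MagicMock()

# Supported model: the stub resolves to the tuple shape the tests use.
mock_litellm.get_llm_provider.return_value = ("gpt-4o", "openai", None, None)
assert mock_litellm.get_llm_provider("gpt-4o") == ("gpt-4o", "openai", None, None)

# Unsupported model: the stub raises ValueError, which the SDK's
# _is_litellm_model helper translates into "not a LiteLLM model".
mock_litellm.get_llm_provider.side_effect = ValueError("unsupported model")
try:
    mock_litellm.get_llm_provider("not-a-model")
except ValueError:
    print("treated as unsupported")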

vertexai/_genai/_evals_common.py

Lines changed: 8 additions & 1 deletion
@@ -735,7 +735,14 @@ def _is_litellm_vertex_maas_model(model: str) -> bool:

 def _is_litellm_model(model: str) -> bool:
     """Checks if the model name corresponds to a valid LiteLLM model name."""
-    return model in litellm.utils.get_valid_models(model)
+    if litellm is None:
+        return False
+
+    try:
+        litellm.get_llm_provider(model)
+        return True
+    except ValueError:
+        return False


 def _is_gemini_model(model: str) -> bool:
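
For context: the old check asked whether the model name appeared in litellm.utils.get_valid_models(model); the new check asks whether litellm.get_llm_provider can resolve the name to a provider at all, and additionally guards against litellm being absent, since it is an optional dependency. Below is a minimal standalone sketch of the same control flow, with a stand-in resolver injected so it runs without litellm installed (make_is_litellm_model and fake_get_llm_provider are illustrative names, not SDK API).

from types import SimpleNamespace
from typing import Any, Callable, Optional


def make_is_litellm_model(litellm_module: Optional[Any]) -> Callable[[str], bool]:
    """Builds a checker shaped like the new _is_litellm_model."""

    def is_litellm_model(model: str) -> bool:
        if litellm_module is None:
            return False  # optional dependency not installed
        try:
            litellm_module.get_llm_provider(model)  # raises for unmapped names
            return True
        except ValueError:
            return False

    return is_litellm_model


def fake_get_llm_provider(model: str):
    # Mimics the (model, provider, api_key, api_base) tuple shape
    # the updated tests stub in for a supported model.
    if model == "gpt-4o":
        return ("gpt-4o", "openai", None, None)
    raise ValueError("unsupported model")


check = make_is_litellm_model(SimpleNamespace(get_llm_provider=fake_get_llm_provider))
assert check("gpt-4o") is True
assert check("definitely-not-a-model") is False
assert make_is_litellm_model(None)("gpt-4o") is False  # litellm missing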
