forked from phoenix/litellm-mirror

fix(vertex_ai.py): correctly parse optional params and pass vertex ai project

This commit is contained in:
parent 4e395bf244
commit cdb960eb34

5 changed files with 78 additions and 25 deletions
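
The fix has two halves: get_optional_params_embeddings now carries the Vertex AI project/location hints through instead of dropping them, and the embedding path gains a print_verbose callback for debugging. The user-facing call this enables looks roughly like the following sketch (values are placeholders borrowed from the new test, passed as extra kwargs on embedding):

    import litellm

    response = litellm.embedding(
        model="vertex_ai/textembedding-gecko",
        input=["good morning from litellm"],
        vertex_ai_project="my-test-project",  # placeholder GCP project
        vertex_ai_location="us-east-1",       # placeholder region
    )
    print(response)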
@@ -1047,6 +1047,7 @@ def embedding(
     vertex_project=None,
     vertex_location=None,
     aembedding=False,
+    print_verbose=None,
 ):
     # logic for parsing in - calling - parsing out model embedding calls
     try:
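
This hunk adds an optional print_verbose callback to the Vertex AI embedding entrypoint (vertex_ai.py, per the commit title). Defaulting it to None keeps the signature backward compatible; the intent is for the caller to pass litellm's verbose logger down, as the third hunk does.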
@@ -1062,7 +1063,13 @@ def embedding(
 
         ## Load credentials with the correct quota project ref: https://github.com/googleapis/python-aiplatform/issues/2557#issuecomment-1709284744
         try:
+            print_verbose(
+                f"VERTEX AI: vertex_project={vertex_project}; vertex_location={vertex_location}"
+            )
             creds, _ = google.auth.default(quota_project_id=vertex_project)
+            print_verbose(
+                f"VERTEX AI: creds={creds}; google application credentials: {os.getenv('GOOGLE_APPLICATION_CREDENTIALS')}"
+            )
             vertexai.init(
                 project=vertex_project, location=vertex_location, credentials=creds
             )
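
The surrounding code follows the workaround from the linked googleapis issue: resolve Application Default Credentials with an explicit quota_project_id, then hand those credentials to vertexai.init so requests bill against the caller's project rather than the ADC default. A standalone sketch of the same pattern, with placeholder project/region values:

    import google.auth
    import vertexai

    # Placeholders; substitute your own GCP project and region.
    creds, _ = google.auth.default(quota_project_id="my-test-project")
    vertexai.init(
        project="my-test-project", location="us-central1", credentials=creds
    )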
@@ -2617,6 +2617,7 @@ def embedding(
             vertex_project=vertex_ai_project,
             vertex_location=vertex_ai_location,
             aembedding=aembedding,
+            print_verbose=print_verbose,
         )
     elif custom_llm_provider == "oobabooga":
         response = oobabooga.embedding(
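
Together with the first hunk, this threads the dispatcher's existing print_verbose helper into vertex_ai.embedding. One caveat: the new logging lines in vertex_ai.py call print_verbose unconditionally, so they rely on this call site always supplying a callable. A None-safe variant would look like this hypothetical sketch (not what the diff does):

    def embedding(vertex_project=None, vertex_location=None, print_verbose=None):
        # Guarding the callback avoids a TypeError when no logger is passed.
        if print_verbose is not None:
            print_verbose(f"VERTEX AI: vertex_project={vertex_project}")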
@@ -451,6 +451,34 @@ async def test_gemini_pro_async_function_calling():
 
 # asyncio.run(gemini_pro_async_function_calling())
 
+
+
+def test_vertexai_embedding():
+    try:
+        load_vertex_ai_credentials()
+        # litellm.set_verbose=True
+        response = embedding(
+            model="textembedding-gecko@001",
+            input=["good morning from litellm", "this is another item"],
+        )
+        print(f"response:", response)
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
+
+@pytest.mark.asyncio
+async def test_vertexai_aembedding():
+    try:
+        load_vertex_ai_credentials()
+        # litellm.set_verbose=True
+        response = await litellm.aembedding(
+            model="textembedding-gecko@001",
+            input=["good morning from litellm", "this is another item"],
+        )
+        print(f"response: {response}")
+    except Exception as e:
+        pytest.fail(f"Error occurred: {e}")
+
 
 # Extra gemini Vision tests for completion + stream, async, async + stream
 # if we run into issues with gemini, we will also add these to our ci/cd pipeline
 # def test_gemini_pro_vision_stream():
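
These two tests mirror the ones removed from the generic embedding test module in the next hunk, with one addition: load_vertex_ai_credentials() runs first, so the suite provisions its own service-account credentials instead of relying on ambient gcloud state. A hedged way to run just this pair from the repo root:

    python -m pytest -k "test_vertexai_embedding or test_vertexai_aembedding" -s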
@@ -231,31 +231,6 @@ def test_cohere_embedding3():
 
 # test_cohere_embedding3()
 
 
-def test_vertexai_embedding():
-    try:
-        # litellm.set_verbose=True
-        response = embedding(
-            model="textembedding-gecko@001",
-            input=["good morning from litellm", "this is another item"],
-        )
-        print(f"response:", response)
-    except Exception as e:
-        pytest.fail(f"Error occurred: {e}")
-
-
-@pytest.mark.asyncio
-async def test_vertexai_aembedding():
-    try:
-        # litellm.set_verbose=True
-        response = await litellm.aembedding(
-            model="textembedding-gecko@001",
-            input=["good morning from litellm", "this is another item"],
-        )
-        print(f"response: {response}")
-    except Exception as e:
-        pytest.fail(f"Error occurred: {e}")
-
-
 def test_bedrock_embedding_titan():
     try:
         # this tests if we support str input for bedrock embedding
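
This is the removal half of a move, not a deletion: the same two tests now live alongside the other Vertex AI tests in the previous hunk, where credentials are loaded explicitly.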
litellm/tests/test_get_optional_params_embeddings.py (new file, 42 lines)

@@ -0,0 +1,42 @@
+# What is this?
+## This tests the `get_optional_params_embeddings` function
+import sys, os
+import traceback
+from dotenv import load_dotenv
+
+load_dotenv()
+import os, io
+
+sys.path.insert(
+    0, os.path.abspath("../..")
+)  # Adds the parent directory to the system path
+import pytest
+import litellm
+from litellm import embedding
+from litellm.utils import get_optional_params_embeddings, get_llm_provider
+
+
+def test_vertex_projects():
+    litellm.drop_params = True
+    model, custom_llm_provider, _, _ = get_llm_provider(
+        model="vertex_ai/textembedding-gecko"
+    )
+    optional_params = get_optional_params_embeddings(
+        model=model,
+        user="test-litellm-user-5",
+        dimensions=None,
+        encoding_format="base64",
+        custom_llm_provider=custom_llm_provider,
+        **{
+            "vertex_ai_project": "my-test-project",
+            "vertex_ai_location": "us-east-1",
+        },
+    )
+
+    print(f"received optional_params: {optional_params}")
+
+    assert "vertex_ai_project" in optional_params
+    assert "vertex_ai_location" in optional_params
+
+
+# test_vertex_projects()
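
The assertions capture the contract the fix establishes: even with litellm.drop_params = True, the provider-specific vertex_ai_project and vertex_ai_location must survive get_optional_params_embeddings rather than being dropped as unsupported. A simplified, hypothetical stand-in for that behavior (not litellm's actual implementation):

    def get_optional_params_embeddings_sketch(custom_llm_provider, **kwargs):
        # Hypothetical: vertex-specific params always pass through for vertex_ai.
        vertex_keys = {"vertex_ai_project", "vertex_ai_location"}
        if custom_llm_provider == "vertex_ai":
            return {k: v for k, v in kwargs.items() if k in vertex_keys and v is not None}
        return {k: v for k, v in kwargs.items() if v is not None}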