fix(main.py): allow vertex ai project and location to be set in completion() call

Krrish Dholakia 2024-01-25 15:00:51 -08:00
parent 5e7c43ebf7
commit 014f83c847
3 changed files with 16 additions and 4 deletions
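With this change, the Vertex AI project and location can be passed per request as completion() kwargs, rather than only through the litellm.vertex_project / litellm.vertex_location globals or the VERTEXAI_PROJECT / VERTEXAI_LOCATION secrets. A minimal usage sketch (project id and region are placeholder values):

import litellm

response = litellm.completion(
    model="chat-bison",  # any vertex_ai model
    messages=[{"role": "user", "content": "hi"}],
    vertex_ai_project="my-gcp-project",  # placeholder project id
    vertex_ai_location="us-central1",  # placeholder region
)
print(response)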


@@ -1417,9 +1417,15 @@ def completion(
                 return response
             response = model_response
         elif custom_llm_provider == "vertex_ai":
-            vertex_ai_project = litellm.vertex_project or get_secret("VERTEXAI_PROJECT")
-            vertex_ai_location = litellm.vertex_location or get_secret(
-                "VERTEXAI_LOCATION"
+            vertex_ai_project = (
+                optional_params.pop("vertex_ai_project", None)
+                or litellm.vertex_project
+                or get_secret("VERTEXAI_PROJECT")
+            )
+            vertex_ai_location = (
+                optional_params.pop("vertex_ai_location", None)
+                or litellm.vertex_location
+                or get_secret("VERTEXAI_LOCATION")
             )
             model_response = vertex_ai.completion(
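The precedence introduced above is: per-call parameter first, then the module-level global, then the secret. A minimal sketch of that fallback chain, substituting os.environ for litellm's get_secret():

import os

def resolve(per_call_value, module_default, env_var):
    # first truthy value wins: call site > module global > environment
    return per_call_value or module_default or os.environ.get(env_var)

# e.g. resolve(optional_params.pop("vertex_ai_project", None),
#              litellm.vertex_project, "VERTEXAI_PROJECT")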


@ -95,7 +95,8 @@ def test_vertex_ai():
+ litellm.vertex_code_text_models + litellm.vertex_code_text_models
) )
litellm.set_verbose = False litellm.set_verbose = False
litellm.vertex_project = "reliablekeys" vertex_ai_project = "reliablekeys"
# litellm.vertex_project = "reliablekeys"
test_models = random.sample(test_models, 1) test_models = random.sample(test_models, 1)
# test_models += litellm.vertex_language_models # always test gemini-pro # test_models += litellm.vertex_language_models # always test gemini-pro
@ -117,6 +118,7 @@ def test_vertex_ai():
model=model, model=model,
messages=[{"role": "user", "content": "hi"}], messages=[{"role": "user", "content": "hi"}],
temperature=0.7, temperature=0.7,
vertex_ai_project=vertex_ai_project,
) )
print("\nModel Response", response) print("\nModel Response", response)
print(response) print(response)


@@ -3351,6 +3351,10 @@ def get_optional_params(
             custom_llm_provider != "bedrock" and custom_llm_provider != "sagemaker"
         ):  # allow dynamically setting boto3 init logic
             continue
+        elif (
+            k.startswith("vertex_") and custom_llm_provider != "vertex_ai"
+        ):  # allow dynamically setting vertex ai init logic
+            continue
         passed_params[k] = v
     default_params = {
         "functions": None,