refactor(main.py): migrate vertex gemini calls to vertex_httpx
Completes migration to vertex_httpx
parent e835f7336a
commit 86596c53e9

6 changed files with 159 additions and 206 deletions
@@ -2080,6 +2080,28 @@ def completion(
                 headers=headers,
                 custom_prompt_dict=custom_prompt_dict,
             )
+        elif "gemini" in model:
+            model_response = vertex_chat_completion.completion(  # type: ignore
+                model=model,
+                messages=messages,
+                model_response=model_response,
+                print_verbose=print_verbose,
+                optional_params=new_params,
+                litellm_params=litellm_params,
+                logger_fn=logger_fn,
+                encoding=encoding,
+                vertex_location=vertex_ai_location,
+                vertex_project=vertex_ai_project,
+                vertex_credentials=vertex_credentials,
+                gemini_api_key=None,
+                logging_obj=logging,
+                acompletion=acompletion,
+                timeout=timeout,
+                custom_llm_provider=custom_llm_provider,
+                client=client,
+                api_base=api_base,
+                extra_headers=extra_headers,
+            )
         else:
             model_response = vertex_ai.completion(
                 model=model,
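In effect, this new branch routes Gemini-on-Vertex requests through the httpx-based handler (`vertex_chat_completion`) instead of the legacy `vertex_ai.completion` path. A minimal sketch of how the branch would be reached from the public API, assuming a `vertex_ai/` Gemini model string and ambient Google credentials; the model name, project, and location below are illustrative assumptions, not taken from this commit:

```python
# Hypothetical usage sketch: a model name containing "gemini" under the
# vertex_ai provider should now hit the vertex_chat_completion branch
# (vertex_httpx) added above. All values here are illustrative.
import litellm

response = litellm.completion(
    model="vertex_ai/gemini-pro",             # "gemini" in model -> new branch
    messages=[{"role": "user", "content": "Say hello"}],
    vertex_project="my-gcp-project",          # hypothetical GCP project id
    vertex_location="us-central1",            # hypothetical region
)
print(response.choices[0].message.content)
```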
@@ -2099,8 +2121,8 @@ def completion(

             if (
                 "stream" in optional_params
-                and optional_params["stream"] == True
-                and acompletion == False
+                and optional_params["stream"] is True
+                and acompletion is False
             ):
                 response = CustomStreamWrapper(
                     model_response,
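The second hunk tightens the boolean checks from equality to identity. A small illustration of the difference (plain Python semantics, independent of this commit): `== True` accepts any value that compares equal to `True`, such as the integer `1`, while `is True` only accepts the `True` singleton, which matches the expectation that `optional_params["stream"]` is a real bool.

```python
# `== True` uses equality, and bool is a subclass of int, so truthy
# integers slip through; `is True` checks identity against the True
# singleton and only passes for an actual bool.
stream = 1                 # e.g. a loosely-typed value from a config dict
print(stream == True)      # True  -- 1 compares equal to True
print(stream is True)      # False -- 1 is not the True object

stream = True
print(stream is True)      # True  -- only the bool singleton passes
```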