Mirror of https://github.com/BerriAI/litellm.git, synced 2025-04-27 19:54:13 +00:00
fix optional params

Commit 30453bd751 (parent d57f8dbf43)
2 changed files with 1170 additions and 8 deletions
litellm/llms/.ipynb_checkpoints/vertex_ai-checkpoint.py (new file, 1168 additions)
File diff suppressed because it is too large.

The hunks below are from the second changed file; each one stops forwarding **optional_params into the llm_model.predict / predict_async calls.
@@ -560,8 +560,7 @@ def completion(
         )
         response = llm_model.predict(
             endpoint=endpoint_path,
-            instances=instances,
-            **optional_params,
+            instances=instances
         ).predictions

         completion_response = response[0]
@@ -590,8 +589,7 @@ def completion(
             f"llm_model.predict(instances={instances}, **{optional_params})\n"
         )
         response = llm_model.predict(
-            instances=instances,
-            **optional_params,
+            instances=instances
         ).predictions

         completion_response = response[0]
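For orientation, here is a minimal, self-contained sketch of the call shape these two completion() hunks leave behind. _StubModel, endpoint_path, and the sample instances are hypothetical stand-ins, not the Vertex AI SDK or litellm's real objects; the stub only mimics a predict(endpoint=..., instances=...) call that returns an object with a .predictions list.

# Hypothetical stand-in for the model object used in the diff above.
# It only mirrors the call shape: predict(...) returns something with .predictions.
class _StubModel:
    class _Response:
        def __init__(self, predictions):
            self.predictions = predictions

    def predict(self, endpoint=None, instances=None):
        # Echo each instance back so the snippet runs without any cloud dependency.
        return self._Response([f"echo: {i}" for i in (instances or [])])


llm_model = _StubModel()
endpoint_path = "projects/PROJECT/locations/REGION/endpoints/ENDPOINT_ID"  # placeholder
instances = [{"prompt": "hello"}]

# Call shape after this commit: optional params are no longer splatted into predict().
response = llm_model.predict(
    endpoint=endpoint_path,
    instances=instances
).predictions

completion_response = response[0]
print(completion_response)  # -> "echo: {'prompt': 'hello'}"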
@@ -791,7 +789,6 @@ async def async_completion(
         response_obj = await llm_model.predict(
             endpoint=endpoint_path,
             instances=instances,
-            **optional_params,
         )
         response = response_obj.predictions
         completion_response = response[0]
@@ -807,7 +804,6 @@ async def async_completion(
         )
         response_obj = await llm_model.predict_async(
             instances=instances,
-            **optional_params,
         )

         response = response_obj.predictions
@@ -980,7 +976,6 @@ async def async_streaming(
         response_obj = await llm_model.predict(
             endpoint=endpoint_path,
             instances=instances,
-            **optional_params,
         )

         response = response_obj.predictions
@@ -999,7 +994,6 @@ async def async_streaming(
         request_str += f"llm_model.predict_async(instances={instances}, **{optional_params})\n"
         response_obj = await llm_model.predict_async(
             instances=instances,
-            **optional_params,
         )
         response = response_obj.predictions
         completion_response = response[0]
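The async paths (async_completion / async_streaming) end up with the same shape. The sketch below is again a hypothetical stand-in rather than the real SDK; predict_async here is just a coroutine on the stub that mimics the awaited call seen in the hunks.

import asyncio

# Hypothetical async stand-in mirroring the awaited calls in the async hunks above.
class _AsyncStubModel:
    class _Response:
        def __init__(self, predictions):
            self.predictions = predictions

    async def predict_async(self, instances=None):
        return self._Response([f"echo: {i}" for i in (instances or [])])


async def main():
    llm_model = _AsyncStubModel()
    instances = [{"prompt": "hello"}]

    # After this commit the async calls no longer forward **optional_params either.
    response_obj = await llm_model.predict_async(
        instances=instances,
    )
    response = response_obj.predictions
    completion_response = response[0]
    print(completion_response)


asyncio.run(main())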