Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-26 03:04:13 +00:00)
update request strings
This commit is contained in:
parent 8c39a631d3
commit 02c58a9760

1 changed file with 4 additions and 5 deletions
@@ -556,7 +556,7 @@ def completion(
             project=vertex_project, location=vertex_location, endpoint=model
         )
         request_str += (
-            f"llm_model.predict(endpoint={endpoint_path}, instances={instances}, **{optional_params})\n"
+            f"llm_model.predict(endpoint={endpoint_path}, instances={instances})\n"
         )
         response = llm_model.predict(
             endpoint=endpoint_path,
@@ -588,7 +588,6 @@ def completion(
         request_str += (
             f"llm_model.predict(instances={instances})\n"
         )
-        print("instances", instances)
         response = llm_model.predict(
             instances=instances
         ).predictions
@@ -785,7 +784,7 @@ async def async_completion(
             project=vertex_project, location=vertex_location, endpoint=model
         )
         request_str += (
-            f"llm_model.predict(endpoint={endpoint_path}, instances={instances}, **{optional_params})\n"
+            f"llm_model.predict(endpoint={endpoint_path}, instances={instances})\n"
         )
         response_obj = await llm_model.predict(
             endpoint=endpoint_path,
@@ -801,7 +800,7 @@ async def async_completion(

     elif mode == "private":
         request_str += (
-            f"llm_model.predict(instances={instances}, **{optional_params})\n"
+            f"llm_model.predict_async(instances={instances})\n"
         )
         response_obj = await llm_model.predict_async(
             instances=instances,
@@ -992,7 +991,7 @@ async def async_streaming(
     elif mode == "private":
         stream = optional_params.pop("stream", None)
         _ = instances[0].pop("stream", None)
-        request_str += f"llm_model.predict_async(instances={instances}, **{optional_params})\n"
+        request_str += f"llm_model.predict_async(instances={instances})\n"
         response_obj = await llm_model.predict_async(
             instances=instances,
         )
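
The diff only touches the human-readable request_str built up alongside the Vertex AI calls (plus one stray debug print that is dropped); the predict()/predict_async() calls themselves are unchanged. A minimal sketch of the intent, assuming request_str is used purely for logging and treating llm_model, endpoint_path, and instances as hypothetical placeholders rather than litellm's real objects:

    # Sketch: keep the logged call string in sync with the call actually made.
    instances = [{"prompt": "hello"}]
    endpoint_path = "projects/<project>/locations/<location>/endpoints/<id>"

    request_str = ""
    # After this commit the string no longer mentions **optional_params, since the
    # underlying call does not receive them, and the async "private" path now logs
    # predict_async to match the awaited method.
    request_str += f"llm_model.predict(endpoint={endpoint_path}, instances={instances})\n"
    print(request_str)  # what a logger/observer would record for the sync path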