fix(router.py): fix order of dereferenced dictionaries
parent 7079b951de
commit 5e0d99b2ef
3 changed files with 18 additions and 14 deletions
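The bug: in a Python dict literal, later **-unpacks silently override earlier keys, so placing **kwargs last let caller-supplied kwargs clobber router-managed values. A minimal sketch of the two semantics at play (illustrative only, not code from this commit; the variable names are made up):

# Sketch of Python's **-unpack precedence rules (hypothetical names).
defaults = {"client": "router_client", "timeout": 600}
overrides = {"timeout": 30}

# In a dict literal, later **-unpacks win: keys from `overrides`
# replace matching keys from `defaults`.
merged = {**defaults, **overrides}
assert merged == {"client": "router_client", "timeout": 30}

# Reversing the order reverses the precedence.
merged = {**overrides, **defaults}
assert merged == {"client": "router_client", "timeout": 600}

# In a *call*, duplicate keywords across **-unpacks raise an error
# instead of silently overriding.
def call(**kwargs):
    return kwargs

try:
    call(**defaults, **overrides)  # "timeout" appears twice
except TypeError as exc:
    print(exc)  # got multiple values for keyword argument 'timeout'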
@@ -142,7 +142,11 @@ def completion(
     logging_obj.pre_call(
         input=prompt,
         api_key=api_key,
-        additional_args={"complete_input_dict": data, "api_base": api_base},
+        additional_args={
+            "complete_input_dict": data,
+            "api_base": api_base,
+            "headers": headers,
+        },
     )

     ## COMPLETION CALL
@@ -394,11 +394,11 @@ class Router:

            response = litellm.completion(
                **{
+                   **kwargs,
                    **data,
                    "messages": messages,
                    "caching": self.cache_responses,
                    "client": model_client,
-                   **kwargs,
                }
            )
            verbose_router_logger.info(
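With **kwargs moved to the front of the merge, the caller's kwargs are applied first and the router-managed keys (messages, caching, client, plus the deployment's own **data) take precedence, so a stray key in kwargs can no longer silently override them.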
@@ -479,7 +479,8 @@ class Router:
        kwargs.setdefault("metadata", {}).update({"model_group": model})

        response = await self.async_function_with_fallbacks(
-           **kwargs, **completion_kwargs
+           **completion_kwargs,
+           **kwargs,
        )

        return response
@@ -525,16 +526,15 @@ class Router:
            else:
                model_client = potential_model_client
            self.total_calls[model_name] += 1
-           response = await litellm.acompletion(
-               **{
-                   **data,
-                   "messages": messages,
-                   "caching": self.cache_responses,
-                   "client": model_client,
-                   "timeout": self.timeout,
-                   **kwargs,
-               }
-           )
+           final_data = {
+               **kwargs,
+               **data,
+               "messages": messages,
+               "caching": self.cache_responses,
+               "client": model_client,
+               "timeout": self.timeout,
+           }
+           response = await litellm.acompletion(**final_data)
            self.success_calls[model_name] += 1
            verbose_router_logger.info(
                f"litellm.acompletion(model={model_name})\033[32m 200 OK\033[0m"
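The acompletion path gets the same precedence fix, with a small refactor: the merged arguments are built into a named final_data dict before the call. Behavior should be unchanged; the named dict just makes the final payload easy to log or inspect while debugging.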
@@ -844,7 +844,7 @@ class Logging:
                    curl_command += additional_args.get("request_str", None)
                elif api_base == "":
                    curl_command = self.model_call_details
-               print_verbose(f"\033[92m{curl_command}\033[0m\n")
+               verbose_logger.info(f"\033[92m{curl_command}\033[0m\n")
                if self.logger_fn and callable(self.logger_fn):
                    try:
                        self.logger_fn(
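The last hunk swaps the ad-hoc print_verbose helper for verbose_logger.info when emitting the reconstructed curl command, presumably so the debug output flows through standard logging handlers instead of raw stdout.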