mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 10:44:24 +00:00
LiteLLM Minor Fixes & Improvements (10/10/2024) (#6158)
* refactor(vertex_ai_partner_models/anthropic): refactor anthropic to use partner model logic * fix(vertex_ai/): support passing custom api base to partner models Fixes https://github.com/BerriAI/litellm/issues/4317 * fix(proxy_server.py): Fix prometheus premium user check logic * docs(prometheus.md): update quick start docs * fix(custom_llm.py): support passing dynamic api key + api base * fix(realtime_api/main.py): Add request/response logging for realtime api endpoints Closes https://github.com/BerriAI/litellm/issues/6081 * feat(openai/realtime): add openai realtime api logging Closes https://github.com/BerriAI/litellm/issues/6081 * fix(realtime_streaming.py): fix linting errors * fix(realtime_streaming.py): fix linting errors * fix: fix linting errors * fix pattern match router * Add literalai in the sidebar observability category (#6163) * fix: add literalai in the sidebar * fix: typo * update (#6160) * Feat: Add Langtrace integration (#5341) * Feat: Add Langtrace integration * add langtrace service name * fix timestamps for traces * add tests * Discard Callback + use existing otel logger * cleanup * remove print statements * remove callback * add docs * docs * add logging docs * format logging * remove emoji and add litellm proxy example * format logging * format `logging.md` * add langtrace docs to logging.md * sync conflict * docs fix * (perf) move s3 logging to Batch logging + async [94% faster perf under 100 RPS on 1 litellm instance] (#6165) * fix move s3 to use customLogger * add basic s3 logging test * add s3 to custom logger compatible * use batch logger for s3 * s3 set flush interval and batch size * fix s3 logging * add notes on s3 logging * fix s3 logging * add basic s3 logging test * fix s3 type errors * add test for sync logging on s3 * fix: fix to debug log --------- Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com> Co-authored-by: Willy Douhard <willy.douhard@gmail.com> Co-authored-by: yujonglee <yujonglee.dev@gmail.com> Co-authored-by: 
Ali Waleed <ali@scale3labs.com>
This commit is contained in:
parent
9db4ccca9f
commit
11f9df923a
28 changed files with 966 additions and 760 deletions
|
@ -197,7 +197,6 @@ lagoLogger = None
|
|||
dataDogLogger = None
|
||||
prometheusLogger = None
|
||||
dynamoLogger = None
|
||||
s3Logger = None
|
||||
genericAPILogger = None
|
||||
clickHouseLogger = None
|
||||
greenscaleLogger = None
|
||||
|
@ -1410,6 +1409,8 @@ def client(original_function):
|
|||
)
|
||||
else:
|
||||
return result
|
||||
elif call_type == CallTypes.arealtime.value:
|
||||
return result
|
||||
|
||||
# ADD HIDDEN PARAMS - additional call metadata
|
||||
if hasattr(result, "_hidden_params"):
|
||||
|
@ -1799,8 +1800,9 @@ def calculate_tiles_needed(
|
|||
total_tiles = tiles_across * tiles_down
|
||||
return total_tiles
|
||||
|
||||
|
||||
def get_image_type(image_data: bytes) -> Union[str, None]:
|
||||
""" take an image (really only the first ~100 bytes max are needed)
|
||||
"""take an image (really only the first ~100 bytes max are needed)
|
||||
and return 'png' 'gif' 'jpeg' 'heic' or None. method added to
|
||||
allow deprecation of imghdr in 3.13"""
|
||||
|
||||
|
@ -4336,16 +4338,18 @@ def get_api_base(
|
|||
_optional_params.vertex_location is not None
|
||||
and _optional_params.vertex_project is not None
|
||||
):
|
||||
from litellm.llms.vertex_ai_and_google_ai_studio.vertex_ai_anthropic import (
|
||||
create_vertex_anthropic_url,
|
||||
from litellm.llms.vertex_ai_and_google_ai_studio.vertex_ai_partner_models.main import (
|
||||
VertexPartnerProvider,
|
||||
create_vertex_url,
|
||||
)
|
||||
|
||||
if "claude" in model:
|
||||
_api_base = create_vertex_anthropic_url(
|
||||
_api_base = create_vertex_url(
|
||||
vertex_location=_optional_params.vertex_location,
|
||||
vertex_project=_optional_params.vertex_project,
|
||||
model=model,
|
||||
stream=stream,
|
||||
partner=VertexPartnerProvider.claude,
|
||||
)
|
||||
else:
|
||||
|
||||
|
@ -4442,19 +4446,7 @@ def get_supported_openai_params(
|
|||
elif custom_llm_provider == "volcengine":
|
||||
return litellm.VolcEngineConfig().get_supported_openai_params(model=model)
|
||||
elif custom_llm_provider == "groq":
|
||||
return [
|
||||
"temperature",
|
||||
"max_tokens",
|
||||
"top_p",
|
||||
"stream",
|
||||
"stop",
|
||||
"tools",
|
||||
"tool_choice",
|
||||
"response_format",
|
||||
"seed",
|
||||
"extra_headers",
|
||||
"extra_body",
|
||||
]
|
||||
return litellm.GroqChatConfig().get_supported_openai_params(model=model)
|
||||
elif custom_llm_provider == "hosted_vllm":
|
||||
return litellm.HostedVLLMChatConfig().get_supported_openai_params(model=model)
|
||||
elif custom_llm_provider == "deepseek":
|
||||
|
@ -4599,6 +4591,8 @@ def get_supported_openai_params(
|
|||
return (
|
||||
litellm.MistralTextCompletionConfig().get_supported_openai_params()
|
||||
)
|
||||
if model.startswith("claude"):
|
||||
return litellm.VertexAIAnthropicConfig().get_supported_openai_params()
|
||||
return litellm.VertexAIConfig().get_supported_openai_params()
|
||||
elif request_type == "embeddings":
|
||||
return litellm.VertexAITextEmbeddingConfig().get_supported_openai_params()
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue