LiteLLM Minor Fixes & Improvements (10/09/2024) (#6139)

* fix(utils.py): don't return 'none' response headers

Fixes https://github.com/BerriAI/litellm/issues/6123
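
A minimal sketch of the behavior this fix targets: response headers with missing values should be filtered out rather than surfaced as the string "None". The helper name and signature below are illustrative, not LiteLLM's actual utils.py API.

```python
# Hedged sketch (not the real implementation): drop header entries whose value
# is missing before exposing them, so callers never see a literal "None".
from typing import Dict, Mapping, Optional


def clean_response_headers(headers: Mapping[str, Optional[str]]) -> Dict[str, str]:
    cleaned: Dict[str, str] = {}
    for key, value in headers.items():
        if value is None or value == "None":
            continue  # skip placeholder values instead of returning them
        cleaned[key] = value
    return cleaned


print(clean_response_headers({"x-ratelimit-remaining-requests": "99", "retry-after": None}))
# -> {'x-ratelimit-remaining-requests': '99'}
```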

* fix(vertex_and_google_ai_studio_gemini.py): support parsing out additionalProperties and strict values from tool call schemas

Fixes https://github.com/BerriAI/litellm/issues/6136
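
Roughly, OpenAI-style tool schemas can carry fields such as additionalProperties and strict that Gemini function declarations do not accept. A hedged sketch of stripping them before the request is sent; the function and constant names are illustrative, not the LiteLLM code.

```python
# Illustrative sketch: recursively remove JSON-schema fields that the Gemini
# function-calling API is assumed to reject.
from typing import Any

UNSUPPORTED_FIELDS = ("additionalProperties", "strict")


def strip_unsupported_fields(schema: Any) -> Any:
    if isinstance(schema, dict):
        return {
            key: strip_unsupported_fields(value)
            for key, value in schema.items()
            if key not in UNSUPPORTED_FIELDS
        }
    if isinstance(schema, list):
        return [strip_unsupported_fields(item) for item in schema]
    return schema


tool_parameters = {
    "type": "object",
    "additionalProperties": False,
    "properties": {"city": {"type": "string"}},
}
print(strip_unsupported_fields(tool_parameters))
# -> {'type': 'object', 'properties': {'city': {'type': 'string'}}}
```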

* fix(cost_calculator.py): set default character value to none

Fixes https://github.com/BerriAI/litellm/issues/6133#issuecomment-2403290196

* fix(google.py): fix cost per token / cost per char conversion

Fixes https://github.com/BerriAI/litellm/issues/6133#issuecomment-2403370287
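
For context, Gemini pricing on Vertex AI is quoted per character while LiteLLM reports cost per token, so the two units have to be converted consistently. The sketch below uses an assumed 4-characters-per-token ratio purely for illustration; it is not the constant used in google.py.

```python
# Illustration only: converting between per-character and per-token pricing.
# The 4 chars/token ratio and the price are assumptions for this example.
ASSUMED_CHARS_PER_TOKEN = 4.0


def cost_per_char_to_cost_per_token(cost_per_character: float) -> float:
    return cost_per_character * ASSUMED_CHARS_PER_TOKEN


def cost_per_token_to_cost_per_char(cost_per_token: float) -> float:
    return cost_per_token / ASSUMED_CHARS_PER_TOKEN


input_cost_per_character = 1.25e-06  # hypothetical per-character price
print(cost_per_char_to_cost_per_token(input_cost_per_character))  # 5e-06 per token
```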

* build(model_prices_and_context_window.json): update gemini pricing

Fixes https://github.com/BerriAI/litellm/issues/6133

* build(model_prices_and_context_window.json): update gemini pricing

* fix(litellm_logging.py): fix streaming caching logging when 'turn_off_message_logging' enabled

Stores unredacted response in cache
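
In other words, redaction should only apply to the copy handed to the logging callbacks, while the cache write keeps the original content. A rough sketch of that split, with made-up names rather than the litellm_logging.py internals:

```python
# Rough sketch: cache the original response, then redact only the logged copy
# when message logging is turned off. All names here are illustrative.
import copy


def log_and_cache(response: dict, cache: dict, cache_key: str,
                  turn_off_message_logging: bool) -> dict:
    cache[cache_key] = response  # unredacted response goes into the cache

    logged_copy = response
    if turn_off_message_logging:
        logged_copy = copy.deepcopy(response)
        for choice in logged_copy.get("choices", []):
            choice["message"]["content"] = "redacted-by-litellm"
    return logged_copy  # this is what logging callbacks receive


cache: dict = {}
resp = {"choices": [{"message": {"content": "hello"}}]}
print(log_and_cache(resp, cache, "key-1", turn_off_message_logging=True))
print(cache["key-1"])  # still contains the original "hello"
```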

* build(model_prices_and_context_window.json): update gemini-1.5-flash pricing
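
For reference, entries in model_prices_and_context_window.json follow roughly the shape below, shown here as a Python dict. The numbers are placeholders, not the updated Gemini prices, and the exact key set may differ from the real file.

```python
# Shape of a pricing entry (placeholder values, illustrative keys).
import json

gemini_entry = {
    "gemini-1.5-flash": {
        "max_input_tokens": 1_000_000,
        "max_output_tokens": 8192,
        "input_cost_per_token": 0.0,       # placeholder
        "output_cost_per_token": 0.0,      # placeholder
        "input_cost_per_character": 0.0,   # placeholder
        "litellm_provider": "gemini",
        "mode": "chat",
    }
}
print(json.dumps(gemini_entry, indent=2))
```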

* fix(cost_calculator.py): fix default prompt_character count logic

Fixes error in gemini cost calculation
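
The underlying idea, which also motivates the earlier "set default character value to none" change, is that when no character count is supplied it should be derived from the prompt text rather than assumed. A hedged sketch with illustrative names:

```python
# Sketch only: fall back to counting the prompt text when no explicit
# character count is provided, instead of defaulting to a fixed number.
from typing import Optional


def resolve_prompt_characters(prompt: str, prompt_characters: Optional[int] = None) -> int:
    if prompt_characters is not None:
        return prompt_characters
    return len(prompt)  # derive the count from the actual prompt text


print(resolve_prompt_characters("Hello Gemini"))       # 12
print(resolve_prompt_characters("Hello Gemini", 100))  # 100
```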

* fix(cost_calculator.py): fix cost calc for tts models
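
Speech models such as tts-1 are typically priced on the input text rather than on tokens, so the cost calculation cannot rely on completion token counts. A toy example of character-based pricing; the rate is made up, not a value from model_prices_and_context_window.json.

```python
# Toy example: speech synthesis billed per input character, with a placeholder price.
def tts_cost(input_text: str, cost_per_character: float = 1.5e-05) -> float:
    return len(input_text) * cost_per_character


print(round(tts_cost("Hello, world!"), 6))  # 13 characters -> 0.000195
```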

Author: Krish Dholakia (committed by GitHub)
Date: 2024-10-10 00:42:11 -07:00
Parent: 60baa65e0e
Commit: 6005450c8f
16 changed files with 788 additions and 534 deletions


@@ -1711,31 +1711,6 @@ def test_completion_perplexity_api():
 # test_completion_perplexity_api()
 
 
-@pytest.mark.skip(
-    reason="too many requests. Hitting gemini rate limits. Convert to mock test."
-)
-def test_completion_pydantic_obj_2():
-    from pydantic import BaseModel
-
-    litellm.set_verbose = True
-
-    class CalendarEvent(BaseModel):
-        name: str
-        date: str
-        participants: list[str]
-
-    class EventsList(BaseModel):
-        events: list[CalendarEvent]
-
-    messages = [
-        {"role": "user", "content": "List important events from the 20th century."}
-    ]
-
-    response = litellm.completion(
-        model="gemini/gemini-1.5-pro", messages=messages, response_format=EventsList
-    )
-
-
 @pytest.mark.skip(reason="this test is flaky")
 def test_completion_perplexity_api_2():
     try:
@@ -4573,12 +4548,7 @@ async def test_completion_ai21_chat():
 @pytest.mark.parametrize(
     "model",
-    [
-        "gpt-4o",
-        "azure/chatgpt-v-2",
-        "claude-3-sonnet-20240229",
-        "fireworks_ai/mixtral-8x7b-instruct",
-    ],
+    ["gpt-4o", "azure/chatgpt-v-2", "claude-3-sonnet-20240229"],
 )
 @pytest.mark.parametrize(
     "stream",
@@ -4594,5 +4564,7 @@ def test_completion_response_ratelimit_headers(model, stream):
     additional_headers = hidden_params.get("additional_headers", {})
     print(additional_headers)
+    for k, v in additional_headers.items():
+        assert v != "None" and v is not None
     assert "x-ratelimit-remaining-requests" in additional_headers
     assert "x-ratelimit-remaining-tokens" in additional_headers