Litellm Minor Fixes & Improvements (10/12/2024) (#6179)
* build(model_prices_and_context_window.json): add bedrock llama3.2 pricing
* build(model_prices_and_context_window.json): add bedrock cross region inference pricing
* Revert "(perf) move s3 logging to Batch logging + async [94% faster perf under 100 RPS on 1 litellm instance] (#6165)"
This reverts commit 2a5624af47.
* add azure/gpt-4o-2024-05-13 (#6174)
* LiteLLM Minor Fixes & Improvements (10/10/2024) (#6158)
* refactor(vertex_ai_partner_models/anthropic): refactor anthropic to use partner model logic
* fix(vertex_ai/): support passing custom api base to partner models
Fixes https://github.com/BerriAI/litellm/issues/4317
* fix(proxy_server.py): Fix prometheus premium user check logic
* docs(prometheus.md): update quick start docs
* fix(custom_llm.py): support passing dynamic api key + api base
* fix(realtime_api/main.py): Add request/response logging for realtime api endpoints
Closes https://github.com/BerriAI/litellm/issues/6081
* feat(openai/realtime): add openai realtime api logging
Closes https://github.com/BerriAI/litellm/issues/6081
* fix(realtime_streaming.py): fix linting errors
* fix(realtime_streaming.py): fix linting errors
* fix: fix linting errors
* fix pattern match router
* Add literalai in the sidebar observability category (#6163)
* fix: add literalai in the sidebar
* fix: typo
* update (#6160)
* Feat: Add Langtrace integration (#5341)
* Feat: Add Langtrace integration
* add langtrace service name
* fix timestamps for traces
* add tests
* Discard Callback + use existing otel logger
* cleanup
* remove print statements
* remove callback
* add docs
* docs
* add logging docs
* format logging
* remove emoji and add litellm proxy example
* format logging
* format `logging.md`
* add langtrace docs to logging.md
* sync conflict
* docs fix
* (perf) move s3 logging to Batch logging + async [94% faster perf under 100 RPS on 1 litellm instance] (#6165)
* fix move s3 to use customLogger
* add basic s3 logging test
* add s3 to custom logger compatible
* use batch logger for s3
* s3 set flush interval and batch size
* fix s3 logging
* add notes on s3 logging
* fix s3 logging
* add basic s3 logging test
* fix s3 type errors
* add test for sync logging on s3
* fix: fix to debug log
---------
Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: Willy Douhard <willy.douhard@gmail.com>
Co-authored-by: yujonglee <yujonglee.dev@gmail.com>
Co-authored-by: Ali Waleed <ali@scale3labs.com>
* docs(custom_llm_server.md): update doc on passing custom params
* fix(pass_through_endpoints.py): don't require headers
Fixes https://github.com/BerriAI/litellm/issues/6128
* feat(utils.py): add support for caching rerank endpoints
Closes https://github.com/BerriAI/litellm/issues/6144
* feat(litellm_logging.py): add response headers for failed requests
Closes https://github.com/BerriAI/litellm/issues/6159
---------
Co-authored-by: Ishaan Jaff <ishaanjaffer0324@gmail.com>
Co-authored-by: Willy Douhard <willy.douhard@gmail.com>
Co-authored-by: yujonglee <yujonglee.dev@gmail.com>
Co-authored-by: Ali Waleed <ali@scale3labs.com>

parent: 2cb65b450d
commit: 2acb0c0675
18 changed files with 533 additions and 82 deletions

@@ -5,6 +5,7 @@ import traceback
 import uuid
 
 from dotenv import load_dotenv
+from test_rerank import assert_response_shape
 
 load_dotenv()
 import os

@@ -2234,3 +2235,56 @@ def test_logging_turn_off_message_logging_streaming():
     mock_client.assert_called_once()
 
     assert mock_client.call_args.args[0].choices[0].message.content == "hello"
+
+
+@pytest.mark.asyncio()
+@pytest.mark.parametrize("sync_mode", [True, False])
+@pytest.mark.parametrize(
+    "top_n_1, top_n_2, expect_cache_hit",
+    [
+        (3, 3, True),
+        (3, None, False),
+    ],
+)
+async def test_basic_rerank_caching(sync_mode, top_n_1, top_n_2, expect_cache_hit):
+    litellm.set_verbose = True
+    litellm.cache = Cache(type="local")
+
+    if sync_mode is True:
+        for idx in range(2):
+            if idx == 0:
+                top_n = top_n_1
+            else:
+                top_n = top_n_2
+            response = litellm.rerank(
+                model="cohere/rerank-english-v3.0",
+                query="hello",
+                documents=["hello", "world"],
+                top_n=top_n,
+            )
+    else:
+        for idx in range(2):
+            if idx == 0:
+                top_n = top_n_1
+            else:
+                top_n = top_n_2
+            response = await litellm.arerank(
+                model="cohere/rerank-english-v3.0",
+                query="hello",
+                documents=["hello", "world"],
+                top_n=top_n,
+            )
+
+            await asyncio.sleep(1)
+
+    if expect_cache_hit is True:
+        assert "cache_key" in response._hidden_params
+    else:
+        assert "cache_key" not in response._hidden_params
+
+    print("re rank response: ", response)
+
+    assert response.id is not None
+    assert response.results is not None
+
+    assert_response_shape(response, custom_llm_provider="cohere")

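For context, a minimal usage sketch of the new rerank caching (#6144) outside the test harness. It mirrors the sync path of the test above; the model and inputs come from the test, and it assumes `Cache` is importable from litellm.caching as in litellm's tests:

import litellm
from litellm.caching import Cache

# Enable litellm's local in-memory cache; rerank calls with identical
# arguments are then served from cache on repeat.
litellm.cache = Cache(type="local")

for _ in range(2):
    response = litellm.rerank(
        model="cohere/rerank-english-v3.0",
        query="hello",
        documents=["hello", "world"],
        top_n=3,
    )

# The repeated identical call carries the cache key in its hidden params.
assert "cache_key" in response._hidden_params
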
@@ -1385,9 +1385,9 @@ def test_logging_standard_payload_failure_call():
         resp = litellm.completion(
             model="gpt-3.5-turbo",
             messages=[{"role": "user", "content": "Hey, how's it going?"}],
             mock_response="litellm.RateLimitError",
             api_key="my-bad-api-key",
         )
-    except litellm.RateLimitError:
+    except litellm.AuthenticationError:
         pass
 
     mock_client.assert_called_once()

@@ -1401,6 +1401,7 @@ def test_logging_standard_payload_failure_call():
     standard_logging_object: StandardLoggingPayload = mock_client.call_args.kwargs[
         "kwargs"
     ]["standard_logging_object"]
+    assert "additional_headers" in standard_logging_object["hidden_params"]
 
 
 @pytest.mark.parametrize("stream", [True, False])

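A minimal sketch of how a caller could consume the new additional_headers field on failed requests, assuming a custom callback registered via litellm.callbacks. The logger class name is hypothetical; the standard_logging_object path follows the assertions above:

import litellm
from litellm.integrations.custom_logger import CustomLogger


class HeaderInspectingLogger(CustomLogger):
    def log_failure_event(self, kwargs, response_obj, start_time, end_time):
        # After this change, failed requests also carry the provider's
        # response headers under hidden_params.
        slo = kwargs.get("standard_logging_object", {})
        headers = slo.get("hidden_params", {}).get("additional_headers")
        print("provider response headers on failure:", headers)


litellm.callbacks = [HeaderInspectingLogger()]
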
@@ -368,7 +368,7 @@ async def test_simple_image_generation_async():
 
 
 @pytest.mark.asyncio
-async def test_image_generation_async_with_api_key_and_api_base():
+async def test_image_generation_async_additional_params():
     my_custom_llm = MyCustomLLM()
     litellm.custom_provider_map = [
         {"provider": "custom_llm", "custom_handler": my_custom_llm}

@@ -383,6 +383,7 @@ async def test_image_generation_async_with_api_key_and_api_base():
             prompt="Hello world",
             api_key="my-api-key",
             api_base="my-api-base",
+            my_custom_param="my-custom-param",
         )
 
         print(resp)

@@ -393,3 +394,6 @@ async def test_image_generation_async_with_api_key_and_api_base():
 
     mock_client.call_args.kwargs["api_key"] == "my-api-key"
     mock_client.call_args.kwargs["api_base"] == "my-api-base"
+    mock_client.call_args.kwargs["optional_params"] == {
+        "my_custom_param": "my-custom-param"
+    }

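A minimal sketch of the custom-handler flow the renamed test exercises: registering a CustomLLM subclass, then passing a dynamic api_key/api_base plus an arbitrary extra kwarg. The handler body and stub response are illustrative, not litellm's implementation; my_custom_param is the pass-through optional param from the diff above:

import time

import litellm
from litellm import CustomLLM
from litellm.types.utils import ImageResponse


class MyCustomLLM(CustomLLM):
    def image_generation(self, *args, **kwargs) -> ImageResponse:
        # After this change, api_key / api_base are forwarded here, and
        # unknown kwargs such as my_custom_param land in optional_params.
        return ImageResponse(created=int(time.time()), data=[])


litellm.custom_provider_map = [
    {"provider": "custom_llm", "custom_handler": MyCustomLLM()}
]

resp = litellm.image_generation(
    model="custom_llm/my-fake-model",
    prompt="Hello world",
    api_key="my-api-key",
    api_base="my-api-base",
    my_custom_param="my-custom-param",
)
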
@@ -39,6 +39,36 @@ def client():
     return TestClient(app)
+
+
+@pytest.mark.asyncio
+async def test_pass_through_endpoint_no_headers(client, monkeypatch):
+    # Mock the httpx.AsyncClient.request method
+    monkeypatch.setattr("httpx.AsyncClient.request", mock_request)
+    import litellm
+
+    # Define a pass-through endpoint
+    pass_through_endpoints = [
+        {
+            "path": "/test-endpoint",
+            "target": "https://api.example.com/v1/chat/completions",
+        }
+    ]
+
+    # Initialize the pass-through endpoint
+    await initialize_pass_through_endpoints(pass_through_endpoints)
+    general_settings: dict = (
+        getattr(litellm.proxy.proxy_server, "general_settings", {}) or {}
+    )
+    general_settings.update({"pass_through_endpoints": pass_through_endpoints})
+    setattr(litellm.proxy.proxy_server, "general_settings", general_settings)
+
+    # Make a request to the pass-through endpoint
+    response = client.post("/test-endpoint", json={"prompt": "Hello, world!"})
+
+    # Assert the response
+    assert response.status_code == 200
+    assert response.json() == {"message": "Mocked response"}
 
 
 @pytest.mark.asyncio
 async def test_pass_through_endpoint(client, monkeypatch):
     # Mock the httpx.AsyncClient.request method

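For reference, the shape of a pass-through endpoint entry after this fix (#6128): the "headers" key is now optional and can be omitted entirely, as the new test does (the target URL is the test's placeholder):

pass_through_endpoints = [
    {
        "path": "/test-endpoint",
        "target": "https://api.example.com/v1/chat/completions",
        # "headers": {...},  # optional since this change
    }
]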