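# Manual memory-profiling script for litellm's async streaming path
# (CustomStreamWrapper). The whole file is kept commented out, so it appears
# intended to be run by hand with `memory_profiler` rather than collected by
# the test suite. Context: the async stream + completion memory leak tracked
# in https://github.com/BerriAI/litellm/issues/6404.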
# import io
# import os
# import sys

# sys.path.insert(0, os.path.abspath("../.."))

# import litellm
# from memory_profiler import profile
# from litellm.utils import (
#     ModelResponseIterator,
#     ModelResponseListIterator,
#     CustomStreamWrapper,
# )
# from litellm.types.utils import ModelResponse, Choices, Message
# import time
# import pytest

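# model_response_list_factory() below builds a fixed, pre-recorded stream: a
# list of chat-completion chunk dicts (including two Azure-style content-filter
# annotation chunks with empty ids) is converted into litellm ModelResponse
# objects and wrapped in a ModelResponseListIterator, so the stream can be
# replayed in-process without any network calls.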
# # @app.post("/debug")
# # async def debug(body: ExampleRequest) -> str:
# #     return await main_logic(body.query)


# def model_response_list_factory():
#     chunks = [
#         {
#             "id": "chatcmpl-9SQxdH5hODqkWyJopWlaVOOUnFwlj",
#             "choices": [
#                 {
#                     "delta": {"content": "", "role": "assistant"},
#                     "finish_reason": None,
#                     "index": 0,
#                 }
#             ],
#             "created": 1716563849,
#             "model": "gpt-4o-2024-05-13",
#             "object": "chat.completion.chunk",
#             "system_fingerprint": "fp_5f4bad809a",
#         },
#         {
#             "id": "chatcmpl-9SQxdH5hODqkWyJopWlaVOOUnFwlj",
#             "choices": [
#                 {"delta": {"content": "This"}, "finish_reason": None, "index": 0}
#             ],
#             "created": 1716563849,
#             "model": "gpt-4o-2024-05-13",
#             "object": "chat.completion.chunk",
#             "system_fingerprint": "fp_5f4bad809a",
#         },
#         {
#             "id": "chatcmpl-9SQxdH5hODqkWyJopWlaVOOUnFwlj",
#             "choices": [
#                 {"delta": {"content": " is"}, "finish_reason": None, "index": 0}
#             ],
#             "created": 1716563849,
#             "model": "gpt-4o-2024-05-13",
#             "object": "chat.completion.chunk",
#             "system_fingerprint": "fp_5f4bad809a",
#         },
#         {
#             "id": "chatcmpl-9SQxdH5hODqkWyJopWlaVOOUnFwlj",
#             "choices": [
#                 {"delta": {"content": " a"}, "finish_reason": None, "index": 0}
#             ],
#             "created": 1716563849,
#             "model": "gpt-4o-2024-05-13",
#             "object": "chat.completion.chunk",
#             "system_fingerprint": "fp_5f4bad809a",
#         },
#         {
#             "id": "chatcmpl-9SQxdH5hODqkWyJopWlaVOOUnFwlj",
#             "choices": [
#                 {"delta": {"content": " dummy"}, "finish_reason": None, "index": 0}
#             ],
#             "created": 1716563849,
#             "model": "gpt-4o-2024-05-13",
#             "object": "chat.completion.chunk",
#             "system_fingerprint": "fp_5f4bad809a",
#         },
#         {
#             "id": "chatcmpl-9SQxdH5hODqkWyJopWlaVOOUnFwlj",
#             "choices": [
#                 {
#                     "delta": {"content": " response"},
#                     "finish_reason": None,
#                     "index": 0,
#                 }
#             ],
#             "created": 1716563849,
#             "model": "gpt-4o-2024-05-13",
#             "object": "chat.completion.chunk",
#             "system_fingerprint": "fp_5f4bad809a",
#         },
#         {
#             "id": "",
#             "choices": [
#                 {
#                     "finish_reason": None,
#                     "index": 0,
#                     "content_filter_offsets": {
#                         "check_offset": 35159,
#                         "start_offset": 35159,
#                         "end_offset": 36150,
#                     },
#                     "content_filter_results": {
#                         "hate": {"filtered": False, "severity": "safe"},
#                         "self_harm": {"filtered": False, "severity": "safe"},
#                         "sexual": {"filtered": False, "severity": "safe"},
#                         "violence": {"filtered": False, "severity": "safe"},
#                     },
#                 }
#             ],
#             "created": 0,
#             "model": "",
#             "object": "",
#         },
#         {
#             "id": "chatcmpl-9SQxdH5hODqkWyJopWlaVOOUnFwlj",
#             "choices": [{"delta": {"content": "."}, "finish_reason": None, "index": 0}],
#             "created": 1716563849,
#             "model": "gpt-4o-2024-05-13",
#             "object": "chat.completion.chunk",
#             "system_fingerprint": "fp_5f4bad809a",
#         },
#         {
#             "id": "chatcmpl-9SQxdH5hODqkWyJopWlaVOOUnFwlj",
#             "choices": [{"delta": {}, "finish_reason": "stop", "index": 0}],
#             "created": 1716563849,
#             "model": "gpt-4o-2024-05-13",
#             "object": "chat.completion.chunk",
#             "system_fingerprint": "fp_5f4bad809a",
#         },
#         {
#             "id": "",
#             "choices": [
#                 {
#                     "finish_reason": None,
#                     "index": 0,
#                     "content_filter_offsets": {
#                         "check_offset": 36150,
#                         "start_offset": 36060,
#                         "end_offset": 37029,
#                     },
#                     "content_filter_results": {
#                         "hate": {"filtered": False, "severity": "safe"},
#                         "self_harm": {"filtered": False, "severity": "safe"},
#                         "sexual": {"filtered": False, "severity": "safe"},
#                         "violence": {"filtered": False, "severity": "safe"},
#                     },
#                 }
#             ],
#             "created": 0,
#             "model": "",
#             "object": "",
#         },
#     ]

#     chunk_list = []
#     for chunk in chunks:
#         new_chunk = litellm.ModelResponse(stream=True, id=chunk["id"])
#         if "choices" in chunk and isinstance(chunk["choices"], list):
#             new_choices = []
#             for choice in chunk["choices"]:
#                 if isinstance(choice, litellm.utils.StreamingChoices):
#                     _new_choice = choice
#                 elif isinstance(choice, dict):
#                     _new_choice = litellm.utils.StreamingChoices(**choice)
#                 new_choices.append(_new_choice)
#             new_chunk.choices = new_choices
#         chunk_list.append(new_chunk)

#     return ModelResponseListIterator(model_responses=chunk_list)

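# mock_completion() wraps the replayed chunks in a CustomStreamWrapper with
# custom_llm_provider="cached_response", roughly what a streaming
# litellm.acompletion() call returns, while keeping everything in-process.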
# async def mock_completion(*args, **kwargs):
#     completion_stream = model_response_list_factory()
#     return litellm.CustomStreamWrapper(
#         completion_stream=completion_stream,
#         model="gpt-4-0613",
#         custom_llm_provider="cached_response",
#         logging_obj=litellm.Logging(
#             model="gpt-4-0613",
#             messages=[{"role": "user", "content": "Hey"}],
#             stream=True,
#             call_type="completion",
#             start_time=time.time(),
#             litellm_call_id="12345",
#             function_id="1245",
#         ),
#     )

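# main_logic() is the function being profiled: memory_profiler's @profile
# decorator reports line-by-line memory usage while the async stream is
# consumed and the delta contents are concatenated into a single string.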
# @profile
# async def main_logic() -> str:
#     stream = await mock_completion()
#     result = ""
#     async for chunk in stream:
#         result += chunk.choices[0].delta.content or ""
#     return result

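# Driver: replay the stream 100 times so that any per-request memory growth
# shows up in the profiler output. A sketch of how this could be invoked,
# assuming the file is saved as test_mem_leak.py (the filename is illustrative):
#
#   python -m memory_profiler test_mem_leak.py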
# import asyncio

# for _ in range(100):
#     asyncio.run(main_logic())

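# The pytest-based variant below (also disabled) is a sketch for asserting on
# memory_profiler output automatically: it parses the "MiB" columns from the
# captured report and fails if any per-line increment exceeds ~1 MiB.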
# # @pytest.mark.asyncio
# # def test_memory_profile(capsys):
# #     # Run the async function
# #     result = asyncio.run(main_logic())

# #     # Verify the result
# #     assert result == "This is a dummy response."

# #     # Capture the output
# #     captured = capsys.readouterr()

# #     # Print memory output for debugging
# #     print("Memory Profiler Output:")
# #     print(f"captured out: {captured.out}")

# #     # Basic memory leak checks
# #     for idx, line in enumerate(captured.out.split("\n")):
# #         if idx % 2 == 0 and "MiB" in line:
# #             print(f"line: {line}")

# #     mem_lines = [line for line in captured.out.split("\n") if "MiB" in line]
# #     print(mem_lines)

# #     # Ensure we have some memory lines
# #     assert len(mem_lines) > 0, "No memory profiler output found"

# #     # Optional: add more specific memory leak detection
# #     for line in mem_lines:
# #         # Extract the memory increment column
# #         parts = line.split()
# #         if len(parts) >= 3:
# #             try:
# #                 mem_increment = float(parts[2].replace("MiB", ""))
# #                 # Assert that the memory increment stays below a reasonable threshold
# #                 assert mem_increment < 1.0, f"Potential memory leak detected: {line}"
# #             except (ValueError, IndexError):
# #                 pass  # Skip lines that don't match the expected format