mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 18:54:30 +00:00)
(feat) Add usage tracking for streaming /anthropic passthrough routes (#6842)
* use 1 file for AnthropicPassthroughLoggingHandler
* add support for anthropic streaming usage tracking
* ci/cd run again
* fix - add real streaming for anthropic pass through
* remove unused function stream_response
* working anthropic streaming logging
* fix code quality
* fix use 1 file for vertex success handler
* use helper for _handle_logging_vertex_collected_chunks
* enforce vertex streaming to use sse for streaming
* test test_basic_vertex_ai_pass_through_streaming_with_spendlog
* fix type hints
* add comment
* fix linting
* add pass through logging unit testing
parent 920f4c9f82
commit b8af46e1a2
12 changed files with 688 additions and 295 deletions
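The commit message above describes collecting streamed chunks on /anthropic passthrough routes and turning them into usage records. As background, here is a minimal sketch of how token usage can be read out of Anthropic's documented streaming SSE events (message_start carries input_tokens, message_delta carries the running output_tokens total). The helper name and payload shapes are illustrative only and are not taken from this PR; the real accounting lives in AnthropicPassthroughLoggingHandler.

import json
from typing import Dict, List


def extract_anthropic_usage(sse_lines: List[str]) -> Dict[str, int]:
    """Sum token usage from collected Anthropic SSE 'data:' lines.

    Illustrative sketch based on Anthropic's documented streaming events:
    message_start reports input_tokens, message_delta reports the
    cumulative output_tokens.
    """
    usage = {"input_tokens": 0, "output_tokens": 0}
    for line in sse_lines:
        if not line.startswith("data:"):
            continue  # skip 'event:' lines and keep-alives
        try:
            event = json.loads(line[len("data:"):].strip())
        except json.JSONDecodeError:
            continue
        if event.get("type") == "message_start":
            usage["input_tokens"] = (
                event.get("message", {}).get("usage", {}).get("input_tokens", 0)
            )
        elif event.get("type") == "message_delta":
            usage["output_tokens"] = event.get("usage", {}).get("output_tokens", 0)
    return usage


# Example with two events shaped like Anthropic's streaming payloads:
chunks = [
    'data: {"type": "message_start", "message": {"usage": {"input_tokens": 12}}}',
    'data: {"type": "message_delta", "delta": {}, "usage": {"output_tokens": 34}}',
]
print(extract_anthropic_usage(chunks))  # {'input_tokens': 12, 'output_tokens': 34}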
@@ -4,114 +4,116 @@ from datetime import datetime
from enum import Enum
from typing import AsyncIterable, Dict, List, Optional, Union

import httpx

import litellm
from litellm._logging import verbose_proxy_logger
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.llms.anthropic.chat.handler import (
    ModelResponseIterator as AnthropicIterator,
)
from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
    ModelResponseIterator as VertexAIIterator,
)
from litellm.types.utils import GenericStreamingChunk

from .llm_provider_handlers.anthropic_passthrough_logging_handler import (
    AnthropicPassthroughLoggingHandler,
)
from .llm_provider_handlers.vertex_passthrough_logging_handler import (
    VertexPassthroughLoggingHandler,
)
from .success_handler import PassThroughEndpointLogging
from .types import EndpointType


def get_litellm_chunk(
    model_iterator: VertexAIIterator,
    custom_stream_wrapper: litellm.utils.CustomStreamWrapper,
    chunk_dict: Dict,
) -> Optional[Dict]:

    generic_chunk: GenericStreamingChunk = model_iterator.chunk_parser(chunk_dict)
    if generic_chunk:
        return custom_stream_wrapper.chunk_creator(chunk=generic_chunk)
    return None


def get_iterator_class_from_endpoint_type(
    endpoint_type: EndpointType,
) -> Optional[type]:
    if endpoint_type == EndpointType.VERTEX_AI:
        return VertexAIIterator
    return None


async def chunk_processor(
    aiter_bytes: AsyncIterable[bytes],
    response: httpx.Response,
    request_body: Optional[dict],
    litellm_logging_obj: LiteLLMLoggingObj,
    endpoint_type: EndpointType,
    start_time: datetime,
    passthrough_success_handler_obj: PassThroughEndpointLogging,
    url_route: str,
) -> AsyncIterable[bytes]:
):
    """
    - Yields chunks from the response
    - Collect non-empty chunks for post-processing (logging)
    """
    collected_chunks: List[str] = []  # List to store all chunks
    try:
        async for chunk in response.aiter_lines():
            verbose_proxy_logger.debug(f"Processing chunk: {chunk}")
            if not chunk:
                continue

    iteratorClass = get_iterator_class_from_endpoint_type(endpoint_type)
    if iteratorClass is None:
        # Generic endpoint - litellm does not do any tracking / logging for this
        async for chunk in aiter_bytes:
            yield chunk
    else:
        # known streaming endpoint - litellm will do tracking / logging for this
        model_iterator = iteratorClass(
            sync_stream=False, streaming_response=aiter_bytes
        )
        custom_stream_wrapper = litellm.utils.CustomStreamWrapper(
            completion_stream=aiter_bytes, model=None, logging_obj=litellm_logging_obj
        )
        buffer = b""
        all_chunks = []
        async for chunk in aiter_bytes:
            buffer += chunk
            try:
                _decoded_chunk = chunk.decode("utf-8")
                _chunk_dict = json.loads(_decoded_chunk)
                litellm_chunk = get_litellm_chunk(
                    model_iterator, custom_stream_wrapper, _chunk_dict
                )
                if litellm_chunk:
                    all_chunks.append(litellm_chunk)
            except json.JSONDecodeError:
                pass
            finally:
                yield chunk  # Yield the original bytes
            # Handle SSE format - pass through the raw SSE format
            if isinstance(chunk, bytes):
                chunk = chunk.decode("utf-8")

        # Process any remaining data in the buffer
        if buffer:
            try:
                _chunk_dict = json.loads(buffer.decode("utf-8"))
            # Store the chunk for post-processing
            if chunk.strip():  # Only store non-empty chunks
                collected_chunks.append(chunk)
            yield f"{chunk}\n"

                if isinstance(_chunk_dict, list):
                    for _chunk in _chunk_dict:
                        litellm_chunk = get_litellm_chunk(
                            model_iterator, custom_stream_wrapper, _chunk
                        )
                        if litellm_chunk:
                            all_chunks.append(litellm_chunk)
                elif isinstance(_chunk_dict, dict):
                    litellm_chunk = get_litellm_chunk(
                        model_iterator, custom_stream_wrapper, _chunk_dict
                    )
                    if litellm_chunk:
                        all_chunks.append(litellm_chunk)
            except json.JSONDecodeError:
                pass

        complete_streaming_response: Optional[
            Union[litellm.ModelResponse, litellm.TextCompletionResponse]
        ] = litellm.stream_chunk_builder(chunks=all_chunks)
        if complete_streaming_response is None:
            complete_streaming_response = litellm.ModelResponse()
        # After all chunks are processed, handle post-processing
        end_time = datetime.now()

        if passthrough_success_handler_obj.is_vertex_route(url_route):
            _model = passthrough_success_handler_obj.extract_model_from_url(url_route)
            complete_streaming_response.model = _model
            litellm_logging_obj.model = _model
            litellm_logging_obj.model_call_details["model"] = _model

        asyncio.create_task(
            litellm_logging_obj.async_success_handler(
                result=complete_streaming_response,
                start_time=start_time,
                end_time=end_time,
            )
        await _route_streaming_logging_to_handler(
            litellm_logging_obj=litellm_logging_obj,
            passthrough_success_handler_obj=passthrough_success_handler_obj,
            url_route=url_route,
            request_body=request_body or {},
            endpoint_type=endpoint_type,
            start_time=start_time,
            all_chunks=collected_chunks,
            end_time=end_time,
        )

    except Exception as e:
        verbose_proxy_logger.error(f"Error in chunk_processor: {str(e)}")
        raise


async def _route_streaming_logging_to_handler(
    litellm_logging_obj: LiteLLMLoggingObj,
    passthrough_success_handler_obj: PassThroughEndpointLogging,
    url_route: str,
    request_body: dict,
    endpoint_type: EndpointType,
    start_time: datetime,
    all_chunks: List[str],
    end_time: datetime,
):
    """
    Route the logging for the collected chunks to the appropriate handler

    Supported endpoint types:
    - Anthropic
    - Vertex AI
    """
    if endpoint_type == EndpointType.ANTHROPIC:
        await AnthropicPassthroughLoggingHandler._handle_logging_anthropic_collected_chunks(
            litellm_logging_obj=litellm_logging_obj,
            passthrough_success_handler_obj=passthrough_success_handler_obj,
            url_route=url_route,
            request_body=request_body,
            endpoint_type=endpoint_type,
            start_time=start_time,
            all_chunks=all_chunks,
            end_time=end_time,
        )
    elif endpoint_type == EndpointType.VERTEX_AI:
        await VertexPassthroughLoggingHandler._handle_logging_vertex_collected_chunks(
            litellm_logging_obj=litellm_logging_obj,
            passthrough_success_handler_obj=passthrough_success_handler_obj,
            url_route=url_route,
            request_body=request_body,
            endpoint_type=endpoint_type,
            start_time=start_time,
            all_chunks=all_chunks,
            end_time=end_time,
        )
    elif endpoint_type == EndpointType.GENERIC:
        # No logging is supported for generic streaming endpoints
        pass
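The new chunk_processor in the hunk above follows a collect-while-yield pattern: the caller keeps receiving the raw SSE lines unchanged, while every non-empty line is also stored for post-stream logging. A standalone sketch of that pattern, with illustrative names and a fake upstream standing in for the httpx response, assuming nothing beyond the standard library:

import asyncio
from typing import AsyncIterable, List


async def passthrough_and_collect(
    upstream: AsyncIterable[str], collected: List[str]
) -> AsyncIterable[str]:
    # Yield each upstream line to the caller untouched, while recording
    # non-empty lines so they can be logged after the stream ends.
    async for line in upstream:
        if line.strip():
            collected.append(line)
        yield f"{line}\n"


async def main() -> None:
    async def fake_upstream():
        # Stand-in for response.aiter_lines(); includes an empty keep-alive line.
        for line in ['event: message_start', 'data: {"type": "message_start"}', ""]:
            yield line

    collected: List[str] = []
    async for out in passthrough_and_collect(fake_upstream(), collected):
        print(out, end="")  # what the client sees, unchanged
    print("collected for logging:", collected)  # what the logger sees afterwards


asyncio.run(main())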
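_route_streaming_logging_to_handler assumes the passthrough URL has already been classified into an EndpointType (ANTHROPIC, VERTEX_AI, or GENERIC, the members referenced in this file). That classification is not part of this hunk; a hypothetical route-prefix version might look like the following, where the enum values and prefixes are assumptions rather than litellm's implementation:

from enum import Enum


class EndpointType(str, Enum):
    # Mirrors the members referenced in the diff; the string values are assumed.
    ANTHROPIC = "anthropic"
    VERTEX_AI = "vertex-ai"
    GENERIC = "generic"


def classify_route(url_route: str) -> EndpointType:
    # Hypothetical helper, not from this PR: pick a logging handler
    # based on the passthrough route.
    if url_route.startswith("/anthropic/"):
        return EndpointType.ANTHROPIC
    if "aiplatform.googleapis.com" in url_route or url_route.startswith("/vertex-ai/"):
        return EndpointType.VERTEX_AI
    return EndpointType.GENERIC


print(classify_route("/anthropic/v1/messages"))  # EndpointType.ANTHROPIC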