Merge branch 'main' into litellm_dev_04_22_2025_p1

This commit is contained in:
Krish Dholakia 2025-04-22 23:58:33 -07:00 committed by GitHub
commit 1d0826255c
49 changed files with 3174 additions and 408 deletions

View file

@ -0,0 +1,83 @@
# 🖇️ AgentOps - LLM Observability Platform
:::tip
This integration is community-maintained. Please open an issue if you run into a bug:
https://github.com/BerriAI/litellm
:::
[AgentOps](https://docs.agentops.ai) is an observability platform that enables tracing and monitoring of LLM calls, providing detailed insights into your AI operations.
## Using AgentOps with LiteLLM
LiteLLM provides `success_callback` and `failure_callback` hooks, allowing you to easily integrate AgentOps for comprehensive tracing and monitoring of your LLM operations.
### Integration
Use just a few lines of code to instantly trace your responses **across all providers** with AgentOps.
Get your AgentOps API key from https://app.agentops.ai/:
```python
import litellm
# Configure LiteLLM to use AgentOps
litellm.success_callback = ["agentops"]
# Make your LLM calls as usual
response = litellm.completion(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello, how are you?"}],
)
```
Complete Code:
```python
import os
import litellm
from litellm import completion
# Set env variables
os.environ["OPENAI_API_KEY"] = "your-openai-key"
os.environ["AGENTOPS_API_KEY"] = "your-agentops-api-key"
# Configure LiteLLM to use AgentOps
litellm.success_callback = ["agentops"]
# OpenAI call
response = completion(
model="gpt-4",
messages=[{"role": "user", "content": "Hi 👋 - I'm OpenAI"}],
)
print(response)
```
### Configuration Options
The AgentOps integration can be configured through environment variables:
- `AGENTOPS_API_KEY` (str, optional): Your AgentOps API key
- `AGENTOPS_ENVIRONMENT` (str, optional): Deployment environment (defaults to "production")
- `AGENTOPS_SERVICE_NAME` (str, optional): Service name for tracing (defaults to "agentops")
### Advanced Usage
You can configure additional settings through environment variables:
```python
import os
import litellm
# Configure AgentOps settings
os.environ["AGENTOPS_API_KEY"] = "your-agentops-api-key"
os.environ["AGENTOPS_ENVIRONMENT"] = "staging"
os.environ["AGENTOPS_SERVICE_NAME"] = "my-service"
# Enable AgentOps tracing
litellm.success_callback = ["agentops"]
```
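To also capture failed calls, AgentOps can be registered as a failure callback. A minimal sketch, assuming the `"agentops"` callback name is accepted for failures as well:
```python
import litellm

# Assumption: "agentops" is also accepted as a failure callback
litellm.failure_callback = ["agentops"]
```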
### Support
For issues or questions, please refer to:
- [AgentOps Documentation](https://docs.agentops.ai)
- [LiteLLM Documentation](https://docs.litellm.ai)

View file

@ -299,6 +299,9 @@ router_settings:
|------|-------------|
| ACTIONS_ID_TOKEN_REQUEST_TOKEN | Token for requesting ID in GitHub Actions
| ACTIONS_ID_TOKEN_REQUEST_URL | URL for requesting ID token in GitHub Actions
| AGENTOPS_ENVIRONMENT | Environment for AgentOps logging integration
| AGENTOPS_API_KEY | API Key for AgentOps logging integration
| AGENTOPS_SERVICE_NAME | Service Name for AgentOps logging integration
| AISPEND_ACCOUNT_ID | Account ID for AI Spend
| AISPEND_API_KEY | API Key for AI Spend
| ALLOWED_EMAIL_DOMAINS | List of email domains allowed for access

View file

@ -411,6 +411,7 @@ const sidebars = {
type: "category",
label: "Logging & Observability",
items: [
"observability/agentops_integration",
"observability/langfuse_integration",
"observability/lunary_integration",
"observability/mlflow",

View file

@ -113,6 +113,7 @@ _custom_logger_compatible_callbacks_literal = Literal[
"pagerduty",
"humanloop",
"gcs_pubsub",
"agentops",
"anthropic_cache_control_hook",
]
logged_real_time_event_types: Optional[Union[List[str], Literal["*"]]] = None

View file

@ -45,6 +45,14 @@ class SpanAttributes:
"""
The name of the model being used.
"""
LLM_PROVIDER = "llm.provider"
"""
The provider of the model, such as OpenAI, Azure, Google, etc.
"""
LLM_SYSTEM = "llm.system"
"""
The AI product as identified by the client or server.
"""
LLM_PROMPTS = "llm.prompts"
"""
Prompts provided to a completions API.
@ -65,15 +73,40 @@ class SpanAttributes:
"""
Number of tokens in the prompt.
"""
LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_WRITE = "llm.token_count.prompt_details.cache_write"
"""
Number of tokens in the prompt that were written to cache.
"""
LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ = "llm.token_count.prompt_details.cache_read"
"""
Number of tokens in the prompt that were read from cache.
"""
LLM_TOKEN_COUNT_PROMPT_DETAILS_AUDIO = "llm.token_count.prompt_details.audio"
"""
The number of audio input tokens presented in the prompt.
"""
LLM_TOKEN_COUNT_COMPLETION = "llm.token_count.completion"
"""
Number of tokens in the completion.
"""
LLM_TOKEN_COUNT_COMPLETION_DETAILS_REASONING = "llm.token_count.completion_details.reasoning"
"""
Number of tokens used for reasoning steps in the completion.
"""
LLM_TOKEN_COUNT_COMPLETION_DETAILS_AUDIO = "llm.token_count.completion_details.audio"
"""
The number of audio tokens generated by the model.
"""
LLM_TOKEN_COUNT_TOTAL = "llm.token_count.total"
"""
Total number of tokens, including both prompt and completion.
"""
LLM_TOOLS = "llm.tools"
"""
List of tools advertised to the LLM that it is able to call.
"""
TOOL_NAME = "tool.name"
"""
Name of the tool being used.
@ -112,6 +145,19 @@ class SpanAttributes:
The id of the user
"""
PROMPT_VENDOR = "prompt.vendor"
"""
The vendor or origin of the prompt, e.g. a prompt library, a specialized service, etc.
"""
PROMPT_ID = "prompt.id"
"""
A vendor-specific id used to locate the prompt.
"""
PROMPT_URL = "prompt.url"
"""
A vendor-specific url used to locate the prompt.
"""
class MessageAttributes:
"""
@ -151,6 +197,10 @@ class MessageAttributes:
The JSON string representing the arguments passed to the function
during a function call.
"""
MESSAGE_TOOL_CALL_ID = "message.tool_call_id"
"""
The id of the tool call.
"""
class MessageContentAttributes:
@ -186,6 +236,25 @@ class ImageAttributes:
"""
class AudioAttributes:
"""
Attributes for audio
"""
AUDIO_URL = "audio.url"
"""
The url to an audio file
"""
AUDIO_MIME_TYPE = "audio.mime_type"
"""
The mime type of the audio file
"""
AUDIO_TRANSCRIPT = "audio.transcript"
"""
The transcript of the audio file
"""
class DocumentAttributes:
"""
Attributes for a document.
@ -257,6 +326,10 @@ class ToolCallAttributes:
Attributes for a tool call
"""
TOOL_CALL_ID = "tool_call.id"
"""
The id of the tool call.
"""
TOOL_CALL_FUNCTION_NAME = "tool_call.function.name"
"""
The name of function that is being called during a tool call.
@ -268,6 +341,18 @@ class ToolCallAttributes:
"""
class ToolAttributes:
"""
Attributes for a tool
"""
TOOL_JSON_SCHEMA = "tool.json_schema"
"""
The json schema of a tool input. It is RECOMMENDED that this be in the
OpenAI tool-calling format: https://platform.openai.com/docs/assistants/tools
"""
class OpenInferenceSpanKindValues(Enum):
TOOL = "TOOL"
CHAIN = "CHAIN"
@ -284,3 +369,21 @@ class OpenInferenceSpanKindValues(Enum):
class OpenInferenceMimeTypeValues(Enum):
TEXT = "text/plain"
JSON = "application/json"
class OpenInferenceLLMSystemValues(Enum):
OPENAI = "openai"
ANTHROPIC = "anthropic"
COHERE = "cohere"
MISTRALAI = "mistralai"
VERTEXAI = "vertexai"
class OpenInferenceLLMProviderValues(Enum):
OPENAI = "openai"
ANTHROPIC = "anthropic"
COHERE = "cohere"
MISTRALAI = "mistralai"
GOOGLE = "google"
AZURE = "azure"
AWS = "aws"
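The constants above are attribute keys meant to be set on OpenTelemetry spans. A minimal usage sketch, assuming the `opentelemetry-sdk` package and a configured tracer (the span name and token count are illustrative):
```python
from opentelemetry import trace

tracer = trace.get_tracer(__name__)

with tracer.start_as_current_span("llm_call") as span:
    span.set_attribute(
        SpanAttributes.LLM_PROVIDER, OpenInferenceLLMProviderValues.OPENAI.value
    )
    span.set_attribute(
        SpanAttributes.LLM_SYSTEM, OpenInferenceLLMSystemValues.OPENAI.value
    )
    # Cache-aware prompt token accounting
    span.set_attribute(SpanAttributes.LLM_TOKEN_COUNT_PROMPT_DETAILS_CACHE_READ, 128)
```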

View file

@ -0,0 +1,3 @@
from .agentops import AgentOps
__all__ = ["AgentOps"]

View file

@ -0,0 +1,118 @@
"""
AgentOps integration for LiteLLM - Provides OpenTelemetry tracing for LLM calls
"""
import os
from dataclasses import dataclass
from typing import Optional, Dict, Any
from litellm.integrations.opentelemetry import OpenTelemetry, OpenTelemetryConfig
from litellm.llms.custom_httpx.http_handler import _get_httpx_client
@dataclass
class AgentOpsConfig:
endpoint: str = "https://otlp.agentops.cloud/v1/traces"
api_key: Optional[str] = None
service_name: Optional[str] = None
deployment_environment: Optional[str] = None
auth_endpoint: str = "https://api.agentops.ai/v3/auth/token"
@classmethod
def from_env(cls):
return cls(
endpoint="https://otlp.agentops.cloud/v1/traces",
api_key=os.getenv("AGENTOPS_API_KEY"),
service_name=os.getenv("AGENTOPS_SERVICE_NAME", "agentops"),
deployment_environment=os.getenv("AGENTOPS_ENVIRONMENT", "production"),
auth_endpoint="https://api.agentops.ai/v3/auth/token"
)
class AgentOps(OpenTelemetry):
"""
AgentOps integration - built on top of OpenTelemetry
Example usage:
```python
import litellm
litellm.success_callback = ["agentops"]
response = litellm.completion(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": "Hello, how are you?"}],
)
```
"""
def __init__(
self,
config: Optional[AgentOpsConfig] = None,
):
if config is None:
config = AgentOpsConfig.from_env()
# Prefetch JWT token for authentication
jwt_token = None
project_id = None
if config.api_key:
try:
response = self._fetch_auth_token(config.api_key, config.auth_endpoint)
jwt_token = response.get("token")
project_id = response.get("project_id")
except Exception:
pass
headers = f"Authorization=Bearer {jwt_token}" if jwt_token else None
otel_config = OpenTelemetryConfig(
exporter="otlp_http",
endpoint=config.endpoint,
headers=headers
)
# Initialize OpenTelemetry with our config
super().__init__(
config=otel_config,
callback_name="agentops"
)
# Set AgentOps-specific resource attributes
resource_attrs = {
"service.name": config.service_name or "litellm",
"deployment.environment": config.deployment_environment or "production",
"telemetry.sdk.name": "agentops",
}
if project_id:
resource_attrs["project.id"] = project_id
self.resource_attributes = resource_attrs
def _fetch_auth_token(self, api_key: str, auth_endpoint: str) -> Dict[str, Any]:
"""
Fetch JWT authentication token from AgentOps API
Args:
api_key: AgentOps API key
auth_endpoint: Authentication endpoint
Returns:
Dict containing JWT token and project ID
"""
headers = {
"Content-Type": "application/json",
"Connection": "keep-alive",
}
client = _get_httpx_client()
try:
response = client.post(
url=auth_endpoint,
headers=headers,
json={"api_key": api_key},
timeout=10
)
if response.status_code != 200:
raise Exception(f"Failed to fetch auth token: {response.text}")
return response.json()
finally:
client.close()
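For reference, a minimal sketch of constructing the logger with an explicit `AgentOpsConfig` instead of environment variables (the API key is a placeholder):
```python
config = AgentOpsConfig(
    api_key="your-agentops-api-key",  # placeholder
    service_name="my-service",
    deployment_environment="staging",
)
agentops_logger = AgentOps(config=config)
```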

View file

@ -1,3 +1,4 @@
import json
from typing import TYPE_CHECKING, Any, Optional, Union
from litellm._logging import verbose_logger
@ -12,36 +13,141 @@ else:
Span = Any
def set_attributes(span: Span, kwargs, response_obj):
def cast_as_primitive_value_type(value) -> Union[str, bool, int, float]:
"""
Converts a value to an OTEL-supported primitive for Arize/Phoenix observability.
"""
if value is None:
return ""
if isinstance(value, (str, bool, int, float)):
return value
try:
return str(value)
except Exception:
return ""
def safe_set_attribute(span: Span, key: str, value: Any):
"""
Sets a span attribute safely with OTEL-compliant primitive typing for Arize/Phoenix.
"""
primitive_value = cast_as_primitive_value_type(value)
span.set_attribute(key, primitive_value)
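A quick sketch of the coercion behavior (hypothetical values), showing why dicts and `None` never reach `span.set_attribute` directly:
```python
cast_as_primitive_value_type(None)        # -> ""
cast_as_primitive_value_type(42)          # -> 42 (already an OTEL primitive)
cast_as_primitive_value_type({"k": "v"})  # -> "{'k': 'v'}" (stringified)
```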
def set_attributes(span: Span, kwargs, response_obj): # noqa: PLR0915
"""
Populates span with OpenInference-compliant LLM attributes for Arize and Phoenix tracing.
"""
from litellm.integrations._types.open_inference import (
MessageAttributes,
OpenInferenceSpanKindValues,
SpanAttributes,
ToolCallAttributes,
)
try:
optional_params = kwargs.get("optional_params", {})
litellm_params = kwargs.get("litellm_params", {})
standard_logging_payload: Optional[StandardLoggingPayload] = kwargs.get(
"standard_logging_object"
)
if standard_logging_payload is None:
raise ValueError("standard_logging_object not found in kwargs")
#############################################
############ LLM CALL METADATA ##############
#############################################
if standard_logging_payload and (
metadata := standard_logging_payload["metadata"]
):
span.set_attribute(SpanAttributes.METADATA, safe_dumps(metadata))
# Set custom metadata for observability and trace enrichment.
metadata = (
standard_logging_payload.get("metadata")
if standard_logging_payload
else None
)
if metadata is not None:
safe_set_attribute(span, SpanAttributes.METADATA, safe_dumps(metadata))
#############################################
########## LLM Request Attributes ###########
#############################################
# The name of the LLM a request is being made to
# The name of the LLM a request is being made to.
if kwargs.get("model"):
span.set_attribute(SpanAttributes.LLM_MODEL_NAME, kwargs.get("model"))
safe_set_attribute(
span,
SpanAttributes.LLM_MODEL_NAME,
kwargs.get("model"),
)
span.set_attribute(
# The LLM request type.
safe_set_attribute(
span,
"llm.request.type",
standard_logging_payload["call_type"],
)
# The Generative AI Provider: Azure, OpenAI, etc.
safe_set_attribute(
span,
SpanAttributes.LLM_PROVIDER,
litellm_params.get("custom_llm_provider", "Unknown"),
)
# The maximum number of tokens the LLM generates for a request.
if optional_params.get("max_tokens"):
safe_set_attribute(
span,
"llm.request.max_tokens",
optional_params.get("max_tokens"),
)
# The temperature setting for the LLM request.
if optional_params.get("temperature"):
safe_set_attribute(
span,
"llm.request.temperature",
optional_params.get("temperature"),
)
# The top_p sampling setting for the LLM request.
if optional_params.get("top_p"):
safe_set_attribute(
span,
"llm.request.top_p",
optional_params.get("top_p"),
)
# Indicates whether response is streamed.
safe_set_attribute(
span,
"llm.is_streaming",
str(optional_params.get("stream", False)),
)
# Logs the user ID if present.
if optional_params.get("user"):
safe_set_attribute(
span,
"llm.user",
optional_params.get("user"),
)
# The unique identifier for the completion.
if response_obj and response_obj.get("id"):
safe_set_attribute(span, "llm.response.id", response_obj.get("id"))
# The model used to generate the response.
if response_obj and response_obj.get("model"):
safe_set_attribute(
span,
"llm.response.model",
response_obj.get("model"),
)
# Required by OpenInference to mark span as LLM kind.
safe_set_attribute(
span,
SpanAttributes.OPENINFERENCE_SPAN_KIND,
OpenInferenceSpanKindValues.LLM.value,
)
@ -50,77 +156,132 @@ def set_attributes(span: Span, kwargs, response_obj):
# for /chat/completions
# https://docs.arize.com/arize/large-language-models/tracing/semantic-conventions
if messages:
span.set_attribute(
last_message = messages[-1]
safe_set_attribute(
span,
SpanAttributes.INPUT_VALUE,
messages[-1].get("content", ""), # get the last message for input
last_message.get("content", ""),
)
# LLM_INPUT_MESSAGES shows up under `input_messages` tab on the span page
# LLM_INPUT_MESSAGES shows up under `input_messages` tab on the span page.
for idx, msg in enumerate(messages):
# Set the role per message
span.set_attribute(
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{idx}.{MessageAttributes.MESSAGE_ROLE}",
msg["role"],
prefix = f"{SpanAttributes.LLM_INPUT_MESSAGES}.{idx}"
# Set the role per message.
safe_set_attribute(
span, f"{prefix}.{MessageAttributes.MESSAGE_ROLE}", msg.get("role")
)
# Set the content per message
span.set_attribute(
f"{SpanAttributes.LLM_INPUT_MESSAGES}.{idx}.{MessageAttributes.MESSAGE_CONTENT}",
# Set the content per message.
safe_set_attribute(
span,
f"{prefix}.{MessageAttributes.MESSAGE_CONTENT}",
msg.get("content", ""),
)
if standard_logging_payload and (
model_params := standard_logging_payload["model_parameters"]
):
# Capture tools (function definitions) used in the LLM call.
tools = optional_params.get("tools")
if tools:
for idx, tool in enumerate(tools):
function = tool.get("function")
if not function:
continue
prefix = f"{SpanAttributes.LLM_TOOLS}.{idx}"
safe_set_attribute(
span, f"{prefix}.{SpanAttributes.TOOL_NAME}", function.get("name")
)
safe_set_attribute(
span,
f"{prefix}.{SpanAttributes.TOOL_DESCRIPTION}",
function.get("description"),
)
safe_set_attribute(
span,
f"{prefix}.{SpanAttributes.TOOL_PARAMETERS}",
json.dumps(function.get("parameters")),
)
# Capture legacy function definitions used in function-calling LLM flows.
functions = optional_params.get("functions")
if functions:
for idx, function in enumerate(functions):
prefix = f"{MessageAttributes.MESSAGE_TOOL_CALLS}.{idx}"
safe_set_attribute(
span,
f"{prefix}.{ToolCallAttributes.TOOL_CALL_FUNCTION_NAME}",
function.get("name"),
)
# Capture invocation parameters and user ID if available.
model_params = (
standard_logging_payload.get("model_parameters")
if standard_logging_payload
else None
)
if model_params:
# The Generative AI Provider: Azure, OpenAI, etc.
span.set_attribute(
SpanAttributes.LLM_INVOCATION_PARAMETERS, safe_dumps(model_params)
safe_set_attribute(
span,
SpanAttributes.LLM_INVOCATION_PARAMETERS,
safe_dumps(model_params),
)
if model_params.get("user"):
user_id = model_params.get("user")
if user_id is not None:
span.set_attribute(SpanAttributes.USER_ID, user_id)
safe_set_attribute(span, SpanAttributes.USER_ID, user_id)
#############################################
########## LLM Response Attributes ##########
# https://docs.arize.com/arize/large-language-models/tracing/semantic-conventions
#############################################
if hasattr(response_obj, "get"):
for choice in response_obj.get("choices", []):
response_message = choice.get("message", {})
span.set_attribute(
SpanAttributes.OUTPUT_VALUE, response_message.get("content", "")
)
# This shows up under `output_messages` tab on the span page
# This code assumes a single response
span.set_attribute(
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.0.{MessageAttributes.MESSAGE_ROLE}",
response_message.get("role"),
)
span.set_attribute(
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.0.{MessageAttributes.MESSAGE_CONTENT}",
# Captures response tokens, message, and content.
if hasattr(response_obj, "get"):
for idx, choice in enumerate(response_obj.get("choices", [])):
response_message = choice.get("message", {})
safe_set_attribute(
span,
SpanAttributes.OUTPUT_VALUE,
response_message.get("content", ""),
)
usage = response_obj.get("usage")
# This shows up under `output_messages` tab on the span page.
prefix = f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.{idx}"
safe_set_attribute(
span,
f"{prefix}.{MessageAttributes.MESSAGE_ROLE}",
response_message.get("role"),
)
safe_set_attribute(
span,
f"{prefix}.{MessageAttributes.MESSAGE_CONTENT}",
response_message.get("content", ""),
)
# Token usage info.
usage = response_obj and response_obj.get("usage")
if usage:
span.set_attribute(
safe_set_attribute(
span,
SpanAttributes.LLM_TOKEN_COUNT_TOTAL,
usage.get("total_tokens"),
)
# The number of tokens used in the LLM response (completion).
span.set_attribute(
safe_set_attribute(
span,
SpanAttributes.LLM_TOKEN_COUNT_COMPLETION,
usage.get("completion_tokens"),
)
# The number of tokens used in the LLM prompt.
span.set_attribute(
safe_set_attribute(
span,
SpanAttributes.LLM_TOKEN_COUNT_PROMPT,
usage.get("prompt_tokens"),
)
pass
except Exception as e:
verbose_logger.error(f"Error setting arize attributes: {e}")
verbose_logger.error(
f"[Arize/Phoenix] Failed to set OpenInference span attributes: {e}"
)
if hasattr(span, "record_exception"):
span.record_exception(e)

View file

@ -28,6 +28,7 @@ from litellm._logging import _is_debugging_on, verbose_logger
from litellm.batches.batch_utils import _handle_completed_batch
from litellm.caching.caching import DualCache, InMemoryCache
from litellm.caching.caching_handler import LLMCachingHandler
from litellm.constants import (
DEFAULT_MOCK_RESPONSE_COMPLETION_TOKEN_COUNT,
DEFAULT_MOCK_RESPONSE_PROMPT_TOKEN_COUNT,
@ -36,6 +37,7 @@ from litellm.cost_calculator import (
RealtimeAPITokenUsageProcessor,
_select_model_name_for_cost_calc,
)
from litellm.integrations.agentops import AgentOps
from litellm.integrations.anthropic_cache_control_hook import AnthropicCacheControlHook
from litellm.integrations.arize.arize import ArizeLogger
from litellm.integrations.custom_guardrail import CustomGuardrail
@ -2685,7 +2687,15 @@ def _init_custom_logger_compatible_class( # noqa: PLR0915
"""
try:
custom_logger_init_args = custom_logger_init_args or {}
if logging_integration == "lago":
if logging_integration == "agentops": # Add AgentOps initialization
for callback in _in_memory_loggers:
if isinstance(callback, AgentOps):
return callback # type: ignore
agentops_logger = AgentOps()
_in_memory_loggers.append(agentops_logger)
return agentops_logger # type: ignore
elif logging_integration == "lago":
for callback in _in_memory_loggers:
if isinstance(callback, LagoLogger):
return callback # type: ignore
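A sketch of the resulting idempotency; the import path and positional arguments are assumptions based on the litellm source:
```python
from litellm.litellm_core_utils.litellm_logging import (
    _init_custom_logger_compatible_class,  # assumed import path
)

# Registering "agentops" twice reuses the same in-memory logger
# instead of creating a second OTLP exporter.
first = _init_custom_logger_compatible_class("agentops", None, None)
second = _init_custom_logger_compatible_class("agentops", None, None)
assert first is second
```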

View file

@ -1,11 +1,14 @@
from typing import TYPE_CHECKING, Any, Optional, cast
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, cast
import httpx
import litellm
from litellm._logging import verbose_logger
from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import *
from litellm.types.responses.main import *
from litellm.types.router import GenericLiteLLMParams
from litellm.utils import _add_path_to_api_base
if TYPE_CHECKING:
@ -41,11 +44,7 @@ class AzureOpenAIResponsesAPIConfig(OpenAIResponsesAPIConfig):
def get_complete_url(
self,
api_base: Optional[str],
api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
stream: Optional[bool] = None,
) -> str:
"""
Constructs a complete URL for the API request.
@ -92,3 +91,48 @@ class AzureOpenAIResponsesAPIConfig(OpenAIResponsesAPIConfig):
final_url = httpx.URL(new_url).copy_with(params=query_params)
return str(final_url)
#########################################################
########## DELETE RESPONSE API TRANSFORMATION ##############
#########################################################
def transform_delete_response_api_request(
self,
response_id: str,
api_base: str,
litellm_params: GenericLiteLLMParams,
headers: dict,
) -> Tuple[str, Dict]:
"""
Transform the delete response API request into a URL and data
Azure OpenAI API expects the following request:
- DELETE /openai/responses/{response_id}?api-version=xxx
This function handles URLs with query parameters by inserting the response_id
at the correct location (before any query parameters).
"""
from urllib.parse import urlparse, urlunparse
# Parse the URL to separate its components
parsed_url = urlparse(api_base)
# Insert the response_id at the end of the path component
# Remove trailing slash if present to avoid double slashes
path = parsed_url.path.rstrip("/")
new_path = f"{path}/{response_id}"
# Reconstruct the URL with all original components but with the modified path
delete_url = urlunparse(
(
parsed_url.scheme, # http, https
parsed_url.netloc, # domain name, port
new_path, # path with response_id added
parsed_url.params, # parameters
parsed_url.query, # query string
parsed_url.fragment, # fragment
)
)
data: Dict = {}
verbose_logger.debug(f"delete response url={delete_url}")
return delete_url, data
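Walking through the transformation with a hypothetical Azure base URL shows how the response ID lands before the `api-version` query string:
```python
from urllib.parse import urlparse, urlunparse

# Hypothetical Azure endpoint with an api-version query parameter
api_base = "https://my-resource.openai.azure.com/openai/responses?api-version=2024-05-01-preview"
response_id = "resp_abc123"

parsed = urlparse(api_base)
new_path = f"{parsed.path.rstrip('/')}/{response_id}"
delete_url = urlunparse(
    (parsed.scheme, parsed.netloc, new_path, parsed.params, parsed.query, parsed.fragment)
)
print(delete_url)
# https://my-resource.openai.azure.com/openai/responses/resp_abc123?api-version=2024-05-01-preview
```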

View file

@ -1,6 +1,6 @@
import types
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Dict, Optional, Union
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple, Union
import httpx
@ -10,6 +10,7 @@ from litellm.types.llms.openai import (
ResponsesAPIResponse,
ResponsesAPIStreamingResponse,
)
from litellm.types.responses.main import *
from litellm.types.router import GenericLiteLLMParams
if TYPE_CHECKING:
@ -73,11 +74,7 @@ class BaseResponsesAPIConfig(ABC):
def get_complete_url(
self,
api_base: Optional[str],
api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
stream: Optional[bool] = None,
) -> str:
"""
OPTIONAL
@ -122,6 +119,31 @@ class BaseResponsesAPIConfig(ABC):
"""
pass
#########################################################
########## DELETE RESPONSE API TRANSFORMATION ##############
#########################################################
@abstractmethod
def transform_delete_response_api_request(
self,
response_id: str,
api_base: str,
litellm_params: GenericLiteLLMParams,
headers: dict,
) -> Tuple[str, Dict]:
pass
@abstractmethod
def transform_delete_response_api_response(
self,
raw_response: httpx.Response,
logging_obj: LiteLLMLoggingObj,
) -> DeleteResponseResult:
pass
#########################################################
########## END DELETE RESPONSE API TRANSFORMATION ##########
#########################################################
def get_error_class(
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
) -> BaseLLMException:

View file

@ -650,6 +650,49 @@ class HTTPHandler:
except Exception as e:
raise e
def delete(
self,
url: str,
data: Optional[Union[dict, str]] = None, # type: ignore
json: Optional[dict] = None,
params: Optional[dict] = None,
headers: Optional[dict] = None,
timeout: Optional[Union[float, httpx.Timeout]] = None,
stream: bool = False,
):
try:
if timeout is not None:
req = self.client.build_request(
"DELETE", url, data=data, json=json, params=params, headers=headers, timeout=timeout # type: ignore
)
else:
req = self.client.build_request(
"DELETE", url, data=data, json=json, params=params, headers=headers # type: ignore
)
response = self.client.send(req, stream=stream)
response.raise_for_status()
return response
except httpx.TimeoutException:
raise litellm.Timeout(
message=f"Connection timed out after {timeout} seconds.",
model="default-model-name",
llm_provider="litellm-httpx-handler",
)
except httpx.HTTPStatusError as e:
if stream is True:
setattr(e, "message", mask_sensitive_info(e.response.read()))
setattr(e, "text", mask_sensitive_info(e.response.read()))
else:
error_text = mask_sensitive_info(e.response.text)
setattr(e, "message", error_text)
setattr(e, "text", error_text)
setattr(e, "status_code", e.response.status_code)
raise e
except Exception as e:
raise e
def __del__(self) -> None:
try:
self.close()
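A usage sketch for the new `delete` method (URL and token are placeholders); any JSON body is serialized by the caller, mirroring how the responses handlers invoke it:
```python
from litellm.llms.custom_httpx.http_handler import HTTPHandler

client = HTTPHandler()
response = client.delete(
    url="https://api.example.com/v1/responses/resp_abc123",  # placeholder
    headers={"Authorization": "Bearer sk-placeholder"},
    timeout=10.0,
)
print(response.status_code)
```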

View file

@ -36,6 +36,7 @@ from litellm.types.llms.openai import (
ResponsesAPIResponse,
)
from litellm.types.rerank import OptionalRerankParams, RerankResponse
from litellm.types.responses.main import DeleteResponseResult
from litellm.types.router import GenericLiteLLMParams
from litellm.types.utils import EmbeddingResponse, FileTypes, TranscriptionResponse
from litellm.utils import CustomStreamWrapper, ModelResponse, ProviderConfigManager
@ -1015,6 +1016,7 @@ class BaseLLMHTTPHandler:
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
_is_async: bool = False,
fake_stream: bool = False,
litellm_metadata: Optional[Dict[str, Any]] = None,
) -> Union[
ResponsesAPIResponse,
BaseResponsesAPIStreamingIterator,
@ -1041,6 +1043,7 @@ class BaseLLMHTTPHandler:
timeout=timeout,
client=client if isinstance(client, AsyncHTTPHandler) else None,
fake_stream=fake_stream,
litellm_metadata=litellm_metadata,
)
if client is None or not isinstance(client, HTTPHandler):
@ -1064,11 +1067,7 @@ class BaseLLMHTTPHandler:
api_base = responses_api_provider_config.get_complete_url(
api_base=litellm_params.api_base,
api_key=litellm_params.api_key,
model=model,
optional_params=response_api_optional_request_params,
litellm_params=dict(litellm_params),
stream=stream,
)
data = responses_api_provider_config.transform_responses_api_request(
@ -1113,6 +1112,8 @@ class BaseLLMHTTPHandler:
model=model,
logging_obj=logging_obj,
responses_api_provider_config=responses_api_provider_config,
litellm_metadata=litellm_metadata,
custom_llm_provider=custom_llm_provider,
)
return SyncResponsesAPIStreamingIterator(
@ -1120,6 +1121,8 @@ class BaseLLMHTTPHandler:
model=model,
logging_obj=logging_obj,
responses_api_provider_config=responses_api_provider_config,
litellm_metadata=litellm_metadata,
custom_llm_provider=custom_llm_provider,
)
else:
# For non-streaming requests
@ -1156,6 +1159,7 @@ class BaseLLMHTTPHandler:
timeout: Optional[Union[float, httpx.Timeout]] = None,
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
fake_stream: bool = False,
litellm_metadata: Optional[Dict[str, Any]] = None,
) -> Union[ResponsesAPIResponse, BaseResponsesAPIStreamingIterator]:
"""
Async version of the responses API handler.
@ -1183,11 +1187,7 @@ class BaseLLMHTTPHandler:
api_base = responses_api_provider_config.get_complete_url(
api_base=litellm_params.api_base,
api_key=litellm_params.api_key,
model=model,
optional_params=response_api_optional_request_params,
litellm_params=dict(litellm_params),
stream=stream,
)
data = responses_api_provider_config.transform_responses_api_request(
@ -1234,6 +1234,8 @@ class BaseLLMHTTPHandler:
model=model,
logging_obj=logging_obj,
responses_api_provider_config=responses_api_provider_config,
litellm_metadata=litellm_metadata,
custom_llm_provider=custom_llm_provider,
)
# Return the streaming iterator
@ -1242,6 +1244,8 @@ class BaseLLMHTTPHandler:
model=model,
logging_obj=logging_obj,
responses_api_provider_config=responses_api_provider_config,
litellm_metadata=litellm_metadata,
custom_llm_provider=custom_llm_provider,
)
else:
# For non-streaming, proceed as before
@ -1265,6 +1269,163 @@ class BaseLLMHTTPHandler:
logging_obj=logging_obj,
)
async def async_delete_response_api_handler(
self,
response_id: str,
responses_api_provider_config: BaseResponsesAPIConfig,
litellm_params: GenericLiteLLMParams,
logging_obj: LiteLLMLoggingObj,
custom_llm_provider: Optional[str],
extra_headers: Optional[Dict[str, Any]] = None,
extra_body: Optional[Dict[str, Any]] = None,
timeout: Optional[Union[float, httpx.Timeout]] = None,
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
_is_async: bool = False,
) -> DeleteResponseResult:
"""
Async version of the delete response API handler.
Uses async HTTP client to make requests.
"""
if client is None or not isinstance(client, AsyncHTTPHandler):
async_httpx_client = get_async_httpx_client(
llm_provider=litellm.LlmProviders(custom_llm_provider),
params={"ssl_verify": litellm_params.get("ssl_verify", None)},
)
else:
async_httpx_client = client
headers = responses_api_provider_config.validate_environment(
api_key=litellm_params.api_key,
headers=extra_headers or {},
model="None",
)
if extra_headers:
headers.update(extra_headers)
api_base = responses_api_provider_config.get_complete_url(
api_base=litellm_params.api_base,
litellm_params=dict(litellm_params),
)
url, data = responses_api_provider_config.transform_delete_response_api_request(
response_id=response_id,
api_base=api_base,
litellm_params=litellm_params,
headers=headers,
)
## LOGGING
logging_obj.pre_call(
input=input,
api_key="",
additional_args={
"complete_input_dict": data,
"api_base": api_base,
"headers": headers,
},
)
try:
response = await async_httpx_client.delete(
url=url, headers=headers, data=json.dumps(data), timeout=timeout
)
except Exception as e:
raise self._handle_error(
e=e,
provider_config=responses_api_provider_config,
)
return responses_api_provider_config.transform_delete_response_api_response(
raw_response=response,
logging_obj=logging_obj,
)
def delete_response_api_handler(
self,
response_id: str,
responses_api_provider_config: BaseResponsesAPIConfig,
litellm_params: GenericLiteLLMParams,
logging_obj: LiteLLMLoggingObj,
custom_llm_provider: Optional[str],
extra_headers: Optional[Dict[str, Any]] = None,
extra_body: Optional[Dict[str, Any]] = None,
timeout: Optional[Union[float, httpx.Timeout]] = None,
client: Optional[Union[HTTPHandler, AsyncHTTPHandler]] = None,
_is_async: bool = False,
) -> Union[DeleteResponseResult, Coroutine[Any, Any, DeleteResponseResult]]:
"""
Sync version of the delete response API handler.
Dispatches to the async handler when `_is_async` is True.
"""
if _is_async:
return self.async_delete_response_api_handler(
response_id=response_id,
responses_api_provider_config=responses_api_provider_config,
litellm_params=litellm_params,
logging_obj=logging_obj,
custom_llm_provider=custom_llm_provider,
extra_headers=extra_headers,
extra_body=extra_body,
timeout=timeout,
client=client,
)
if client is None or not isinstance(client, HTTPHandler):
sync_httpx_client = _get_httpx_client(
params={"ssl_verify": litellm_params.get("ssl_verify", None)}
)
else:
sync_httpx_client = client
headers = responses_api_provider_config.validate_environment(
api_key=litellm_params.api_key,
headers=extra_headers or {},
model="None",
)
if extra_headers:
headers.update(extra_headers)
api_base = responses_api_provider_config.get_complete_url(
api_base=litellm_params.api_base,
litellm_params=dict(litellm_params),
)
url, data = responses_api_provider_config.transform_delete_response_api_request(
response_id=response_id,
api_base=api_base,
litellm_params=litellm_params,
headers=headers,
)
## LOGGING
logging_obj.pre_call(
input=input,
api_key="",
additional_args={
"complete_input_dict": data,
"api_base": api_base,
"headers": headers,
},
)
try:
response = sync_httpx_client.delete(
url=url, headers=headers, data=json.dumps(data), timeout=timeout
)
except Exception as e:
raise self._handle_error(
e=e,
provider_config=responses_api_provider_config,
)
return responses_api_provider_config.transform_delete_response_api_response(
raw_response=response,
logging_obj=logging_obj,
)
def create_file(
self,
create_file_data: CreateFileRequest,

View file

@ -7,6 +7,7 @@ from litellm._logging import verbose_logger
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.openai import *
from litellm.types.responses.main import *
from litellm.types.router import GenericLiteLLMParams
from ..common_utils import OpenAIError
@ -110,11 +111,7 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
def get_complete_url(
self,
api_base: Optional[str],
api_key: Optional[str],
model: str,
optional_params: dict,
litellm_params: dict,
stream: Optional[bool] = None,
) -> str:
"""
Get the endpoint for OpenAI responses API
@ -217,3 +214,39 @@ class OpenAIResponsesAPIConfig(BaseResponsesAPIConfig):
f"Error getting model info in OpenAIResponsesAPIConfig: {e}"
)
return False
#########################################################
########## DELETE RESPONSE API TRANSFORMATION ##############
#########################################################
def transform_delete_response_api_request(
self,
response_id: str,
api_base: str,
litellm_params: GenericLiteLLMParams,
headers: dict,
) -> Tuple[str, Dict]:
"""
Transform the delete response API request into a URL and data
OpenAI API expects the following request
- DELETE /v1/responses/{response_id}
"""
url = f"{api_base}/{response_id}"
data: Dict = {}
return url, data
def transform_delete_response_api_response(
self,
raw_response: httpx.Response,
logging_obj: LiteLLMLoggingObj,
) -> DeleteResponseResult:
"""
Transform the delete response API response into a DeleteResponseResult
"""
try:
raw_response_json = raw_response.json()
except Exception:
raise OpenAIError(
message=raw_response.text, status_code=raw_response.status_code
)
return DeleteResponseResult(**raw_response_json)

View file

@ -1490,7 +1490,6 @@
"supports_prompt_caching": false,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_native_streaming": false,
"supports_reasoning": true
},
"azure/gpt-4o-audio-preview-2024-12-17": {

View file

@ -687,6 +687,8 @@ class GenerateKeyResponse(KeyRequestBase):
token: Optional[str] = None
created_by: Optional[str] = None
updated_by: Optional[str] = None
created_at: Optional[datetime] = None
updated_at: Optional[datetime] = None
@model_validator(mode="before")
@classmethod

View file

@ -43,6 +43,9 @@ from litellm.types.proxy.management_endpoints.common_daily_activity import (
SpendAnalyticsPaginatedResponse,
SpendMetrics,
)
from litellm.types.proxy.management_endpoints.internal_user_endpoints import (
UserListResponse,
)
router = APIRouter()
@ -899,15 +902,47 @@ async def get_user_key_counts(
return result
@router.get(
"/user/get_users",
tags=["Internal User management"],
dependencies=[Depends(user_api_key_auth)],
)
def _validate_sort_params(
sort_by: Optional[str], sort_order: str
) -> Optional[Dict[str, str]]:
order_by: Dict[str, str] = {}
if sort_by is None:
return None
# Validate sort_by is a valid column
valid_columns = [
"user_id",
"user_email",
"created_at",
"spend",
"user_alias",
"user_role",
]
if sort_by not in valid_columns:
raise HTTPException(
status_code=400,
detail={
"error": f"Invalid sort column. Must be one of: {', '.join(valid_columns)}"
},
)
# Validate sort_order
if sort_order.lower() not in ["asc", "desc"]:
raise HTTPException(
status_code=400,
detail={"error": "Invalid sort order. Must be 'asc' or 'desc'"},
)
order_by[sort_by] = sort_order.lower()
return order_by
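Illustrative calls against the validator above (hypothetical inputs):
```python
_validate_sort_params(None, "asc")        # -> None (caller falls back to created_at desc)
_validate_sort_params("spend", "DESC")    # -> {"spend": "desc"}
_validate_sort_params("password", "asc")  # raises HTTPException(400): invalid sort column
```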
@router.get(
"/user/list",
tags=["Internal User management"],
dependencies=[Depends(user_api_key_auth)],
response_model=UserListResponse,
)
async def get_users(
role: Optional[str] = fastapi.Query(
@ -916,15 +951,29 @@ async def get_users(
user_ids: Optional[str] = fastapi.Query(
default=None, description="Get list of users by user_ids"
),
sso_user_ids: Optional[str] = fastapi.Query(
default=None, description="Get list of users by sso_user_id"
),
user_email: Optional[str] = fastapi.Query(
default=None, description="Filter users by partial email match"
),
team: Optional[str] = fastapi.Query(
default=None, description="Filter users by team id"
),
page: int = fastapi.Query(default=1, ge=1, description="Page number"),
page_size: int = fastapi.Query(
default=25, ge=1, le=100, description="Number of items per page"
),
sort_by: Optional[str] = fastapi.Query(
default=None,
description="Column to sort by (e.g. 'user_id', 'user_email', 'created_at', 'spend')",
),
sort_order: str = fastapi.Query(
default="asc", description="Sort order ('asc' or 'desc')"
),
):
"""
Get a paginated list of users, optionally filtered by role.
Used by the UI to populate the user lists.
Get a paginated list of users with filtering and sorting options.
Parameters:
role: Optional[str]
@ -935,17 +984,20 @@ async def get_users(
- internal_user_viewer
user_ids: Optional[str]
Get list of users by user_ids. Comma separated list of user_ids.
sso_user_ids: Optional[str]
Get list of users by sso_user_id. Comma separated list of sso user ids.
user_email: Optional[str]
Filter users by partial email match
team: Optional[str]
Filter users by team id. Will match if user has this team in their teams array.
page: int
The page number to return
page_size: int
The number of items per page
Currently - admin-only endpoint.
Example curl:
```
http://0.0.0.0:4000/user/list?user_ids=default_user_id,693c1a4a-1cc0-4c7c-afe8-b5d2c8d52e17
```
sort_by: Optional[str]
Column to sort by (e.g. 'user_id', 'user_email', 'created_at', 'spend')
sort_order: Optional[str]
Sort order ('asc' or 'desc')
"""
from litellm.proxy.proxy_server import prisma_client
@ -958,35 +1010,57 @@ async def get_users(
# Calculate skip and take for pagination
skip = (page - 1) * page_size
# Prepare the query conditions
# Build where conditions based on provided parameters
where_conditions: Dict[str, Any] = {}
if role:
where_conditions["user_role"] = {
"contains": role,
"mode": "insensitive", # Case-insensitive search
}
where_conditions["user_role"] = role # Exact match instead of contains
if user_ids and isinstance(user_ids, str):
user_id_list = [uid.strip() for uid in user_ids.split(",") if uid.strip()]
where_conditions["user_id"] = {
"in": user_id_list, # Now passing a list of strings as required by Prisma
"in": user_id_list,
}
users: Optional[
List[LiteLLM_UserTable]
] = await prisma_client.db.litellm_usertable.find_many(
if user_email is not None and isinstance(user_email, str):
where_conditions["user_email"] = {
"contains": user_email,
"mode": "insensitive", # Case-insensitive search
}
if team is not None and isinstance(team, str):
where_conditions["teams"] = {
"has": team # Array contains for string arrays in Prisma
}
if sso_user_ids is not None and isinstance(sso_user_ids, str):
sso_id_list = [sid.strip() for sid in sso_user_ids.split(",") if sid.strip()]
where_conditions["sso_user_id"] = {
"in": sso_id_list,
}
## Filter out any None fastapi.Query params - e.g. where_conditions: {'user_email': {'contains': Query(None), 'mode': 'insensitive'}, 'teams': {'has': Query(None)}}
where_conditions = {k: v for k, v in where_conditions.items() if v is not None}
# Build order_by conditions
order_by: Optional[Dict[str, str]] = (
_validate_sort_params(sort_by, sort_order)
if sort_by is not None and isinstance(sort_by, str)
else None
)
users = await prisma_client.db.litellm_usertable.find_many(
where=where_conditions,
skip=skip,
take=page_size,
order={"created_at": "desc"},
order=order_by
if order_by
else {"created_at": "desc"}, # Default to created_at desc if no sort specified
)
# Get total count of user rows
total_count = await prisma_client.db.litellm_usertable.count(
where=where_conditions # type: ignore
)
total_count = await prisma_client.db.litellm_usertable.count(where=where_conditions)
# Get key count for each user
if users is not None:
@ -1009,7 +1083,7 @@ async def get_users(
LiteLLM_UserTableWithKeyCount(
**user.model_dump(), key_count=user_key_counts.get(user.user_id, 0)
)
) # Return full key object
)
else:
user_list = []

View file

@ -1347,10 +1347,13 @@ async def generate_key_helper_fn( # noqa: PLR0915
create_key_response = await prisma_client.insert_data(
data=key_data, table_name="key"
)
key_data["token_id"] = getattr(create_key_response, "token", None)
key_data["litellm_budget_table"] = getattr(
create_key_response, "litellm_budget_table", None
)
key_data["created_at"] = getattr(create_key_response, "created_at", None)
key_data["updated_at"] = getattr(create_key_response, "updated_at", None)
except Exception as e:
verbose_proxy_logger.error(
"litellm.proxy.proxy_server.generate_key_helper_fn(): Exception occured - {}".format(

View file

@ -1,7 +1,7 @@
import asyncio
import contextvars
from functools import partial
from typing import Any, Dict, Iterable, List, Literal, Optional, Union
from typing import Any, Coroutine, Dict, Iterable, List, Literal, Optional, Union
import httpx
@ -24,6 +24,7 @@ from litellm.types.llms.openai import (
ToolChoice,
ToolParam,
)
from litellm.types.responses.main import *
from litellm.types.router import GenericLiteLLMParams
from litellm.utils import ProviderConfigManager, client
@ -121,7 +122,8 @@ async def aresponses(
if isinstance(response, ResponsesAPIResponse):
response = ResponsesAPIRequestUtils._update_responses_api_response_id_with_model_id(
responses_api_response=response,
kwargs=kwargs,
litellm_metadata=kwargs.get("litellm_metadata", {}),
custom_llm_provider=custom_llm_provider,
)
return response
except Exception as e:
@ -253,13 +255,15 @@ def responses(
fake_stream=responses_api_provider_config.should_fake_stream(
model=model, stream=stream, custom_llm_provider=custom_llm_provider
),
litellm_metadata=kwargs.get("litellm_metadata", {}),
)
# Update the responses_api_response_id with the model_id
if isinstance(response, ResponsesAPIResponse):
response = ResponsesAPIRequestUtils._update_responses_api_response_id_with_model_id(
responses_api_response=response,
kwargs=kwargs,
litellm_metadata=kwargs.get("litellm_metadata", {}),
custom_llm_provider=custom_llm_provider,
)
return response
@ -271,3 +275,162 @@ def responses(
completion_kwargs=local_vars,
extra_kwargs=kwargs,
)
@client
async def adelete_responses(
response_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Optional[Dict[str, Any]] = None,
extra_query: Optional[Dict[str, Any]] = None,
extra_body: Optional[Dict[str, Any]] = None,
timeout: Optional[Union[float, httpx.Timeout]] = None,
# LiteLLM specific params,
custom_llm_provider: Optional[str] = None,
**kwargs,
) -> DeleteResponseResult:
"""
Async version of the DELETE Responses API.
Calls the DELETE /v1/responses/{response_id} endpoint in the responses API.
"""
local_vars = locals()
try:
loop = asyncio.get_event_loop()
kwargs["adelete_responses"] = True
# get custom llm provider from response_id
decoded_response_id: DecodedResponseId = (
ResponsesAPIRequestUtils._decode_responses_api_response_id(
response_id=response_id,
)
)
response_id = decoded_response_id.get("response_id") or response_id
custom_llm_provider = (
decoded_response_id.get("custom_llm_provider") or custom_llm_provider
)
func = partial(
delete_responses,
response_id=response_id,
custom_llm_provider=custom_llm_provider,
extra_headers=extra_headers,
extra_query=extra_query,
extra_body=extra_body,
timeout=timeout,
**kwargs,
)
ctx = contextvars.copy_context()
func_with_context = partial(ctx.run, func)
init_response = await loop.run_in_executor(None, func_with_context)
if asyncio.iscoroutine(init_response):
response = await init_response
else:
response = init_response
return response
except Exception as e:
raise litellm.exception_type(
model=None,
custom_llm_provider=custom_llm_provider,
original_exception=e,
completion_kwargs=local_vars,
extra_kwargs=kwargs,
)
@client
def delete_responses(
response_id: str,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Optional[Dict[str, Any]] = None,
extra_query: Optional[Dict[str, Any]] = None,
extra_body: Optional[Dict[str, Any]] = None,
timeout: Optional[Union[float, httpx.Timeout]] = None,
# LiteLLM specific params,
custom_llm_provider: Optional[str] = None,
**kwargs,
) -> Union[DeleteResponseResult, Coroutine[Any, Any, DeleteResponseResult]]:
"""
Synchronous version of the DELETE Responses API.
Calls the DELETE /v1/responses/{response_id} endpoint in the responses API.
"""
local_vars = locals()
try:
litellm_logging_obj: LiteLLMLoggingObj = kwargs.get("litellm_logging_obj") # type: ignore
litellm_call_id: Optional[str] = kwargs.get("litellm_call_id", None)
_is_async = kwargs.pop("adelete_responses", False) is True
# get llm provider logic
litellm_params = GenericLiteLLMParams(**kwargs)
# get custom llm provider from response_id
decoded_response_id: DecodedResponseId = (
ResponsesAPIRequestUtils._decode_responses_api_response_id(
response_id=response_id,
)
)
response_id = decoded_response_id.get("response_id") or response_id
custom_llm_provider = (
decoded_response_id.get("custom_llm_provider") or custom_llm_provider
)
if custom_llm_provider is None:
raise ValueError("custom_llm_provider is required but passed as None")
# get provider config
responses_api_provider_config: Optional[BaseResponsesAPIConfig] = (
ProviderConfigManager.get_provider_responses_api_config(
model=None,
provider=litellm.LlmProviders(custom_llm_provider),
)
)
if responses_api_provider_config is None:
raise ValueError(
f"DELETE responses is not supported for {custom_llm_provider}"
)
local_vars.update(kwargs)
# Pre Call logging
litellm_logging_obj.update_environment_variables(
model=None,
optional_params={
"response_id": response_id,
},
litellm_params={
"litellm_call_id": litellm_call_id,
},
custom_llm_provider=custom_llm_provider,
)
# Call the handler with _is_async flag instead of directly calling the async handler
response = base_llm_http_handler.delete_response_api_handler(
response_id=response_id,
custom_llm_provider=custom_llm_provider,
responses_api_provider_config=responses_api_provider_config,
litellm_params=litellm_params,
logging_obj=litellm_logging_obj,
extra_headers=extra_headers,
extra_body=extra_body,
timeout=timeout or request_timeout,
_is_async=_is_async,
client=kwargs.get("client"),
)
return response
except Exception as e:
raise litellm.exception_type(
model=None,
custom_llm_provider=custom_llm_provider,
original_exception=e,
completion_kwargs=local_vars,
extra_kwargs=kwargs,
)
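A minimal usage sketch, assuming these functions are re-exported as `litellm.delete_responses` / `litellm.adelete_responses` like the other responses API entry points (the response ID is a placeholder):
```python
import litellm

# The provider is decoded from a litellm composite response ID when
# present, so custom_llm_provider acts as a fallback here.
result = litellm.delete_responses(
    response_id="resp_abc123",
    custom_llm_provider="openai",
)
print(result.deleted)

# Async variant:
# result = await litellm.adelete_responses(response_id="resp_abc123")
```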

View file

@ -1,7 +1,7 @@
import asyncio
import json
from datetime import datetime
from typing import Optional
from typing import Any, Dict, Optional
import httpx
@ -10,6 +10,7 @@ from litellm.litellm_core_utils.asyncify import run_async_function
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.litellm_core_utils.thread_pool_executor import executor
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.responses.utils import ResponsesAPIRequestUtils
from litellm.types.llms.openai import (
OutputTextDeltaEvent,
ResponseCompletedEvent,
@ -33,6 +34,8 @@ class BaseResponsesAPIStreamingIterator:
model: str,
responses_api_provider_config: BaseResponsesAPIConfig,
logging_obj: LiteLLMLoggingObj,
litellm_metadata: Optional[Dict[str, Any]] = None,
custom_llm_provider: Optional[str] = None,
):
self.response = response
self.model = model
@ -42,6 +45,10 @@ class BaseResponsesAPIStreamingIterator:
self.completed_response: Optional[ResponsesAPIStreamingResponse] = None
self.start_time = datetime.now()
# set request kwargs
self.litellm_metadata = litellm_metadata
self.custom_llm_provider = custom_llm_provider
def _process_chunk(self, chunk) -> Optional[ResponsesAPIStreamingResponse]:
"""Process a single chunk of data from the stream"""
if not chunk:
@ -70,6 +77,17 @@ class BaseResponsesAPIStreamingIterator:
logging_obj=self.logging_obj,
)
)
# if "response" in parsed_chunk, then encode litellm specific information like custom_llm_provider
response_object = getattr(openai_responses_api_chunk, "response", None)
if response_object:
response = ResponsesAPIRequestUtils._update_responses_api_response_id_with_model_id(
responses_api_response=response_object,
litellm_metadata=self.litellm_metadata,
custom_llm_provider=self.custom_llm_provider,
)
setattr(openai_responses_api_chunk, "response", response)
# Store the completed response
if (
openai_responses_api_chunk
@ -102,8 +120,17 @@ class ResponsesAPIStreamingIterator(BaseResponsesAPIStreamingIterator):
model: str,
responses_api_provider_config: BaseResponsesAPIConfig,
logging_obj: LiteLLMLoggingObj,
litellm_metadata: Optional[Dict[str, Any]] = None,
custom_llm_provider: Optional[str] = None,
):
super().__init__(response, model, responses_api_provider_config, logging_obj)
super().__init__(
response,
model,
responses_api_provider_config,
logging_obj,
litellm_metadata,
custom_llm_provider,
)
self.stream_iterator = response.aiter_lines()
def __aiter__(self):
@ -163,8 +190,17 @@ class SyncResponsesAPIStreamingIterator(BaseResponsesAPIStreamingIterator):
model: str,
responses_api_provider_config: BaseResponsesAPIConfig,
logging_obj: LiteLLMLoggingObj,
litellm_metadata: Optional[Dict[str, Any]] = None,
custom_llm_provider: Optional[str] = None,
):
super().__init__(response, model, responses_api_provider_config, logging_obj)
super().__init__(
response,
model,
responses_api_provider_config,
logging_obj,
litellm_metadata,
custom_llm_provider,
)
self.stream_iterator = response.iter_lines()
def __iter__(self):
@ -228,12 +264,16 @@ class MockResponsesAPIStreamingIterator(BaseResponsesAPIStreamingIterator):
model: str,
responses_api_provider_config: BaseResponsesAPIConfig,
logging_obj: LiteLLMLoggingObj,
litellm_metadata: Optional[Dict[str, Any]] = None,
custom_llm_provider: Optional[str] = None,
):
super().__init__(
response=response,
model=model,
responses_api_provider_config=responses_api_provider_config,
logging_obj=logging_obj,
litellm_metadata=litellm_metadata,
custom_llm_provider=custom_llm_provider,
)
# one-time transform

View file

@ -1,5 +1,5 @@
import base64
from typing import Any, Dict, Optional, Tuple, Union, cast, get_type_hints
from typing import Any, Dict, Optional, Union, cast, get_type_hints
import litellm
from litellm._logging import verbose_logger
@ -9,6 +9,7 @@ from litellm.types.llms.openai import (
ResponsesAPIOptionalRequestParams,
ResponsesAPIResponse,
)
from litellm.types.responses.main import DecodedResponseId
from litellm.types.utils import SpecialEnums, Usage
@ -83,30 +84,36 @@ class ResponsesAPIRequestUtils:
@staticmethod
def _update_responses_api_response_id_with_model_id(
responses_api_response: ResponsesAPIResponse,
kwargs: Dict[str, Any],
custom_llm_provider: Optional[str],
litellm_metadata: Optional[Dict[str, Any]] = None,
) -> ResponsesAPIResponse:
"""Update the responses_api_response_id with the model_id"""
litellm_metadata: Dict[str, Any] = kwargs.get("litellm_metadata", {}) or {}
"""
Update the responses_api_response_id with model_id and custom_llm_provider
This builds a composite ID containing the custom LLM provider, model ID, and original response ID
"""
litellm_metadata = litellm_metadata or {}
model_info: Dict[str, Any] = litellm_metadata.get("model_info", {}) or {}
model_id = model_info.get("id")
updated_id = ResponsesAPIRequestUtils._build_responses_api_response_id(
model_id=model_id,
custom_llm_provider=custom_llm_provider,
response_id=responses_api_response.id,
)
responses_api_response.id = updated_id
return responses_api_response
@staticmethod
def _build_responses_api_response_id(
custom_llm_provider: Optional[str],
model_id: Optional[str],
response_id: str,
) -> str:
"""Build the responses_api_response_id"""
if model_id is None:
return response_id
assembled_id: str = str(
SpecialEnums.LITELLM_MANAGED_RESPONSE_COMPLETE_STR.value
).format(model_id, response_id)
).format(custom_llm_provider, model_id, response_id)
base64_encoded_id: str = base64.b64encode(assembled_id.encode("utf-8")).decode(
"utf-8"
)
@ -115,12 +122,12 @@ class ResponsesAPIRequestUtils:
@staticmethod
def _decode_responses_api_response_id(
response_id: str,
) -> Tuple[Optional[str], str]:
) -> DecodedResponseId:
"""
Decode the responses_api_response_id
Returns:
Tuple of model_id, response_id (from upstream provider)
DecodedResponseId: TypedDict with custom_llm_provider, model_id, and response_id (from the upstream provider)
"""
try:
# Remove prefix and decode
@ -129,16 +136,45 @@ class ResponsesAPIRequestUtils:
# Parse components using known prefixes
if ";" not in decoded_id:
return None, response_id
return DecodedResponseId(
custom_llm_provider=None,
model_id=None,
response_id=response_id,
)
model_part, response_part = decoded_id.split(";", 1)
model_id = model_part.replace("litellm:model_id:", "")
parts = decoded_id.split(";")
# Format: litellm:custom_llm_provider:{};model_id:{};response_id:{}
custom_llm_provider = None
model_id = None
if (
len(parts) >= 3
): # Full format with custom_llm_provider, model_id, and response_id
custom_llm_provider_part = parts[0]
model_id_part = parts[1]
response_part = parts[2]
custom_llm_provider = custom_llm_provider_part.replace(
"litellm:custom_llm_provider:", ""
)
model_id = model_id_part.replace("model_id:", "")
decoded_response_id = response_part.replace("response_id:", "")
else:
decoded_response_id = response_id
return model_id, decoded_response_id
return DecodedResponseId(
custom_llm_provider=custom_llm_provider,
model_id=model_id,
response_id=decoded_response_id,
)
except Exception as e:
verbose_logger.debug(f"Error decoding response_id '{response_id}': {e}")
return None, response_id
return DecodedResponseId(
custom_llm_provider=None,
model_id=None,
response_id=response_id,
)
class ResponseAPILoggingUtils:
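A round-trip sketch of the composite ID format defined by `SpecialEnums.LITELLM_MANAGED_RESPONSE_COMPLETE_STR` (hypothetical values; the real decoder also strips an ID prefix before base64-decoding):
```python
import base64

assembled = "litellm:custom_llm_provider:openai;model_id:my-model-id;response_id:resp_abc123"
encoded = base64.b64encode(assembled.encode("utf-8")).decode("utf-8")

decoded = base64.b64decode(encoded).decode("utf-8")
provider_part, model_part, response_part = decoded.split(";")
print(provider_part.replace("litellm:custom_llm_provider:", ""))  # openai
print(model_part.replace("model_id:", ""))                        # my-model-id
print(response_part.replace("response_id:", ""))                  # resp_abc123
```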

View file

@ -31,11 +31,10 @@ class ResponsesApiDeploymentCheck(CustomLogger):
if previous_response_id is None:
return healthy_deployments
model_id, response_id = (
ResponsesAPIRequestUtils._decode_responses_api_response_id(
decoded_response = ResponsesAPIRequestUtils._decode_responses_api_response_id(
response_id=previous_response_id,
)
)
model_id = decoded_response.get("model_id")
if model_id is None:
return healthy_deployments

View file

@ -0,0 +1,18 @@
from typing import Any, Dict, List, Literal, Optional, Union
from fastapi import HTTPException
from pydantic import BaseModel, EmailStr
from litellm.proxy._types import LiteLLM_UserTableWithKeyCount
class UserListResponse(BaseModel):
"""
Response model for the user list endpoint
"""
users: List[LiteLLM_UserTableWithKeyCount]
total: int
page: int
page_size: int
total_pages: int

View file

@ -1,5 +1,6 @@
from typing import Literal
from pydantic import PrivateAttr
from typing_extensions import Any, List, Optional, TypedDict
from litellm.types.llms.base import BaseLiteLLMOpenAIResponseObject
@ -46,3 +47,30 @@ class GenericResponseOutputItem(BaseLiteLLMOpenAIResponseObject):
status: str # "completed", "in_progress", etc.
role: str # "assistant", "user", etc.
content: List[OutputText]
class DeleteResponseResult(BaseLiteLLMOpenAIResponseObject):
"""
Result of a delete response request
{
"id": "resp_6786a1bec27481909a17d673315b29f6",
"object": "response",
"deleted": true
}
"""
id: Optional[str]
object: Optional[str]
deleted: Optional[bool]
# Define private attributes using PrivateAttr
_hidden_params: dict = PrivateAttr(default_factory=dict)
class DecodedResponseId(TypedDict, total=False):
"""Structure representing a decoded response ID"""
custom_llm_provider: Optional[str]
model_id: Optional[str]
response_id: str

View file

@ -2254,7 +2254,9 @@ class SpecialEnums(Enum):
LITELM_MANAGED_FILE_ID_PREFIX = "litellm_proxy"
LITELLM_MANAGED_FILE_COMPLETE_STR = "litellm_proxy:{};unified_id,{}"
LITELLM_MANAGED_RESPONSE_COMPLETE_STR = "litellm:model_id:{};response_id:{}"
LITELLM_MANAGED_RESPONSE_COMPLETE_STR = (
"litellm:custom_llm_provider:{};model_id:{};response_id:{}"
)
LLMResponseTypes = Union[

View file

@ -180,10 +180,18 @@ from litellm.types.utils import (
all_litellm_params,
)
with resources.open_text(
"litellm.litellm_core_utils.tokenizers", "anthropic_tokenizer.json"
) as f:
try:
# Python 3.9+: resources.files() replaces the deprecated resources.open_text()
with resources.files("litellm.litellm_core_utils.tokenizers").joinpath(
"anthropic_tokenizer.json"
).open("r") as f:
json_data = json.load(f)
except (ImportError, AttributeError, TypeError):  # older interpreters without resources.files()
with resources.open_text(
"litellm.litellm_core_utils.tokenizers", "anthropic_tokenizer.json"
) as f:
json_data = json.load(f)
# Convert to str (if necessary)
claude_json_str = json.dumps(json_data)
import importlib.metadata
@ -516,9 +524,9 @@ def function_setup( # noqa: PLR0915
function_id: Optional[str] = kwargs["id"] if "id" in kwargs else None
## DYNAMIC CALLBACKS ##
dynamic_callbacks: Optional[
List[Union[str, Callable, CustomLogger]]
] = kwargs.pop("callbacks", None)
dynamic_callbacks: Optional[List[Union[str, Callable, CustomLogger]]] = (
kwargs.pop("callbacks", None)
)
all_callbacks = get_dynamic_callbacks(dynamic_callbacks=dynamic_callbacks)
if len(all_callbacks) > 0:
@ -1202,9 +1210,9 @@ def client(original_function): # noqa: PLR0915
exception=e,
retry_policy=kwargs.get("retry_policy"),
)
kwargs[
"retry_policy"
] = reset_retry_policy() # prevent infinite loops
kwargs["retry_policy"] = (
reset_retry_policy()
) # prevent infinite loops
litellm.num_retries = (
None # set retries to None to prevent infinite loops
)
@ -3028,16 +3036,16 @@ def get_optional_params( # noqa: PLR0915
True # so that main.py adds the function call to the prompt
)
if "tools" in non_default_params:
optional_params[
"functions_unsupported_model"
] = non_default_params.pop("tools")
optional_params["functions_unsupported_model"] = (
non_default_params.pop("tools")
)
non_default_params.pop(
"tool_choice", None
) # causes ollama requests to hang
elif "functions" in non_default_params:
optional_params[
"functions_unsupported_model"
] = non_default_params.pop("functions")
optional_params["functions_unsupported_model"] = (
non_default_params.pop("functions")
)
elif (
litellm.add_function_to_prompt
): # if user opts to add it to prompt instead
@ -3060,11 +3068,11 @@ def get_optional_params( # noqa: PLR0915
if "response_format" in non_default_params:
if provider_config is not None:
non_default_params[
"response_format"
] = provider_config.get_json_schema_from_pydantic_object(
non_default_params["response_format"] = (
provider_config.get_json_schema_from_pydantic_object(
response_format=non_default_params["response_format"]
)
)
else:
non_default_params["response_format"] = type_to_response_format_param(
response_format=non_default_params["response_format"]
@ -4079,9 +4087,9 @@ def _count_characters(text: str) -> int:
def get_response_string(response_obj: Union[ModelResponse, ModelResponseStream]) -> str:
_choices: Union[
List[Union[Choices, StreamingChoices]], List[StreamingChoices]
] = response_obj.choices
_choices: Union[List[Union[Choices, StreamingChoices]], List[StreamingChoices]] = (
response_obj.choices
)
response_str = ""
for choice in _choices:
@ -6625,8 +6633,8 @@ class ProviderConfigManager:
@staticmethod
def get_provider_responses_api_config(
model: str,
provider: LlmProviders,
model: Optional[str] = None,
) -> Optional[BaseResponsesAPIConfig]:
if litellm.LlmProviders.OPENAI == provider:
return litellm.OpenAIResponsesAPIConfig()

View file
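After this signature change, `model` is optional and a config can be selected by provider alone. A hedged usage sketch (the import path is assumed from the hunk header):

```python
# Sketch under the assumption that ProviderConfigManager lives in litellm/utils.py,
# as the hunk header above suggests.
import litellm
from litellm.utils import ProviderConfigManager

# `model` is now optional; the provider alone is enough for the OpenAI config.
config = ProviderConfigManager.get_provider_responses_api_config(
    provider=litellm.LlmProviders.OPENAI,
)
print(type(config))  # expected: litellm.OpenAIResponsesAPIConfig
```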

@ -1490,7 +1490,6 @@
"supports_prompt_caching": false,
"supports_system_messages": true,
"supports_tool_choice": true,
"supports_native_streaming": false,
"supports_reasoning": true
},
"azure/gpt-4o-audio-preview-2024-12-17": {

poetry.lock generated
View file

@ -548,7 +548,7 @@ version = "3.4.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
groups = ["main"]
groups = ["main", "dev", "proxy-dev"]
files = [
{file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
@ -742,6 +742,24 @@ ssh = ["bcrypt (>=3.1.5)"]
test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
test-randomorder = ["pytest-randomly"]
[[package]]
name = "deprecated"
version = "1.2.18"
description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
groups = ["dev", "proxy-dev"]
files = [
{file = "Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec"},
{file = "deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d"},
]
[package.dependencies]
wrapt = ">=1.10,<2"
[package.extras]
dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "setuptools ; python_version >= \"3.12\"", "tox"]
[[package]]
name = "distro"
version = "1.9.0"
@ -1116,14 +1134,14 @@ protobuf = ">=3.20.2,<4.21.0 || >4.21.0,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4
name = "googleapis-common-protos"
version = "1.70.0"
description = "Common protobufs used in Google APIs"
optional = true
optional = false
python-versions = ">=3.7"
groups = ["main"]
markers = "extra == \"extra-proxy\""
groups = ["main", "dev", "proxy-dev"]
files = [
{file = "googleapis_common_protos-1.70.0-py3-none-any.whl", hash = "sha256:b8bfcca8c25a2bb253e0e0b0adaf8c00773e5e6af6fd92397576680b807e0fd8"},
{file = "googleapis_common_protos-1.70.0.tar.gz", hash = "sha256:0e1b44e0ea153e6594f9f394fef15193a68aaaea2d843f83e2742717ca753257"},
]
markers = {main = "extra == \"extra-proxy\""}
[package.dependencies]
grpcio = {version = ">=1.44.0,<2.0.0", optional = true, markers = "extra == \"grpc\""}
@ -1154,10 +1172,9 @@ protobuf = ">=3.20.2,<4.21.1 || >4.21.1,<4.21.2 || >4.21.2,<4.21.3 || >4.21.3,<4
name = "grpcio"
version = "1.70.0"
description = "HTTP/2-based RPC framework"
optional = true
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version < \"3.10\" and extra == \"extra-proxy\""
groups = ["main", "dev", "proxy-dev"]
files = [
{file = "grpcio-1.70.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:95469d1977429f45fe7df441f586521361e235982a0b39e33841549143ae2851"},
{file = "grpcio-1.70.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:ed9718f17fbdb472e33b869c77a16d0b55e166b100ec57b016dc7de9c8d236bf"},
@ -1215,6 +1232,7 @@ files = [
{file = "grpcio-1.70.0-cp39-cp39-win_amd64.whl", hash = "sha256:a31d7e3b529c94e930a117b2175b2efd179d96eb3c7a21ccb0289a8ab05b645c"},
{file = "grpcio-1.70.0.tar.gz", hash = "sha256:8d1584a68d5922330025881e63a6c1b54cc8117291d382e4fa69339b6d914c56"},
]
markers = {main = "python_version < \"3.10\" and extra == \"extra-proxy\"", dev = "python_version < \"3.10\"", proxy-dev = "python_version < \"3.10\""}
[package.extras]
protobuf = ["grpcio-tools (>=1.70.0)"]
@ -1223,10 +1241,9 @@ protobuf = ["grpcio-tools (>=1.70.0)"]
name = "grpcio"
version = "1.71.0"
description = "HTTP/2-based RPC framework"
optional = true
optional = false
python-versions = ">=3.9"
groups = ["main"]
markers = "python_version >= \"3.10\" and extra == \"extra-proxy\""
groups = ["main", "dev", "proxy-dev"]
files = [
{file = "grpcio-1.71.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:c200cb6f2393468142eb50ab19613229dcc7829b5ccee8b658a36005f6669fdd"},
{file = "grpcio-1.71.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:b2266862c5ad664a380fbbcdbdb8289d71464c42a8c29053820ee78ba0119e5d"},
@ -1280,45 +1297,28 @@ files = [
{file = "grpcio-1.71.0-cp39-cp39-win_amd64.whl", hash = "sha256:63e41b91032f298b3e973b3fa4093cbbc620c875e2da7b93e249d4728b54559a"},
{file = "grpcio-1.71.0.tar.gz", hash = "sha256:2b85f7820475ad3edec209d3d89a7909ada16caab05d3f2e08a7e8ae3200a55c"},
]
markers = {main = "python_version >= \"3.10\" and extra == \"extra-proxy\"", dev = "python_version >= \"3.10\"", proxy-dev = "python_version >= \"3.10\""}
[package.extras]
protobuf = ["grpcio-tools (>=1.71.0)"]
[[package]]
name = "grpcio-status"
version = "1.70.0"
version = "1.62.3"
description = "Status proto mapping for gRPC"
optional = true
python-versions = ">=3.8"
python-versions = ">=3.6"
groups = ["main"]
markers = "python_version < \"3.10\" and extra == \"extra-proxy\""
markers = "extra == \"extra-proxy\""
files = [
{file = "grpcio_status-1.70.0-py3-none-any.whl", hash = "sha256:fc5a2ae2b9b1c1969cc49f3262676e6854aa2398ec69cb5bd6c47cd501904a85"},
{file = "grpcio_status-1.70.0.tar.gz", hash = "sha256:0e7b42816512433b18b9d764285ff029bde059e9d41f8fe10a60631bd8348101"},
{file = "grpcio-status-1.62.3.tar.gz", hash = "sha256:289bdd7b2459794a12cf95dc0cb727bd4a1742c37bd823f760236c937e53a485"},
{file = "grpcio_status-1.62.3-py3-none-any.whl", hash = "sha256:f9049b762ba8de6b1086789d8315846e094edac2c50beaf462338b301a8fd4b8"},
]
[package.dependencies]
googleapis-common-protos = ">=1.5.5"
grpcio = ">=1.70.0"
protobuf = ">=5.26.1,<6.0dev"
[[package]]
name = "grpcio-status"
version = "1.71.0"
description = "Status proto mapping for gRPC"
optional = true
python-versions = ">=3.9"
groups = ["main"]
markers = "extra == \"extra-proxy\" and python_version >= \"3.10\""
files = [
{file = "grpcio_status-1.71.0-py3-none-any.whl", hash = "sha256:843934ef8c09e3e858952887467f8256aac3910c55f077a359a65b2b3cde3e68"},
{file = "grpcio_status-1.71.0.tar.gz", hash = "sha256:11405fed67b68f406b3f3c7c5ae5104a79d2d309666d10d61b152e91d28fb968"},
]
[package.dependencies]
googleapis-common-protos = ">=1.5.5"
grpcio = ">=1.71.0"
protobuf = ">=5.26.1,<6.0dev"
grpcio = ">=1.62.3"
protobuf = ">=4.21.6"
[[package]]
name = "gunicorn"
@ -1550,27 +1550,23 @@ all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2
[[package]]
name = "importlib-metadata"
version = "8.5.0"
version = "7.1.0"
description = "Read metadata from Python packages"
optional = false
python-versions = ">=3.8"
groups = ["main"]
groups = ["main", "dev", "proxy-dev"]
files = [
{file = "importlib_metadata-8.5.0-py3-none-any.whl", hash = "sha256:45e54197d28b7a7f1559e60b95e7c567032b602131fbd588f1497f47880aa68b"},
{file = "importlib_metadata-8.5.0.tar.gz", hash = "sha256:71522656f0abace1d072b9e5481a48f07c138e00f079c38c8f883823f9c26bd7"},
{file = "importlib_metadata-7.1.0-py3-none-any.whl", hash = "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570"},
{file = "importlib_metadata-7.1.0.tar.gz", hash = "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"},
]
[package.dependencies]
zipp = ">=3.20"
zipp = ">=0.5"
[package.extras]
check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1) ; sys_platform != \"cygwin\""]
cover = ["pytest-cov"]
doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
enabler = ["pytest-enabler (>=2.2)"]
docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
perf = ["ipython"]
test = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
type = ["pytest-mypy"]
testing = ["flufl.flake8", "importlib-resources (>=1.3) ; python_version < \"3.9\"", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy ; platform_python_implementation != \"PyPy\"", "pytest-perf (>=0.9.2)", "pytest-ruff (>=0.2.1)"]
[[package]]
name = "importlib-resources"
@ -2326,6 +2322,142 @@ datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
realtime = ["websockets (>=13,<16)"]
voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]
[[package]]
name = "opentelemetry-api"
version = "1.25.0"
description = "OpenTelemetry Python API"
optional = false
python-versions = ">=3.8"
groups = ["dev", "proxy-dev"]
files = [
{file = "opentelemetry_api-1.25.0-py3-none-any.whl", hash = "sha256:757fa1aa020a0f8fa139f8959e53dec2051cc26b832e76fa839a6d76ecefd737"},
{file = "opentelemetry_api-1.25.0.tar.gz", hash = "sha256:77c4985f62f2614e42ce77ee4c9da5fa5f0bc1e1821085e9a47533a9323ae869"},
]
[package.dependencies]
deprecated = ">=1.2.6"
importlib-metadata = ">=6.0,<=7.1"
[[package]]
name = "opentelemetry-exporter-otlp"
version = "1.25.0"
description = "OpenTelemetry Collector Exporters"
optional = false
python-versions = ">=3.8"
groups = ["dev", "proxy-dev"]
files = [
{file = "opentelemetry_exporter_otlp-1.25.0-py3-none-any.whl", hash = "sha256:d67a831757014a3bc3174e4cd629ae1493b7ba8d189e8a007003cacb9f1a6b60"},
{file = "opentelemetry_exporter_otlp-1.25.0.tar.gz", hash = "sha256:ce03199c1680a845f82e12c0a6a8f61036048c07ec7a0bd943142aca8fa6ced0"},
]
[package.dependencies]
opentelemetry-exporter-otlp-proto-grpc = "1.25.0"
opentelemetry-exporter-otlp-proto-http = "1.25.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-common"
version = "1.25.0"
description = "OpenTelemetry Protobuf encoding"
optional = false
python-versions = ">=3.8"
groups = ["dev", "proxy-dev"]
files = [
{file = "opentelemetry_exporter_otlp_proto_common-1.25.0-py3-none-any.whl", hash = "sha256:15637b7d580c2675f70246563363775b4e6de947871e01d0f4e3881d1848d693"},
{file = "opentelemetry_exporter_otlp_proto_common-1.25.0.tar.gz", hash = "sha256:c93f4e30da4eee02bacd1e004eb82ce4da143a2f8e15b987a9f603e0a85407d3"},
]
[package.dependencies]
opentelemetry-proto = "1.25.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-grpc"
version = "1.25.0"
description = "OpenTelemetry Collector Protobuf over gRPC Exporter"
optional = false
python-versions = ">=3.8"
groups = ["dev", "proxy-dev"]
files = [
{file = "opentelemetry_exporter_otlp_proto_grpc-1.25.0-py3-none-any.whl", hash = "sha256:3131028f0c0a155a64c430ca600fd658e8e37043cb13209f0109db5c1a3e4eb4"},
{file = "opentelemetry_exporter_otlp_proto_grpc-1.25.0.tar.gz", hash = "sha256:c0b1661415acec5af87625587efa1ccab68b873745ca0ee96b69bb1042087eac"},
]
[package.dependencies]
deprecated = ">=1.2.6"
googleapis-common-protos = ">=1.52,<2.0"
grpcio = ">=1.0.0,<2.0.0"
opentelemetry-api = ">=1.15,<2.0"
opentelemetry-exporter-otlp-proto-common = "1.25.0"
opentelemetry-proto = "1.25.0"
opentelemetry-sdk = ">=1.25.0,<1.26.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-http"
version = "1.25.0"
description = "OpenTelemetry Collector Protobuf over HTTP Exporter"
optional = false
python-versions = ">=3.8"
groups = ["dev", "proxy-dev"]
files = [
{file = "opentelemetry_exporter_otlp_proto_http-1.25.0-py3-none-any.whl", hash = "sha256:2eca686ee11b27acd28198b3ea5e5863a53d1266b91cda47c839d95d5e0541a6"},
{file = "opentelemetry_exporter_otlp_proto_http-1.25.0.tar.gz", hash = "sha256:9f8723859e37c75183ea7afa73a3542f01d0fd274a5b97487ea24cb683d7d684"},
]
[package.dependencies]
deprecated = ">=1.2.6"
googleapis-common-protos = ">=1.52,<2.0"
opentelemetry-api = ">=1.15,<2.0"
opentelemetry-exporter-otlp-proto-common = "1.25.0"
opentelemetry-proto = "1.25.0"
opentelemetry-sdk = ">=1.25.0,<1.26.0"
requests = ">=2.7,<3.0"
[[package]]
name = "opentelemetry-proto"
version = "1.25.0"
description = "OpenTelemetry Python Proto"
optional = false
python-versions = ">=3.8"
groups = ["dev", "proxy-dev"]
files = [
{file = "opentelemetry_proto-1.25.0-py3-none-any.whl", hash = "sha256:f07e3341c78d835d9b86665903b199893befa5e98866f63d22b00d0b7ca4972f"},
{file = "opentelemetry_proto-1.25.0.tar.gz", hash = "sha256:35b6ef9dc4a9f7853ecc5006738ad40443701e52c26099e197895cbda8b815a3"},
]
[package.dependencies]
protobuf = ">=3.19,<5.0"
[[package]]
name = "opentelemetry-sdk"
version = "1.25.0"
description = "OpenTelemetry Python SDK"
optional = false
python-versions = ">=3.8"
groups = ["dev", "proxy-dev"]
files = [
{file = "opentelemetry_sdk-1.25.0-py3-none-any.whl", hash = "sha256:d97ff7ec4b351692e9d5a15af570c693b8715ad78b8aafbec5c7100fe966b4c9"},
{file = "opentelemetry_sdk-1.25.0.tar.gz", hash = "sha256:ce7fc319c57707ef5bf8b74fb9f8ebdb8bfafbe11898410e0d2a761d08a98ec7"},
]
[package.dependencies]
opentelemetry-api = "1.25.0"
opentelemetry-semantic-conventions = "0.46b0"
typing-extensions = ">=3.7.4"
[[package]]
name = "opentelemetry-semantic-conventions"
version = "0.46b0"
description = "OpenTelemetry Semantic Conventions"
optional = false
python-versions = ">=3.8"
groups = ["dev", "proxy-dev"]
files = [
{file = "opentelemetry_semantic_conventions-0.46b0-py3-none-any.whl", hash = "sha256:6daef4ef9fa51d51855d9f8e0ccd3a1bd59e0e545abe99ac6203804e36ab3e07"},
{file = "opentelemetry_semantic_conventions-0.46b0.tar.gz", hash = "sha256:fbc982ecbb6a6e90869b15c1673be90bd18c8a56ff1cffc0864e38e2edffaefa"},
]
[package.dependencies]
opentelemetry-api = "1.25.0"
[[package]]
name = "orjson"
version = "3.10.15"
@ -2668,25 +2800,25 @@ testing = ["google-api-core (>=1.31.5)"]
[[package]]
name = "protobuf"
version = "5.29.4"
version = "4.25.6"
description = ""
optional = true
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "extra == \"extra-proxy\""
groups = ["main", "dev", "proxy-dev"]
files = [
{file = "protobuf-5.29.4-cp310-abi3-win32.whl", hash = "sha256:13eb236f8eb9ec34e63fc8b1d6efd2777d062fa6aaa68268fb67cf77f6839ad7"},
{file = "protobuf-5.29.4-cp310-abi3-win_amd64.whl", hash = "sha256:bcefcdf3976233f8a502d265eb65ea740c989bacc6c30a58290ed0e519eb4b8d"},
{file = "protobuf-5.29.4-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:307ecba1d852ec237e9ba668e087326a67564ef83e45a0189a772ede9e854dd0"},
{file = "protobuf-5.29.4-cp38-abi3-manylinux2014_aarch64.whl", hash = "sha256:aec4962f9ea93c431d5714ed1be1c93f13e1a8618e70035ba2b0564d9e633f2e"},
{file = "protobuf-5.29.4-cp38-abi3-manylinux2014_x86_64.whl", hash = "sha256:d7d3f7d1d5a66ed4942d4fefb12ac4b14a29028b209d4bfb25c68ae172059922"},
{file = "protobuf-5.29.4-cp38-cp38-win32.whl", hash = "sha256:1832f0515b62d12d8e6ffc078d7e9eb06969aa6dc13c13e1036e39d73bebc2de"},
{file = "protobuf-5.29.4-cp38-cp38-win_amd64.whl", hash = "sha256:476cb7b14914c780605a8cf62e38c2a85f8caff2e28a6a0bad827ec7d6c85d68"},
{file = "protobuf-5.29.4-cp39-cp39-win32.whl", hash = "sha256:fd32223020cb25a2cc100366f1dedc904e2d71d9322403224cdde5fdced0dabe"},
{file = "protobuf-5.29.4-cp39-cp39-win_amd64.whl", hash = "sha256:678974e1e3a9b975b8bc2447fca458db5f93a2fb6b0c8db46b6675b5b5346812"},
{file = "protobuf-5.29.4-py3-none-any.whl", hash = "sha256:3fde11b505e1597f71b875ef2fc52062b6a9740e5f7c8997ce878b6009145862"},
{file = "protobuf-5.29.4.tar.gz", hash = "sha256:4f1dfcd7997b31ef8f53ec82781ff434a28bf71d9102ddde14d076adcfc78c99"},
{file = "protobuf-4.25.6-cp310-abi3-win32.whl", hash = "sha256:61df6b5786e2b49fc0055f636c1e8f0aff263808bb724b95b164685ac1bcc13a"},
{file = "protobuf-4.25.6-cp310-abi3-win_amd64.whl", hash = "sha256:b8f837bfb77513fe0e2f263250f423217a173b6d85135be4d81e96a4653bcd3c"},
{file = "protobuf-4.25.6-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:6d4381f2417606d7e01750e2729fe6fbcda3f9883aa0c32b51d23012bded6c91"},
{file = "protobuf-4.25.6-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:5dd800da412ba7f6f26d2c08868a5023ce624e1fdb28bccca2dc957191e81fb5"},
{file = "protobuf-4.25.6-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:4434ff8bb5576f9e0c78f47c41cdf3a152c0b44de475784cd3fd170aef16205a"},
{file = "protobuf-4.25.6-cp38-cp38-win32.whl", hash = "sha256:8bad0f9e8f83c1fbfcc34e573352b17dfce7d0519512df8519994168dc015d7d"},
{file = "protobuf-4.25.6-cp38-cp38-win_amd64.whl", hash = "sha256:b6905b68cde3b8243a198268bb46fbec42b3455c88b6b02fb2529d2c306d18fc"},
{file = "protobuf-4.25.6-cp39-cp39-win32.whl", hash = "sha256:3f3b0b39db04b509859361ac9bca65a265fe9342e6b9406eda58029f5b1d10b2"},
{file = "protobuf-4.25.6-cp39-cp39-win_amd64.whl", hash = "sha256:6ef2045f89d4ad8d95fd43cd84621487832a61d15b49500e4c1350e8a0ef96be"},
{file = "protobuf-4.25.6-py3-none-any.whl", hash = "sha256:07972021c8e30b870cfc0863409d033af940213e0e7f64e27fe017b929d2c9f7"},
{file = "protobuf-4.25.6.tar.gz", hash = "sha256:f8cfbae7c5afd0d0eaccbe73267339bff605a2315860bb1ba08eb66670a9a91f"},
]
markers = {main = "extra == \"extra-proxy\""}
[[package]]
name = "pyasn1"
@ -3342,7 +3474,7 @@ version = "2.31.0"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.7"
groups = ["main"]
groups = ["main", "dev", "proxy-dev"]
files = [
{file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
{file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
@ -4027,7 +4159,7 @@ version = "1.26.20"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
groups = ["main"]
groups = ["main", "dev", "proxy-dev"]
markers = "python_version < \"3.10\""
files = [
{file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"},
@ -4045,7 +4177,7 @@ version = "2.2.3"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.8"
groups = ["main", "dev"]
groups = ["main", "dev", "proxy-dev"]
markers = "python_version >= \"3.10\""
files = [
{file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"},
@ -4229,6 +4361,95 @@ files = [
{file = "websockets-13.1.tar.gz", hash = "sha256:a3b3366087c1bc0a2795111edcadddb8b3b59509d5db5d7ea3fdd69f954a8878"},
]
[[package]]
name = "wrapt"
version = "1.17.2"
description = "Module for decorators, wrappers and monkey patching."
optional = false
python-versions = ">=3.8"
groups = ["dev", "proxy-dev"]
files = [
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3d57c572081fed831ad2d26fd430d565b76aa277ed1d30ff4d40670b1c0dd984"},
{file = "wrapt-1.17.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b5e251054542ae57ac7f3fba5d10bfff615b6c2fb09abeb37d2f1463f841ae22"},
{file = "wrapt-1.17.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:80dd7db6a7cb57ffbc279c4394246414ec99537ae81ffd702443335a61dbf3a7"},
{file = "wrapt-1.17.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a6e821770cf99cc586d33833b2ff32faebdbe886bd6322395606cf55153246c"},
{file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b60fb58b90c6d63779cb0c0c54eeb38941bae3ecf7a73c764c52c88c2dcb9d72"},
{file = "wrapt-1.17.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b870b5df5b71d8c3359d21be8f0d6c485fa0ebdb6477dda51a1ea54a9b558061"},
{file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:4011d137b9955791f9084749cba9a367c68d50ab8d11d64c50ba1688c9b457f2"},
{file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:1473400e5b2733e58b396a04eb7f35f541e1fb976d0c0724d0223dd607e0f74c"},
{file = "wrapt-1.17.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3cedbfa9c940fdad3e6e941db7138e26ce8aad38ab5fe9dcfadfed9db7a54e62"},
{file = "wrapt-1.17.2-cp310-cp310-win32.whl", hash = "sha256:582530701bff1dec6779efa00c516496968edd851fba224fbd86e46cc6b73563"},
{file = "wrapt-1.17.2-cp310-cp310-win_amd64.whl", hash = "sha256:58705da316756681ad3c9c73fd15499aa4d8c69f9fd38dc8a35e06c12468582f"},
{file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ff04ef6eec3eee8a5efef2401495967a916feaa353643defcc03fc74fe213b58"},
{file = "wrapt-1.17.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4db983e7bca53819efdbd64590ee96c9213894272c776966ca6306b73e4affda"},
{file = "wrapt-1.17.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:9abc77a4ce4c6f2a3168ff34b1da9b0f311a8f1cfd694ec96b0603dff1c79438"},
{file = "wrapt-1.17.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b929ac182f5ace000d459c59c2c9c33047e20e935f8e39371fa6e3b85d56f4a"},
{file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f09b286faeff3c750a879d336fb6d8713206fc97af3adc14def0cdd349df6000"},
{file = "wrapt-1.17.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a7ed2d9d039bd41e889f6fb9364554052ca21ce823580f6a07c4ec245c1f5d6"},
{file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:129a150f5c445165ff941fc02ee27df65940fcb8a22a61828b1853c98763a64b"},
{file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:1fb5699e4464afe5c7e65fa51d4f99e0b2eadcc176e4aa33600a3df7801d6662"},
{file = "wrapt-1.17.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:9a2bce789a5ea90e51a02dfcc39e31b7f1e662bc3317979aa7e5538e3a034f72"},
{file = "wrapt-1.17.2-cp311-cp311-win32.whl", hash = "sha256:4afd5814270fdf6380616b321fd31435a462019d834f83c8611a0ce7484c7317"},
{file = "wrapt-1.17.2-cp311-cp311-win_amd64.whl", hash = "sha256:acc130bc0375999da18e3d19e5a86403667ac0c4042a094fefb7eec8ebac7cf3"},
{file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925"},
{file = "wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392"},
{file = "wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40"},
{file = "wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d"},
{file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b"},
{file = "wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98"},
{file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82"},
{file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae"},
{file = "wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9"},
{file = "wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9"},
{file = "wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991"},
{file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:6ed6ffac43aecfe6d86ec5b74b06a5be33d5bb9243d055141e8cabb12aa08125"},
{file = "wrapt-1.17.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:35621ae4c00e056adb0009f8e86e28eb4a41a4bfa8f9bfa9fca7d343fe94f998"},
{file = "wrapt-1.17.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a604bf7a053f8362d27eb9fefd2097f82600b856d5abe996d623babd067b1ab5"},
{file = "wrapt-1.17.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5cbabee4f083b6b4cd282f5b817a867cf0b1028c54d445b7ec7cfe6505057cf8"},
{file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49703ce2ddc220df165bd2962f8e03b84c89fee2d65e1c24a7defff6f988f4d6"},
{file = "wrapt-1.17.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8112e52c5822fc4253f3901b676c55ddf288614dc7011634e2719718eaa187dc"},
{file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9fee687dce376205d9a494e9c121e27183b2a3df18037f89d69bd7b35bcf59e2"},
{file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:18983c537e04d11cf027fbb60a1e8dfd5190e2b60cc27bc0808e653e7b218d1b"},
{file = "wrapt-1.17.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:703919b1633412ab54bcf920ab388735832fdcb9f9a00ae49387f0fe67dad504"},
{file = "wrapt-1.17.2-cp313-cp313-win32.whl", hash = "sha256:abbb9e76177c35d4e8568e58650aa6926040d6a9f6f03435b7a522bf1c487f9a"},
{file = "wrapt-1.17.2-cp313-cp313-win_amd64.whl", hash = "sha256:69606d7bb691b50a4240ce6b22ebb319c1cfb164e5f6569835058196e0f3a845"},
{file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:4a721d3c943dae44f8e243b380cb645a709ba5bd35d3ad27bc2ed947e9c68192"},
{file = "wrapt-1.17.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:766d8bbefcb9e00c3ac3b000d9acc51f1b399513f44d77dfe0eb026ad7c9a19b"},
{file = "wrapt-1.17.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e496a8ce2c256da1eb98bd15803a79bee00fc351f5dfb9ea82594a3f058309e0"},
{file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d615e4fe22f4ad3528448c193b218e077656ca9ccb22ce2cb20db730f8d306"},
{file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5aaeff38654462bc4b09023918b7f21790efb807f54c000a39d41d69cf552cb"},
{file = "wrapt-1.17.2-cp313-cp313t-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a7d15bbd2bc99e92e39f49a04653062ee6085c0e18b3b7512a4f2fe91f2d681"},
{file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e3890b508a23299083e065f435a492b5435eba6e304a7114d2f919d400888cc6"},
{file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:8c8b293cd65ad716d13d8dd3624e42e5a19cc2a2f1acc74b30c2c13f15cb61a6"},
{file = "wrapt-1.17.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c82b8785d98cdd9fed4cac84d765d234ed3251bd6afe34cb7ac523cb93e8b4f"},
{file = "wrapt-1.17.2-cp313-cp313t-win32.whl", hash = "sha256:13e6afb7fe71fe7485a4550a8844cc9ffbe263c0f1a1eea569bc7091d4898555"},
{file = "wrapt-1.17.2-cp313-cp313t-win_amd64.whl", hash = "sha256:eaf675418ed6b3b31c7a989fd007fa7c3be66ce14e5c3b27336383604c9da85c"},
{file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5c803c401ea1c1c18de70a06a6f79fcc9c5acfc79133e9869e730ad7f8ad8ef9"},
{file = "wrapt-1.17.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f917c1180fdb8623c2b75a99192f4025e412597c50b2ac870f156de8fb101119"},
{file = "wrapt-1.17.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ecc840861360ba9d176d413a5489b9a0aff6d6303d7e733e2c4623cfa26904a6"},
{file = "wrapt-1.17.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb87745b2e6dc56361bfde481d5a378dc314b252a98d7dd19a651a3fa58f24a9"},
{file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58455b79ec2661c3600e65c0a716955adc2410f7383755d537584b0de41b1d8a"},
{file = "wrapt-1.17.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4e42a40a5e164cbfdb7b386c966a588b1047558a990981ace551ed7e12ca9c2"},
{file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:91bd7d1773e64019f9288b7a5101f3ae50d3d8e6b1de7edee9c2ccc1d32f0c0a"},
{file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:bb90fb8bda722a1b9d48ac1e6c38f923ea757b3baf8ebd0c82e09c5c1a0e7a04"},
{file = "wrapt-1.17.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:08e7ce672e35efa54c5024936e559469436f8b8096253404faeb54d2a878416f"},
{file = "wrapt-1.17.2-cp38-cp38-win32.whl", hash = "sha256:410a92fefd2e0e10d26210e1dfb4a876ddaf8439ef60d6434f21ef8d87efc5b7"},
{file = "wrapt-1.17.2-cp38-cp38-win_amd64.whl", hash = "sha256:95c658736ec15602da0ed73f312d410117723914a5c91a14ee4cdd72f1d790b3"},
{file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99039fa9e6306880572915728d7f6c24a86ec57b0a83f6b2491e1d8ab0235b9a"},
{file = "wrapt-1.17.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2696993ee1eebd20b8e4ee4356483c4cb696066ddc24bd70bcbb80fa56ff9061"},
{file = "wrapt-1.17.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:612dff5db80beef9e649c6d803a8d50c409082f1fedc9dbcdfde2983b2025b82"},
{file = "wrapt-1.17.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62c2caa1585c82b3f7a7ab56afef7b3602021d6da34fbc1cf234ff139fed3cd9"},
{file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c958bcfd59bacc2d0249dcfe575e71da54f9dcf4a8bdf89c4cb9a68a1170d73f"},
{file = "wrapt-1.17.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc78a84e2dfbc27afe4b2bd7c80c8db9bca75cc5b85df52bfe634596a1da846b"},
{file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:ba0f0eb61ef00ea10e00eb53a9129501f52385c44853dbd6c4ad3f403603083f"},
{file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1e1fe0e6ab7775fd842bc39e86f6dcfc4507ab0ffe206093e76d61cde37225c8"},
{file = "wrapt-1.17.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c86563182421896d73858e08e1db93afdd2b947a70064b813d515d66549e15f9"},
{file = "wrapt-1.17.2-cp39-cp39-win32.whl", hash = "sha256:f393cda562f79828f38a819f4788641ac7c4085f30f1ce1a68672baa686482bb"},
{file = "wrapt-1.17.2-cp39-cp39-win_amd64.whl", hash = "sha256:36ccae62f64235cf8ddb682073a60519426fdd4725524ae38874adf72b5f2aeb"},
{file = "wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8"},
{file = "wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3"},
]
[[package]]
name = "wsproto"
version = "1.2.0"
@ -4363,7 +4584,7 @@ version = "3.20.2"
description = "Backport of pathlib-compatible object wrapper for zip files"
optional = false
python-versions = ">=3.8"
groups = ["main"]
groups = ["main", "dev", "proxy-dev"]
files = [
{file = "zipp-3.20.2-py3-none-any.whl", hash = "sha256:a817ac80d6cf4b23bf7f2828b7cabf326f15a001bea8b1f9b49631780ba28350"},
{file = "zipp-3.20.2.tar.gz", hash = "sha256:bc9eb26f4506fda01b81bcde0ca78103b6e62f991b381fec825435c836edbc29"},
@ -4384,4 +4605,4 @@ proxy = ["PyJWT", "apscheduler", "backoff", "boto3", "cryptography", "fastapi",
[metadata]
lock-version = "2.1"
python-versions = ">=3.8.1,<4.0, !=3.9.7"
content-hash = "40074b2e47aae8ece058be9a42eda3ca0618e27e4fc9d6529793816df7adb6c8"
content-hash = "adefc5c35b625ab156ff674c880256643a22880012451d4ade7fa2ef11f5885d"

View file

@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
version = "1.67.1"
version = "1.67.2"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT"
@ -107,18 +107,24 @@ types-requests = "*"
types-setuptools = "*"
types-redis = "*"
types-PyYAML = "*"
opentelemetry-api = "1.25.0"
opentelemetry-sdk = "1.25.0"
opentelemetry-exporter-otlp = "1.25.0"
[tool.poetry.group.proxy-dev.dependencies]
prisma = "0.11.0"
hypercorn = "^0.15.0"
prometheus-client = "0.20.0"
opentelemetry-api = "1.25.0"
opentelemetry-sdk = "1.25.0"
opentelemetry-exporter-otlp = "1.25.0"
[build-system]
requires = ["poetry-core", "wheel"]
build-backend = "poetry.core.masonry.api"
[tool.commitizen]
version = "1.67.1"
version = "1.67.2"
version_files = [
"pyproject.toml:^version"
]

View file

@ -0,0 +1,4 @@
{
"status": "failed",
"failedTests": []
}

View file

@ -0,0 +1,231 @@
import json
import os
import sys
from typing import Optional
# Adds the grandparent directory to sys.path to allow importing project modules
sys.path.insert(0, os.path.abspath("../.."))
import asyncio
import litellm
import pytest
from litellm.integrations.arize.arize import ArizeLogger
from litellm.integrations.custom_logger import CustomLogger
from litellm.integrations._types.open_inference import (
SpanAttributes,
MessageAttributes,
ToolCallAttributes,
)
from litellm.types.utils import Choices, StandardCallbackDynamicParams
def test_arize_set_attributes():
"""
Test setting attributes for Arize, including all custom LLM attributes.
Ensures that the correct span attributes are being added during a request.
"""
from unittest.mock import MagicMock
from litellm.types.utils import ModelResponse
span = MagicMock() # Mocked tracing span to test attribute setting
# Construct kwargs to simulate a real LLM request scenario
kwargs = {
"model": "gpt-4o",
"messages": [{"role": "user", "content": "Basic Request Content"}],
"standard_logging_object": {
"model_parameters": {"user": "test_user"},
"metadata": {"key_1": "value_1", "key_2": None},
"call_type": "completion",
},
"optional_params": {
"max_tokens": "100",
"temperature": "1",
"top_p": "5",
"stream": False,
"user": "test_user",
"tools": [
{
"function": {
"name": "get_weather",
"description": "Fetches weather details.",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name",
}
},
"required": ["location"],
},
}
}
],
"functions": [{"name": "get_weather"}, {"name": "get_stock_price"}],
},
"litellm_params": {"custom_llm_provider": "openai"},
}
# Simulated LLM response object
response_obj = ModelResponse(
usage={"total_tokens": 100, "completion_tokens": 60, "prompt_tokens": 40},
choices=[
Choices(message={"role": "assistant", "content": "Basic Response Content"})
],
model="gpt-4o",
id="chatcmpl-ID",
)
# Apply attribute setting via ArizeLogger
ArizeLogger.set_arize_attributes(span, kwargs, response_obj)
# Validate that the expected number of attributes were set
assert span.set_attribute.call_count == 28
# Metadata attached to the span
span.set_attribute.assert_any_call(
SpanAttributes.METADATA, json.dumps({"key_1": "value_1", "key_2": None})
)
# Basic LLM information
span.set_attribute.assert_any_call(SpanAttributes.LLM_MODEL_NAME, "gpt-4o")
span.set_attribute.assert_any_call("llm.request.type", "completion")
span.set_attribute.assert_any_call(SpanAttributes.LLM_PROVIDER, "openai")
# LLM generation parameters
span.set_attribute.assert_any_call("llm.request.max_tokens", "100")
span.set_attribute.assert_any_call("llm.request.temperature", "1")
span.set_attribute.assert_any_call("llm.request.top_p", "5")
# Streaming and user info
span.set_attribute.assert_any_call("llm.is_streaming", "False")
span.set_attribute.assert_any_call("llm.user", "test_user")
# Response metadata
span.set_attribute.assert_any_call("llm.response.id", "chatcmpl-ID")
span.set_attribute.assert_any_call("llm.response.model", "gpt-4o")
span.set_attribute.assert_any_call(SpanAttributes.OPENINFERENCE_SPAN_KIND, "LLM")
# Request message content and metadata
span.set_attribute.assert_any_call(
SpanAttributes.INPUT_VALUE, "Basic Request Content"
)
span.set_attribute.assert_any_call(
f"{SpanAttributes.LLM_INPUT_MESSAGES}.0.{MessageAttributes.MESSAGE_ROLE}",
"user",
)
span.set_attribute.assert_any_call(
f"{SpanAttributes.LLM_INPUT_MESSAGES}.0.{MessageAttributes.MESSAGE_CONTENT}",
"Basic Request Content",
)
# Tool call definitions and function names
span.set_attribute.assert_any_call(
f"{SpanAttributes.LLM_TOOLS}.0.{SpanAttributes.TOOL_NAME}", "get_weather"
)
span.set_attribute.assert_any_call(
f"{SpanAttributes.LLM_TOOLS}.0.{SpanAttributes.TOOL_DESCRIPTION}",
"Fetches weather details.",
)
span.set_attribute.assert_any_call(
f"{SpanAttributes.LLM_TOOLS}.0.{SpanAttributes.TOOL_PARAMETERS}",
json.dumps(
{
"type": "object",
"properties": {
"location": {"type": "string", "description": "City name"}
},
"required": ["location"],
}
),
)
# Tool calls captured from optional_params
span.set_attribute.assert_any_call(
f"{MessageAttributes.MESSAGE_TOOL_CALLS}.0.{ToolCallAttributes.TOOL_CALL_FUNCTION_NAME}",
"get_weather",
)
span.set_attribute.assert_any_call(
f"{MessageAttributes.MESSAGE_TOOL_CALLS}.1.{ToolCallAttributes.TOOL_CALL_FUNCTION_NAME}",
"get_stock_price",
)
# Invocation parameters
span.set_attribute.assert_any_call(
SpanAttributes.LLM_INVOCATION_PARAMETERS, '{"user": "test_user"}'
)
# User ID
span.set_attribute.assert_any_call(SpanAttributes.USER_ID, "test_user")
# Output message content
span.set_attribute.assert_any_call(
SpanAttributes.OUTPUT_VALUE, "Basic Response Content"
)
span.set_attribute.assert_any_call(
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.0.{MessageAttributes.MESSAGE_ROLE}",
"assistant",
)
span.set_attribute.assert_any_call(
f"{SpanAttributes.LLM_OUTPUT_MESSAGES}.0.{MessageAttributes.MESSAGE_CONTENT}",
"Basic Response Content",
)
# Token counts
span.set_attribute.assert_any_call(SpanAttributes.LLM_TOKEN_COUNT_TOTAL, 100)
span.set_attribute.assert_any_call(SpanAttributes.LLM_TOKEN_COUNT_COMPLETION, 60)
span.set_attribute.assert_any_call(SpanAttributes.LLM_TOKEN_COUNT_PROMPT, 40)
class TestArizeLogger(CustomLogger):
"""
Custom logger implementation to capture standard_callback_dynamic_params.
Used to verify that dynamic config keys are being passed to callbacks.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.standard_callback_dynamic_params: Optional[
StandardCallbackDynamicParams
] = None
async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
# Capture dynamic params and print them for verification
print("logged kwargs", json.dumps(kwargs, indent=4, default=str))
self.standard_callback_dynamic_params = kwargs.get(
"standard_callback_dynamic_params"
)
@pytest.mark.asyncio
async def test_arize_dynamic_params():
"""
Test to ensure that dynamic Arize keys (API key and space key)
are received inside the callback logger at runtime.
"""
test_arize_logger = TestArizeLogger()
litellm.callbacks = [test_arize_logger]
# Perform a mocked async completion call to trigger logging
await litellm.acompletion(
model="gpt-4o",
messages=[{"role": "user", "content": "Basic Request Content"}],
mock_response="test",
arize_api_key="test_api_key_dynamic",
arize_space_key="test_space_key_dynamic",
)
# Allow for async propagation
await asyncio.sleep(2)
# Assert dynamic parameters were received in the callback
assert test_arize_logger.standard_callback_dynamic_params is not None
assert (
test_arize_logger.standard_callback_dynamic_params.get("arize_api_key")
== "test_api_key_dynamic"
)
assert (
test_arize_logger.standard_callback_dynamic_params.get("arize_space_key")
== "test_space_key_dynamic"
)

View file

@ -0,0 +1,98 @@
import os
import sys
import pytest
from unittest.mock import patch, MagicMock
sys.path.insert(0, os.path.abspath("../..")) # Adds the grandparent directory to sys.path
from litellm.integrations.agentops.agentops import AgentOps, AgentOpsConfig
@pytest.fixture
def mock_auth_response():
return {
"token": "test_jwt_token",
"project_id": "test_project_id"
}
@pytest.fixture
def agentops_config():
return AgentOpsConfig(
endpoint="https://otlp.agentops.cloud/v1/traces",
api_key="test_api_key",
service_name="test_service",
deployment_environment="test_env",
auth_endpoint="https://api.agentops.ai/v3/auth/token"
)
def test_agentops_config_from_env():
"""Test that AgentOpsConfig correctly reads from environment variables"""
with patch.dict(os.environ, {
"AGENTOPS_API_KEY": "test_key",
"AGENTOPS_SERVICE_NAME": "test_service",
"AGENTOPS_ENVIRONMENT": "test_env"
}):
config = AgentOpsConfig.from_env()
assert config.api_key == "test_key"
assert config.service_name == "test_service"
assert config.deployment_environment == "test_env"
assert config.endpoint == "https://otlp.agentops.cloud/v1/traces"
assert config.auth_endpoint == "https://api.agentops.ai/v3/auth/token"
def test_agentops_config_defaults():
"""Test that AgentOpsConfig uses correct default values"""
config = AgentOpsConfig()
assert config.service_name is None
assert config.deployment_environment is None
assert config.api_key is None
assert config.endpoint == "https://otlp.agentops.cloud/v1/traces"
assert config.auth_endpoint == "https://api.agentops.ai/v3/auth/token"
@patch('litellm.integrations.agentops.agentops.AgentOps._fetch_auth_token')
def test_fetch_auth_token_success(mock_fetch_auth_token, mock_auth_response):
"""Test successful JWT token fetch"""
mock_fetch_auth_token.return_value = mock_auth_response
config = AgentOpsConfig(api_key="test_key")
agentops = AgentOps(config=config)
mock_fetch_auth_token.assert_called_once_with("test_key", "https://api.agentops.ai/v3/auth/token")
assert agentops.resource_attributes.get("project.id") == mock_auth_response.get("project_id")
@patch('litellm.integrations.agentops.agentops.AgentOps._fetch_auth_token')
def test_fetch_auth_token_failure(mock_fetch_auth_token):
"""Test failed JWT token fetch"""
mock_fetch_auth_token.side_effect = Exception("Failed to fetch auth token: Unauthorized")
config = AgentOpsConfig(api_key="test_key")
agentops = AgentOps(config=config)
mock_fetch_auth_token.assert_called_once()
assert "project.id" not in agentops.resource_attributes
@patch('litellm.integrations.agentops.agentops.AgentOps._fetch_auth_token')
def test_agentops_initialization(mock_fetch_auth_token, agentops_config, mock_auth_response):
"""Test AgentOps initialization with config"""
mock_fetch_auth_token.return_value = mock_auth_response
agentops = AgentOps(config=agentops_config)
assert agentops.resource_attributes["service.name"] == "test_service"
assert agentops.resource_attributes["deployment.environment"] == "test_env"
assert agentops.resource_attributes["telemetry.sdk.name"] == "agentops"
assert agentops.resource_attributes["project.id"] == "test_project_id"
def test_agentops_initialization_no_auth():
"""Test AgentOps initialization without authentication"""
test_config = AgentOpsConfig(
endpoint="https://otlp.agentops.cloud/v1/traces",
api_key=None, # No API key
service_name="test_service",
deployment_environment="test_env"
)
agentops = AgentOps(config=test_config)
assert agentops.resource_attributes["service.name"] == "test_service"
assert agentops.resource_attributes["deployment.environment"] == "test_env"
assert agentops.resource_attributes["telemetry.sdk.name"] == "agentops"
assert "project.id" not in agentops.resource_attributes

View file
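For orientation, a heavily hedged sketch of the exchange the mocked `_fetch_auth_token` stands in for, inferred only from the shapes in these tests (an API key goes in, `{"token", "project_id"}` comes back). The HTTP method and request body are assumptions, not AgentOps's documented API.

```python
# Hypothetical sketch only -- the request shape is assumed, not documented here.
import httpx

def fetch_auth_token(api_key: str, auth_endpoint: str) -> dict:
    response = httpx.post(auth_endpoint, json={"api_key": api_key}, timeout=10.0)
    if response.status_code != 200:
        raise Exception(f"Failed to fetch auth token: {response.reason_phrase}")
    return response.json()  # expected keys per the fixtures: "token", "project_id"
```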

@ -203,9 +203,6 @@ class TestOpenAIResponsesAPIConfig:
result = self.config.get_complete_url(
api_base=api_base,
model=self.model,
api_key="test_api_key",
optional_params={},
litellm_params={},
)
@ -215,9 +212,6 @@ class TestOpenAIResponsesAPIConfig:
with patch("litellm.api_base", "https://litellm-api-base.example.com/v1"):
result = self.config.get_complete_url(
api_base=None,
model=self.model,
api_key="test_api_key",
optional_params={},
litellm_params={},
)
@ -231,9 +225,6 @@ class TestOpenAIResponsesAPIConfig:
):
result = self.config.get_complete_url(
api_base=None,
model=self.model,
api_key="test_api_key",
optional_params={},
litellm_params={},
)
@ -247,9 +238,6 @@ class TestOpenAIResponsesAPIConfig:
):
result = self.config.get_complete_url(
api_base=None,
model=self.model,
api_key="test_api_key",
optional_params={},
litellm_params={},
)
@ -260,9 +248,6 @@ class TestOpenAIResponsesAPIConfig:
result = self.config.get_complete_url(
api_base=api_base,
model=self.model,
api_key="test_api_key",
optional_params={},
litellm_params={},
)

View file

@ -153,3 +153,19 @@ async def test_get_users_includes_timestamps(mocker):
assert user_response.created_at == mock_user_data["created_at"]
assert user_response.updated_at == mock_user_data["updated_at"]
assert user_response.key_count == 0
def test_validate_sort_params():
"""
Test that validate_sort_params returns None if sort_by is None
"""
from litellm.proxy.management_endpoints.internal_user_endpoints import (
_validate_sort_params,
)
assert _validate_sort_params(None, "asc") is None
assert _validate_sort_params(None, "desc") is None
assert _validate_sort_params("user_id", "asc") == {"user_id": "asc"}
assert _validate_sort_params("user_id", "desc") == {"user_id": "desc"}
with pytest.raises(Exception):
_validate_sort_params("user_id", "invalid")

View file
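A sketch of an implementation consistent with the assertions above (hypothetical; the real helper lives in `internal_user_endpoints.py` and may raise a different exception type):

```python
from typing import Dict, Optional

def _validate_sort_params(
    sort_by: Optional[str], sort_order: str
) -> Optional[Dict[str, str]]:
    # No sort column requested -> no sort clause.
    if sort_by is None:
        return None
    # Only "asc"/"desc" are valid orders; anything else is rejected.
    if sort_order not in ("asc", "desc"):
        raise ValueError(f"invalid sort_order: {sort_order}")
    return {sort_by: sort_order}
```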

@ -189,6 +189,90 @@ class BaseResponsesAPITest(ABC):
@pytest.mark.parametrize("sync_mode", [False, True])
@pytest.mark.asyncio
async def test_basic_openai_responses_delete_endpoint(self, sync_mode):
litellm._turn_on_debug()
litellm.set_verbose = True
base_completion_call_args = self.get_base_completion_call_args()
if sync_mode:
response = litellm.responses(
input="Basic ping", max_output_tokens=20,
**base_completion_call_args
)
# delete the response
if isinstance(response, ResponsesAPIResponse):
litellm.delete_responses(
response_id=response.id,
**base_completion_call_args
)
else:
raise ValueError("response is not a ResponsesAPIResponse")
else:
response = await litellm.aresponses(
input="Basic ping", max_output_tokens=20,
**base_completion_call_args
)
# async delete the response
if isinstance(response, ResponsesAPIResponse):
await litellm.adelete_responses(
response_id=response.id,
**base_completion_call_args
)
else:
raise ValueError("response is not a ResponsesAPIResponse")
@pytest.mark.parametrize("sync_mode", [True, False])
@pytest.mark.asyncio
async def test_basic_openai_responses_streaming_delete_endpoint(self, sync_mode):
#litellm._turn_on_debug()
#litellm.set_verbose = True
base_completion_call_args = self.get_base_completion_call_args()
response_id = None
if sync_mode:
response_id = None
response = litellm.responses(
input="Basic ping", max_output_tokens=20,
stream=True,
**base_completion_call_args
)
for event in response:
print("litellm response=", json.dumps(event, indent=4, default=str))
if "response" in event:
response_obj = event.get("response")
if response_obj is not None:
response_id = response_obj.get("id")
print("got response_id=", response_id)
# delete the response
assert response_id is not None
litellm.delete_responses(
response_id=response_id,
**base_completion_call_args
)
else:
response = await litellm.aresponses(
input="Basic ping", max_output_tokens=20,
stream=True,
**base_completion_call_args
)
async for event in response:
print("litellm response=", json.dumps(event, indent=4, default=str))
if "response" in event:
response_obj = event.get("response")
if response_obj is not None:
response_id = response_obj.get("id")
print("got response_id=", response_id)
# delete the response
assert response_id is not None
await litellm.adelete_responses(
response_id=response_id,
**base_completion_call_args
)

View file

@ -30,6 +30,12 @@ class TestAnthropicResponsesAPITest(BaseResponsesAPITest):
"model": "anthropic/claude-3-5-sonnet-latest",
}
async def test_basic_openai_responses_delete_endpoint(self, sync_mode=False):
pass
async def test_basic_openai_responses_streaming_delete_endpoint(self, sync_mode=False):
pass
def test_multiturn_tool_calls():
# Test streaming response with tools for Anthropic

View file

@ -804,7 +804,7 @@ async def test_openai_o1_pro_response_api(sync_mode):
print("Response:", json.dumps(response, indent=4, default=str))
# Check that the response has the expected structure
assert response["id"] == mock_response["id"]
assert response["id"] is not None
assert response["status"] == "incomplete"
assert response["incomplete_details"].reason == "max_output_tokens"
assert response["max_output_tokens"] == 20

View file

@ -1,111 +0,0 @@
import os
import sys
import time
from unittest.mock import Mock, patch
import json
import opentelemetry.exporter.otlp.proto.grpc.trace_exporter
from typing import Optional
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system-path
from litellm.integrations._types.open_inference import SpanAttributes
from litellm.integrations.arize.arize import ArizeConfig, ArizeLogger
from litellm.integrations.custom_logger import CustomLogger
from litellm.main import completion
import litellm
from litellm.types.utils import Choices, StandardCallbackDynamicParams
import pytest
import asyncio
def test_arize_set_attributes():
"""
Test setting attributes for Arize
"""
from unittest.mock import MagicMock
from litellm.types.utils import ModelResponse
span = MagicMock()
kwargs = {
"role": "user",
"content": "simple arize test",
"model": "gpt-4o",
"messages": [{"role": "user", "content": "basic arize test"}],
"standard_logging_object": {
"model_parameters": {"user": "test_user"},
"metadata": {"key": "value", "key2": None},
},
}
response_obj = ModelResponse(
usage={"total_tokens": 100, "completion_tokens": 60, "prompt_tokens": 40},
choices=[Choices(message={"role": "assistant", "content": "response content"})],
)
ArizeLogger.set_arize_attributes(span, kwargs, response_obj)
assert span.set_attribute.call_count == 14
span.set_attribute.assert_any_call(
SpanAttributes.METADATA, json.dumps({"key": "value", "key2": None})
)
span.set_attribute.assert_any_call(SpanAttributes.LLM_MODEL_NAME, "gpt-4o")
span.set_attribute.assert_any_call(SpanAttributes.OPENINFERENCE_SPAN_KIND, "LLM")
span.set_attribute.assert_any_call(SpanAttributes.INPUT_VALUE, "basic arize test")
span.set_attribute.assert_any_call("llm.input_messages.0.message.role", "user")
span.set_attribute.assert_any_call(
"llm.input_messages.0.message.content", "basic arize test"
)
span.set_attribute.assert_any_call(
SpanAttributes.LLM_INVOCATION_PARAMETERS, '{"user": "test_user"}'
)
span.set_attribute.assert_any_call(SpanAttributes.USER_ID, "test_user")
span.set_attribute.assert_any_call(SpanAttributes.OUTPUT_VALUE, "response content")
span.set_attribute.assert_any_call(
"llm.output_messages.0.message.role", "assistant"
)
span.set_attribute.assert_any_call(
"llm.output_messages.0.message.content", "response content"
)
span.set_attribute.assert_any_call(SpanAttributes.LLM_TOKEN_COUNT_TOTAL, 100)
span.set_attribute.assert_any_call(SpanAttributes.LLM_TOKEN_COUNT_COMPLETION, 60)
span.set_attribute.assert_any_call(SpanAttributes.LLM_TOKEN_COUNT_PROMPT, 40)
class TestArizeLogger(CustomLogger):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.standard_callback_dynamic_params: Optional[
StandardCallbackDynamicParams
] = None
async def async_log_success_event(self, kwargs, response_obj, start_time, end_time):
print("logged kwargs", json.dumps(kwargs, indent=4, default=str))
self.standard_callback_dynamic_params = kwargs.get(
"standard_callback_dynamic_params"
)
@pytest.mark.asyncio
async def test_arize_dynamic_params():
"""verify arize ai dynamic params are recieved by a callback"""
test_arize_logger = TestArizeLogger()
litellm.callbacks = [test_arize_logger]
await litellm.acompletion(
model="gpt-4o",
messages=[{"role": "user", "content": "basic arize test"}],
mock_response="test",
arize_api_key="test_api_key_dynamic",
arize_space_key="test_space_key_dynamic",
)
await asyncio.sleep(2)
assert test_arize_logger.standard_callback_dynamic_params is not None
assert (
test_arize_logger.standard_callback_dynamic_params.get("arize_api_key")
== "test_api_key_dynamic"
)
assert (
test_arize_logger.standard_callback_dynamic_params.get("arize_space_key")
== "test_space_key_dynamic"
)

View file

@ -38,6 +38,7 @@ from litellm.integrations.langfuse.langfuse_prompt_management import (
LangfusePromptManagement,
)
from litellm.integrations.azure_storage.azure_storage import AzureBlobStorageLogger
from litellm.integrations.agentops import AgentOps
from litellm.integrations.humanloop import HumanloopLogger
from litellm.proxy.hooks.dynamic_rate_limiter import _PROXY_DynamicRateLimitHandler
from unittest.mock import patch
@ -75,6 +76,7 @@ callback_class_str_to_classType = {
"pagerduty": PagerDutyAlerting,
"gcs_pubsub": GcsPubSubLogger,
"anthropic_cache_control_hook": AnthropicCacheControlHook,
"agentops": AgentOps,
}
expected_env_vars = {

View file

@ -0,0 +1,214 @@
/*
Search Users in Admin UI
E2E Test for user search functionality
Tests:
1. Navigate to Internal Users tab
2. Verify search input exists
3. Test search functionality
4. Verify results update
5. Test filtering by email, user ID, and SSO user ID
*/
import { test, expect } from "@playwright/test";
test("user search test", async ({ page }) => {
// Set a longer timeout for the entire test
test.setTimeout(60000);
// Enable console logging
page.on("console", (msg) => console.log("PAGE LOG:", msg.text()));
// Login first
await page.goto("http://localhost:4000/ui");
console.log("Navigated to login page");
// Wait for login form to be visible
await page.waitForSelector('input[name="username"]', { timeout: 10000 });
console.log("Login form is visible");
await page.fill('input[name="username"]', "admin");
await page.fill('input[name="password"]', "gm");
console.log("Filled login credentials");
const loginButton = page.locator('input[type="submit"]');
await expect(loginButton).toBeEnabled();
await loginButton.click();
console.log("Clicked login button");
// Wait for navigation to complete and dashboard to load
await page.waitForLoadState("networkidle");
console.log("Page loaded after login");
// Take a screenshot for debugging
await page.screenshot({ path: "after-login.png" });
console.log("Took screenshot after login");
// Try to find the Internal User tab with more debugging
console.log("Looking for Internal User tab...");
const internalUserTab = page.locator("span.ant-menu-title-content", {
hasText: "Internal User",
});
// Wait for the tab to be visible
await internalUserTab.waitFor({ state: "visible", timeout: 10000 });
console.log("Internal User tab is visible");
// Take another screenshot before clicking
await page.screenshot({ path: "before-tab-click.png" });
console.log("Took screenshot before tab click");
await internalUserTab.click();
console.log("Clicked Internal User tab");
// Wait for the page to load and table to be visible
await page.waitForSelector("tbody tr", { timeout: 30000 });
await page.waitForTimeout(2000); // Additional wait for table to stabilize
console.log("Table is visible");
// Take a final screenshot
await page.screenshot({ path: "after-tab-click.png" });
console.log("Took screenshot after tab click");
// Verify search input exists
const searchInput = page.locator('input[placeholder="Search by email..."]');
await expect(searchInput).toBeVisible();
console.log("Search input is visible");
// Test search functionality
const initialUserCount = await page.locator("tbody tr").count();
console.log(`Initial user count: ${initialUserCount}`);
// Perform a search
const testEmail = "test@";
await searchInput.fill(testEmail);
console.log("Filled search input");
// Wait for the debounced search to complete
await page.waitForTimeout(500);
console.log("Waited for debounce");
// Wait for the results count to update
await page.waitForFunction((initialCount) => {
const currentCount = document.querySelectorAll("tbody tr").length;
return currentCount !== initialCount;
}, initialUserCount);
console.log("Results updated");
const filteredUserCount = await page.locator("tbody tr").count();
console.log(`Filtered user count: ${filteredUserCount}`);
expect(filteredUserCount).toBeDefined();
// Clear the search
await searchInput.clear();
console.log("Cleared search");
await page.waitForTimeout(500);
console.log("Waited for debounce after clear");
await page.waitForFunction((initialCount) => {
const currentCount = document.querySelectorAll("tbody tr").length;
return currentCount === initialCount;
}, initialUserCount);
console.log("Results reset");
const resetUserCount = await page.locator("tbody tr").count();
console.log(`Reset user count: ${resetUserCount}`);
expect(resetUserCount).toBe(initialUserCount);
});
test("user filter test", async ({ page }) => {
// Set a longer timeout for the entire test
test.setTimeout(60000);
// Enable console logging
page.on("console", (msg) => console.log("PAGE LOG:", msg.text()));
// Login first
await page.goto("http://localhost:4000/ui");
console.log("Navigated to login page");
// Wait for login form to be visible
await page.waitForSelector('input[name="username"]', { timeout: 10000 });
console.log("Login form is visible");
await page.fill('input[name="username"]', "admin");
await page.fill('input[name="password"]', "gm");
console.log("Filled login credentials");
const loginButton = page.locator('input[type="submit"]');
await expect(loginButton).toBeEnabled();
await loginButton.click();
console.log("Clicked login button");
// Wait for navigation to complete and dashboard to load
await page.waitForLoadState("networkidle");
console.log("Page loaded after login");
// Navigate to Internal Users tab
const internalUserTab = page.locator("span.ant-menu-title-content", {
hasText: "Internal User",
});
await internalUserTab.waitFor({ state: "visible", timeout: 10000 });
await internalUserTab.click();
console.log("Clicked Internal User tab");
// Wait for the page to load and table to be visible
await page.waitForSelector("tbody tr", { timeout: 30000 });
await page.waitForTimeout(2000); // Additional wait for table to stabilize
console.log("Table is visible");
// Get initial user count
const initialUserCount = await page.locator("tbody tr").count();
console.log(`Initial user count: ${initialUserCount}`);
// Click the filter button to show additional filters
const filterButton = page.getByRole("button", {
name: "Filters",
exact: true,
});
await filterButton.click();
console.log("Clicked filter button");
await page.waitForTimeout(500); // Wait for filters to appear
// Test user ID filter
const userIdInput = page.locator('input[placeholder="Filter by User ID"]');
await expect(userIdInput).toBeVisible();
console.log("User ID filter is visible");
await userIdInput.fill("user");
console.log("Filled user ID filter");
await page.waitForTimeout(1000);
const userIdFilteredCount = await page.locator("tbody tr").count();
console.log(`User ID filtered count: ${userIdFilteredCount}`);
expect(userIdFilteredCount).toBeLessThan(initialUserCount);
// Clear user ID filter
await userIdInput.clear();
await page.waitForTimeout(1000);
console.log("Cleared user ID filter");
// Test SSO user ID filter
const ssoUserIdInput = page.locator('input[placeholder="Filter by SSO ID"]');
await expect(ssoUserIdInput).toBeVisible();
console.log("SSO user ID filter is visible");
await ssoUserIdInput.fill("sso");
console.log("Filled SSO user ID filter");
await page.waitForTimeout(1000);
const ssoUserIdFilteredCount = await page.locator("tbody tr").count();
console.log(`SSO user ID filtered count: ${ssoUserIdFilteredCount}`);
expect(ssoUserIdFilteredCount).toBeLessThan(initialUserCount);
// Clear SSO user ID filter
await ssoUserIdInput.clear();
await page.waitForTimeout(5000);
console.log("Cleared SSO user ID filter");
// Verify count returns to initial after clearing all filters
const finalUserCount = await page.locator("tbody tr").count();
console.log(`Final user count: ${finalUserCount}`);
expect(finalUserCount).toBe(initialUserCount);
});

View file

@ -2,45 +2,74 @@
Test view internal user page
*/
import { test, expect } from '@playwright/test';
import { test, expect } from "@playwright/test";
test('view internal user page', async ({ page }) => {
test("view internal user page", async ({ page }) => {
// Go to the specified URL
await page.goto('http://localhost:4000/ui');
await page.goto("http://localhost:4000/ui");
// Enter "admin" in the username input field
await page.fill('input[name="username"]', 'admin');
await page.fill('input[name="username"]', "admin");
// Enter "gm" in the password input field
await page.fill('input[name="password"]', 'gm');
await page.fill('input[name="password"]', "gm");
// Optionally, you can add an assertion to verify the login button is enabled
// Click the login button
const loginButton = page.locator('input[type="submit"]');
await expect(loginButton).toBeEnabled();
// Optionally, you can click the login button to submit the form
await loginButton.click();
const tabElement = page.locator('span.ant-menu-title-content', { hasText: 'Internal User' });
// Wait for the Internal User tab and click it
const tabElement = page.locator("span.ant-menu-title-content", {
hasText: "Internal User",
});
await tabElement.click();
// try to click on button
// <button class="bg-blue-500 hover:bg-blue-700 text-white font-bold py-2 px-4 rounded-l focus:outline-none" disabled="">← Prev</button>
// wait 1-2 seconds
await page.waitForTimeout(10000);
// Wait for the table to load
await page.waitForSelector("tbody tr", { timeout: 10000 });
await page.waitForTimeout(2000); // Additional wait for table to stabilize
// Test all expected fields are present
// number of keys owned by user
const keysBadges = page.locator('p.tremor-Badge-text.text-sm.whitespace-nowrap', { hasText: 'Keys' });
const keysCountArray = await keysBadges.evaluateAll(elements => elements.map(el => parseInt(el.textContent.split(' ')[0], 10)));
const keysBadges = page.locator(
"p.tremor-Badge-text.text-sm.whitespace-nowrap",
{ hasText: "Keys" }
);
const keysCountArray = await keysBadges.evaluateAll((elements) =>
elements.map((el) => {
const text = el.textContent;
return text ? parseInt(text.split(" ")[0], 10) : 0;
})
);
const hasNonZeroKeys = keysCountArray.some(count => count > 0);
const hasNonZeroKeys = keysCountArray.some((count) => count > 0);
expect(hasNonZeroKeys).toBe(true);
// test pagination
const prevButton = page.locator('button.px-3.py-1.text-sm.border.rounded-md.hover\\:bg-gray-50.disabled\\:opacity-50.disabled\\:cursor-not-allowed', { hasText: 'Previous' });
await expect(prevButton).toBeDisabled();
// Wait for pagination controls to be visible
await page.waitForSelector(".flex.justify-between.items-center", {
timeout: 5000,
});
const nextButton = page.locator('button.px-3.py-1.text-sm.border.rounded-md.hover\\:bg-gray-50.disabled\\:opacity-50.disabled\\:cursor-not-allowed', { hasText: 'Next' });
// Check if we're on the first page by looking at the results count
const resultsText =
(await page.locator(".text-sm.text-gray-700").textContent()) || "";
const isFirstPage = resultsText.includes("1 -");
if (isFirstPage) {
// On first page, previous button should be disabled
const prevButton = page.locator("button", { hasText: "Previous" });
await expect(prevButton).toBeDisabled();
}
// Next button should be enabled if there are more pages
const nextButton = page.locator("button", { hasText: "Next" });
const totalResults =
(await page.locator(".text-sm.text-gray-700").textContent()) || "";
const hasMorePages =
totalResults.includes("of") && !totalResults.includes("1 - 25 of 25");
if (hasMorePages) {
await expect(nextButton).toBeEnabled();
}
});

View file

@ -0,0 +1,82 @@
import { test, expect } from "@playwright/test";
import { loginToUI } from "../utils/login";
test.describe("User Info View", () => {
test.beforeEach(async ({ page }) => {
await loginToUI(page);
// Navigate to users page
await page.goto("http://localhost:4000/ui?page=users");
});
test("should display user info when clicking on user ID", async ({
page,
}) => {
// Wait for users table to load
await page.waitForSelector("table");
// Get the first user ID cell
const firstUserIdCell = page.locator(
"table tbody tr:first-child td:first-child"
);
const userId = await firstUserIdCell.textContent();
console.log("Found user ID:", userId);
// Click on the user ID
await firstUserIdCell.click();
// Wait for user info view to load
await page.waitForSelector('h1:has-text("User")');
console.log("User info view loaded");
// Check for tabs
await expect(page.locator('button:has-text("Overview")')).toBeVisible();
await expect(page.locator('button:has-text("Details")')).toBeVisible();
// Switch to details tab
await page.locator('button:has-text("Details")').click();
// Check details section
await expect(page.locator("text=User ID")).toBeVisible();
await expect(page.locator("text=Email")).toBeVisible();
// Go back to users list
await page.locator('button:has-text("Back to Users")').click();
// Verify we're back on the users page
await expect(page.locator('h1:has-text("Users")')).toBeVisible();
});
// test("should handle user deletion", async ({ page }) => {
// // Wait for users table to load
// await page.waitForSelector("table");
// // Get the first user ID cell
// const firstUserIdCell = page.locator(
// "table tbody tr:first-child td:first-child"
// );
// const userId = await firstUserIdCell.textContent();
// // Click on the user ID
// await firstUserIdCell.click();
// // Wait for user info view to load
// await page.waitForSelector('h1:has-text("User")');
// // Click delete button
// await page.locator('button:has-text("Delete User")').click();
// // Confirm deletion in modal
// await page.locator('button:has-text("Delete")').click();
// // Verify success message
// await expect(page.locator("text=User deleted successfully")).toBeVisible();
// // Verify we're back on the users page
// await expect(page.locator('h1:has-text("Users")')).toBeVisible();
// // Verify user is no longer in the table
// if (userId) {
// await expect(page.locator(`text=${userId}`)).not.toBeVisible();
// }
// });
});

View file

@ -0,0 +1,8 @@
{
"status": "failed",
"failedTests": [
"8306bf902634636ae770-183086b993a71bc98dd6",
"1bfc70f64c2dd4741dbb-58cd256736ebe53a2d97",
"ea1c46def20befad7a54-cb6c473c41474485b610"
]
}

View file

@ -0,0 +1,23 @@
import { Page, expect } from "@playwright/test";
export async function loginToUI(page: Page) {
// Login first
await page.goto("http://localhost:4000/ui");
console.log("Navigated to login page");
// Wait for login form to be visible
await page.waitForSelector('input[name="username"]', { timeout: 10000 });
console.log("Login form is visible");
await page.fill('input[name="username"]', "admin");
await page.fill('input[name="password"]', "gm");
console.log("Filled login credentials");
const loginButton = page.locator('input[type="submit"]');
await expect(loginButton).toBeEnabled();
await loginButton.click();
console.log("Clicked login button");
// Wait for navigation to complete
await page.waitForURL("**/*");
}

View file

@ -109,6 +109,18 @@ async def test_key_gen():
await asyncio.gather(*tasks)
@pytest.mark.asyncio
async def test_simple_key_gen():
async with aiohttp.ClientSession() as session:
key_data = await generate_key(session, i=0)
key = key_data["key"]
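        # the token returned alongside the key is the stored (hashed) identifier,
        # so it should never equal the raw key itself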
assert key_data["token"] is not None
assert key_data["token"] != key
assert key_data["token_id"] is not None
assert key_data["created_at"] is not None
assert key_data["updated_at"] is not None
@pytest.mark.asyncio
async def test_key_gen_bad_key():
"""

View file

@ -676,6 +676,12 @@ export const userListCall = async (
userIDs: string[] | null = null,
page: number | null = null,
page_size: number | null = null,
userEmail: string | null = null,
userRole: string | null = null,
team: string | null = null,
sso_user_id: string | null = null,
sortBy: string | null = null,
sortOrder: 'asc' | 'desc' | null = null,
) => {
/**
* Get all available teams on proxy
@ -699,6 +705,30 @@ export const userListCall = async (
queryParams.append('page_size', page_size.toString());
}
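  // Optional filter and sort params, appended only when a value is provided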
if (userEmail) {
queryParams.append('user_email', userEmail);
}
if (userRole) {
queryParams.append('role', userRole);
}
if (team) {
queryParams.append('team', team);
}
if (sso_user_id) {
queryParams.append('sso_user_ids', sso_user_id);
}
if (sortBy) {
queryParams.append('sort_by', sortBy);
}
if (sortOrder) {
queryParams.append('sort_order', sortOrder);
}
const queryString = queryParams.toString();
if (queryString) {
url += `?${queryString}`;
@ -736,8 +766,10 @@ export const userInfoCall = async (
userRole: String,
viewAll: Boolean = false,
page: number | null,
page_size: number | null
page_size: number | null,
lookup_user_id: boolean = false
) => {
console.log(`userInfoCall: ${userID}, ${userRole}, ${viewAll}, ${page}, ${page_size}, ${lookup_user_id}`)
try {
let url: string;
@ -751,7 +783,7 @@ export const userInfoCall = async (
} else {
// Use /user/info endpoint for individual user info
url = proxyBaseUrl ? `${proxyBaseUrl}/user/info` : `/user/info`;
if (userRole === "Admin" || userRole === "Admin Viewer") {
if ((userRole === "Admin" || userRole === "Admin Viewer") && !lookup_user_id) {
// do nothing
} else if (userID) {
url += `?user_id=${userID}`;

View file

@ -1,4 +1,4 @@
import React, { useState, useEffect } from "react";
import React, { useState, useEffect, useCallback, useRef } from "react";
import {
Card,
Title,
@ -23,6 +23,7 @@ import {
DialogPanel,
Icon,
TextInput,
NumberInput,
} from "@tremor/react";
import { message } from "antd";
@ -32,6 +33,7 @@ import {
userInfoCall,
userUpdateUserCall,
getPossibleUserRoles,
userListCall,
} from "./networking";
import { Badge, BadgeDelta, Button } from "@tremor/react";
import RequestAccess from "./request_model_access";
@ -50,6 +52,7 @@ import { UserDataTable } from "./view_users/table";
import { UserInfo } from "./view_users/types";
import BulkCreateUsers from "./bulk_create_users_button";
import SSOSettings from "./SSOSettings";
import debounce from "lodash/debounce";
interface ViewUserDashboardProps {
accessToken: string | null;
@ -77,6 +80,19 @@ interface CreateuserProps {
onUserCreated: () => Promise<void>;
}
interface FilterState {
email: string;
user_id: string;
user_role: string;
sso_user_id: string;
team: string;
model: string;
min_spend: number | null;
max_spend: number | null;
sort_by: string;
sort_order: 'asc' | 'desc';
}
const isLocal = process.env.NODE_ENV === "development";
const proxyBaseUrl = isLocal ? "http://localhost:4000" : null;
if (isLocal != true) {
@ -93,7 +109,6 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
setKeys,
}) => {
const [userListResponse, setUserListResponse] = useState<UserListResponse | null>(null);
const [userData, setUserData] = useState<null | any[]>(null);
const [endUsers, setEndUsers] = useState<null | any[]>(null);
const [currentPage, setCurrentPage] = useState(1);
const [openDialogId, setOpenDialogId] = React.useState<null | number>(null);
@ -108,6 +123,23 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
const defaultPageSize = 25;
const [searchTerm, setSearchTerm] = useState("");
const [activeTab, setActiveTab] = useState("users");
const [filters, setFilters] = useState<FilterState>({
email: "",
user_id: "",
user_role: "",
sso_user_id: "",
team: "",
model: "",
min_spend: null,
max_spend: null,
sort_by: "created_at",
sort_order: "desc"
});
const [showFilters, setShowFilters] = useState(false);
const [showColumnDropdown, setShowColumnDropdown] = useState(false);
const [selectedFilter, setSelectedFilter] = useState("Email");
const filtersRef = useRef(null);
const lastSearchTimestamp = useRef(0);
// check if window is not undefined
if (typeof window !== "undefined") {
@ -122,15 +154,88 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
setIsDeleteModalOpen(true);
};
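  // Update a single filter field and kick off a debounced server-side search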
const handleFilterChange = (key: keyof FilterState, value: string | number | null) => {
const newFilters = { ...filters, [key]: value };
setFilters(newFilters);
console.log("called from handleFilterChange - newFilters:", JSON.stringify(newFilters));
debouncedSearch(newFilters);
};
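  // Update the sort column/direction and re-run the search with the new ordering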
const handleSortChange = (sortBy: string, sortOrder: 'asc' | 'desc') => {
const newFilters = {
...filters,
sort_by: sortBy,
sort_order: sortOrder
};
setFilters(newFilters);
debouncedSearch(newFilters);
};
// Create a debounced version of the search function
const debouncedSearch = useCallback(
debounce(async (filters: FilterState) => {
if (!accessToken || !token || !userRole || !userID) {
return;
}
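      // Stamp this search so responses from older, slower requests can be ignored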
const currentTimestamp = Date.now();
lastSearchTimestamp.current = currentTimestamp;
try {
// Make the API call using userListCall with all filter parameters
const data = await userListCall(
accessToken,
filters.user_id ? [filters.user_id] : null,
1, // Reset to first page when searching
defaultPageSize,
filters.email || null,
filters.user_role || null,
filters.team || null,
filters.sso_user_id || null,
filters.sort_by,
filters.sort_order
);
// Only update state if this is the most recent search
if (currentTimestamp === lastSearchTimestamp.current) {
if (data) {
setUserListResponse(data);
console.log("called from debouncedSearch filters:", JSON.stringify(filters));
console.log("called from debouncedSearch data:", JSON.stringify(data));
}
}
} catch (error) {
console.error("Error searching users:", error);
}
}, 300),
[accessToken, token, userRole, userID]
);
// Cleanup the debounced function on component unmount
useEffect(() => {
return () => {
debouncedSearch.cancel();
};
}, [debouncedSearch]);
const handleSearch = (value: string) => {
setSearchTerm(value);
if (value === "") {
refreshUserData(); // Reset to original data when search is cleared
} else {
debouncedSearch(filters);
}
};
const confirmDelete = async () => {
if (userToDelete && accessToken) {
try {
await userDeleteCall(accessToken, [userToDelete]);
message.success("User deleted successfully");
// Update the user list after deletion
if (userData) {
const updatedUserData = userData.filter(user => user.user_id !== userToDelete);
setUserData(updatedUserData);
if (userListResponse) {
const updatedUserData = userListResponse.users?.filter(user => user.user_id !== userToDelete);
setUserListResponse({ ...userListResponse, users: updatedUserData || [] });
}
} catch (error) {
console.error("Error deleting user:", error);
@ -164,11 +269,11 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
} catch (error) {
console.error("There was an error updating the user", error);
}
if (userData) {
const updatedUserData = userData.map((user) =>
if (userListResponse) {
const updatedUserData = userListResponse.users?.map((user) =>
user.user_id === editedUser.user_id ? editedUser : user
);
setUserData(updatedUserData);
setUserListResponse({ ...userListResponse, users: updatedUserData || [] });
}
setSelectedUser(null);
setEditModalVisible(false);
@ -176,6 +281,7 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
};
const refreshUserData = async () => {
console.log("called from refreshUserData");
if (!accessToken || !token || !userRole || !userID) {
return;
}
@ -195,14 +301,45 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
`userList_${currentPage}`,
JSON.stringify(userDataResponse)
);
console.log("called from refreshUserData");
setUserListResponse(userDataResponse);
setUserData(userDataResponse.users || []);
} catch (error) {
console.error("Error refreshing user data:", error);
}
};
const handlePageChange = async (newPage: number) => {
if (!accessToken || !token || !userRole || !userID) {
return;
}
try {
const userDataResponse = await userListCall(
accessToken,
filters.user_id ? [filters.user_id] : null,
newPage,
defaultPageSize,
filters.email || null,
filters.user_role || null,
filters.team || null,
filters.sso_user_id || null,
filters.sort_by,
filters.sort_order
);
// Update session storage with new data
sessionStorage.setItem(
`userList_${newPage}`,
JSON.stringify(userDataResponse)
);
setUserListResponse(userDataResponse);
setCurrentPage(newPage);
} catch (error) {
console.error("Error changing page:", error);
}
};
useEffect(() => {
if (!accessToken || !token || !userRole || !userID) {
return;
@ -214,16 +351,20 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
if (cachedUserData) {
const parsedData = JSON.parse(cachedUserData);
setUserListResponse(parsedData);
setUserData(parsedData.users || []);
console.log("called from useEffect");
} else {
// Fetch from API if not in cache
const userDataResponse = await userInfoCall(
// Fetch from API using userListCall with current filters
const userDataResponse = await userListCall(
accessToken,
null,
userRole,
true,
filters.user_id ? [filters.user_id] : null,
currentPage,
defaultPageSize
defaultPageSize,
filters.email || null,
filters.user_role || null,
filters.team || null,
filters.sso_user_id || null,
filters.sort_by,
filters.sort_order
);
// Store in session storage
@ -233,7 +374,7 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
);
setUserListResponse(userDataResponse);
setUserData(userDataResponse.users || []);
console.log("called from useEffect 2");
}
// Fetch roles if not cached
@ -254,9 +395,9 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
fetchData();
}
}, [accessToken, token, userRole, userID, currentPage]);
}, [accessToken, token, userRole, userID]);
if (!userData) {
if (!userListResponse) {
return <div>Loading...</div>;
}
@ -297,8 +438,164 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
<TabPanel>
<div className="bg-white rounded-lg shadow">
<div className="border-b px-6 py-4">
<div className="flex flex-col md:flex-row items-start md:items-center justify-between space-y-4 md:space-y-0">
<div className="flex items-center space-x-4">
<div className="flex flex-col space-y-4">
{/* Search and Filter Controls */}
<div className="flex flex-wrap items-center gap-3">
{/* Email Search */}
<div className="relative w-64">
<input
type="text"
placeholder="Search by email..."
className="w-full px-3 py-2 pl-8 border rounded-md text-sm focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-blue-500"
value={filters.email}
onChange={(e) => handleFilterChange('email', e.target.value)}
/>
<svg
className="absolute left-2.5 top-2.5 h-4 w-4 text-gray-500"
fill="none"
stroke="currentColor"
viewBox="0 0 24 24"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
strokeWidth={2}
d="M21 21l-6-6m2-5a7 7 0 11-14 0 7 7 0 0114 0z"
/>
</svg>
</div>
{/* Filter Button */}
<button
className={`px-3 py-2 text-sm border rounded-md hover:bg-gray-50 flex items-center gap-2 ${showFilters ? 'bg-gray-100' : ''}`}
onClick={() => setShowFilters(!showFilters)}
>
<svg
className="w-4 h-4"
fill="none"
stroke="currentColor"
viewBox="0 0 24 24"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
strokeWidth={2}
d="M3 4a1 1 0 011-1h16a1 1 0 011 1v2.586a1 1 0 01-.293.707l-6.414 6.414a1 1 0 00-.293.707V17l-4 4v-6.586a1 1 0 00-.293-.707L3.293 7.293A1 1 0 013 6.586V4z"
/>
</svg>
Filters
{(filters.user_id || filters.user_role || filters.team) && (
<span className="w-2 h-2 rounded-full bg-blue-500"></span>
)}
</button>
{/* Reset Filters Button */}
<button
className="px-3 py-2 text-sm border rounded-md hover:bg-gray-50 flex items-center gap-2"
onClick={() => {
setFilters({
email: "",
user_id: "",
user_role: "",
team: "",
sso_user_id: "",
model: "",
min_spend: null,
max_spend: null,
sort_by: "created_at",
sort_order: "desc"
});
}}
>
<svg
className="w-4 h-4"
fill="none"
stroke="currentColor"
viewBox="0 0 24 24"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
strokeWidth={2}
d="M4 4v5h.582m15.356 2A8.001 8.001 0 004.582 9m0 0H9m11 11v-5h-.581m0 0a8.003 8.003 0 01-15.357-2m15.357 2H15"
/>
</svg>
Reset Filters
</button>
</div>
{/* Additional Filters */}
{showFilters && (
<div className="flex flex-wrap items-center gap-3 mt-3">
{/* User ID Search */}
<div className="relative w-64">
<input
type="text"
placeholder="Filter by User ID"
className="w-full px-3 py-2 pl-8 border rounded-md text-sm focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-blue-500"
value={filters.user_id}
onChange={(e) => handleFilterChange('user_id', e.target.value)}
/>
<svg
className="absolute left-2.5 top-2.5 h-4 w-4 text-gray-500"
fill="none"
stroke="currentColor"
viewBox="0 0 24 24"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
strokeWidth={2}
d="M5.121 17.804A13.937 13.937 0 0112 16c2.5 0 4.847.655 6.879 1.804M15 10a3 3 0 11-6 0 3 3 0 016 0zm6 2a9 9 0 11-18 0 9 9 0 0118 0z"
/>
</svg>
</div>
{/* Role Dropdown */}
<div className="w-64">
<Select
value={filters.user_role}
onValueChange={(value) => handleFilterChange('user_role', value)}
placeholder="Select Role"
>
{Object.entries(possibleUIRoles).map(([key, value]) => (
<SelectItem key={key} value={key}>
{value.ui_label}
</SelectItem>
))}
</Select>
</div>
{/* Team Dropdown */}
<div className="w-64">
<Select
value={filters.team}
onValueChange={(value) => handleFilterChange('team', value)}
placeholder="Select Team"
>
{teams?.map((team) => (
<SelectItem key={team.team_id} value={team.team_id}>
{team.team_alias || team.team_id}
</SelectItem>
))}
</Select>
</div>
{/* SSO ID Search */}
<div className="relative w-64">
<input
type="text"
placeholder="Filter by SSO ID"
className="w-full px-3 py-2 pl-8 border rounded-md text-sm focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-blue-500"
value={filters.sso_user_id}
onChange={(e) => handleFilterChange('sso_user_id', e.target.value)}
/>
</div>
</div>
)}
{/* Results Count and Pagination */}
<div className="flex justify-between items-center">
<span className="text-sm text-gray-700">
Showing{" "}
{userListResponse && userListResponse.users && userListResponse.users.length > 0
@ -313,25 +610,28 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
: 0}{" "}
of {userListResponse ? userListResponse.total : 0} results
</span>
<div className="flex items-center space-x-2">
{/* Pagination Buttons */}
<div className="flex space-x-2">
<button
onClick={() => setCurrentPage((p) => Math.max(1, p - 1))}
disabled={!userListResponse || currentPage <= 1}
className="px-3 py-1 text-sm border rounded-md hover:bg-gray-50 disabled:opacity-50 disabled:cursor-not-allowed"
onClick={() => handlePageChange(currentPage - 1)}
disabled={currentPage === 1}
className={`px-3 py-1 text-sm border rounded-md ${
currentPage === 1
? 'bg-gray-100 text-gray-400 cursor-not-allowed'
: 'hover:bg-gray-50'
}`}
>
Previous
</button>
<span className="text-sm text-gray-700">
Page {userListResponse ? userListResponse.page : "-"} of{" "}
{userListResponse ? userListResponse.total_pages : "-"}
</span>
<button
onClick={() => setCurrentPage((p) => p + 1)}
disabled={
!userListResponse ||
currentPage >= userListResponse.total_pages
}
className="px-3 py-1 text-sm border rounded-md hover:bg-gray-50 disabled:opacity-50 disabled:cursor-not-allowed"
onClick={() => handlePageChange(currentPage + 1)}
disabled={!userListResponse || currentPage >= userListResponse.total_pages}
className={`px-3 py-1 text-sm border rounded-md ${
!userListResponse || currentPage >= userListResponse.total_pages
? 'bg-gray-100 text-gray-400 cursor-not-allowed'
: 'hover:bg-gray-50'
}`}
>
Next
</button>
@ -339,10 +639,18 @@ const ViewUserDashboard: React.FC<ViewUserDashboardProps> = ({
</div>
</div>
</div>
<UserDataTable
data={userData || []}
data={userListResponse?.users || []}
columns={tableColumns}
isLoading={!userData}
isLoading={!userListResponse}
accessToken={accessToken}
userRole={userRole}
onSortChange={handleSortChange}
currentSort={{
sortBy: filters.sort_by,
sortOrder: filters.sort_order
}}
/>
</div>
</TabPanel>

View file

@ -18,21 +18,37 @@ import {
} from "@tremor/react";
import { SwitchVerticalIcon, ChevronUpIcon, ChevronDownIcon } from "@heroicons/react/outline";
import { UserInfo } from "./types";
import UserInfoView from "./user_info_view";
interface UserDataTableProps {
data: UserInfo[];
columns: ColumnDef<UserInfo, any>[];
isLoading?: boolean;
onSortChange?: (sortBy: string, sortOrder: 'asc' | 'desc') => void;
currentSort?: {
sortBy: string;
sortOrder: 'asc' | 'desc';
};
accessToken: string | null;
userRole: string | null;
}
export function UserDataTable({
data = [],
columns,
isLoading = false,
onSortChange,
currentSort,
accessToken,
userRole,
}: UserDataTableProps) {
const [sorting, setSorting] = React.useState<SortingState>([
{ id: "created_at", desc: true }
{
id: currentSort?.sortBy || "created_at",
desc: currentSort?.sortOrder === "desc"
}
]);
const [selectedUserId, setSelectedUserId] = React.useState<string | null>(null);
const table = useReactTable({
data,
@ -40,12 +56,49 @@ export function UserDataTable({
state: {
sorting,
},
onSortingChange: setSorting,
onSortingChange: (newSorting: any) => {
setSorting(newSorting);
if (newSorting.length > 0) {
const sortState = newSorting[0];
const sortBy = sortState.id;
const sortOrder = sortState.desc ? 'desc' : 'asc';
onSortChange?.(sortBy, sortOrder);
}
},
getCoreRowModel: getCoreRowModel(),
getSortedRowModel: getSortedRowModel(),
enableSorting: true,
});
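  // Clicking a user ID cell opens the drill-down user info view in place of the table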
const handleUserClick = (userId: string) => {
setSelectedUserId(userId);
};
const handleCloseUserInfo = () => {
setSelectedUserId(null);
};
// Update local sorting state when currentSort prop changes
React.useEffect(() => {
if (currentSort) {
setSorting([{
id: currentSort.sortBy,
desc: currentSort.sortOrder === 'desc'
}]);
}
}, [currentSort]);
if (selectedUserId) {
return (
<UserInfoView
userId={selectedUserId}
onClose={handleCloseUserInfo}
accessToken={accessToken}
userRole={userRole}
/>
);
}
return (
<div className="rounded-lg custom-border relative">
<div className="overflow-x-auto">
@ -110,6 +163,15 @@ export function UserDataTable({
? 'sticky right-0 bg-white shadow-[-4px_0_8px_-6px_rgba(0,0,0,0.1)]'
: ''
}`}
onClick={() => {
if (cell.column.id === 'user_id') {
handleUserClick(cell.getValue() as string);
}
}}
style={{
cursor: cell.column.id === 'user_id' ? 'pointer' : 'default',
color: cell.column.id === 'user_id' ? '#3b82f6' : 'inherit',
}}
>
{flexRender(cell.column.columnDef.cell, cell.getContext())}
</TableCell>

View file

@ -0,0 +1,297 @@
import React, { useState } from "react";
import {
Card,
Text,
Button,
Grid,
Col,
Tab,
TabList,
TabGroup,
TabPanel,
TabPanels,
Title,
Badge,
} from "@tremor/react";
import { ArrowLeftIcon, TrashIcon } from "@heroicons/react/outline";
import { userInfoCall, userDeleteCall } from "../networking";
import { message } from "antd";
import { rolesWithWriteAccess } from '../../utils/roles';
interface UserInfoViewProps {
userId: string;
onClose: () => void;
accessToken: string | null;
userRole: string | null;
onDelete?: () => void;
}
interface UserInfo {
user_id: string;
user_info: {
user_email: string | null;
user_role: string | null;
teams: any[] | null;
models: string[] | null;
max_budget: number | null;
spend: number | null;
metadata: Record<string, any> | null;
created_at: string | null;
updated_at: string | null;
};
keys: any[] | null;
teams: any[] | null;
}
export default function UserInfoView({ userId, onClose, accessToken, userRole, onDelete }: UserInfoViewProps) {
const [userData, setUserData] = useState<UserInfo | null>(null);
const [isDeleteModalOpen, setIsDeleteModalOpen] = useState(false);
const [isLoading, setIsLoading] = useState(true);
React.useEffect(() => {
console.log(`userId: ${userId}, userRole: ${userRole}, accessToken: ${accessToken}`)
const fetchUserData = async () => {
try {
if (!accessToken) return;
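        // The trailing "true" sets lookup_user_id so /user/info resolves this
        // specific user ID even for Admin / Admin Viewer roles (see userInfoCall)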
const data = await userInfoCall(accessToken, userId, userRole || "", false, null, null, true);
setUserData(data);
} catch (error) {
console.error("Error fetching user data:", error);
message.error("Failed to fetch user data");
} finally {
setIsLoading(false);
}
};
fetchUserData();
}, [accessToken, userId, userRole]);
const handleDelete = async () => {
try {
if (!accessToken) return;
await userDeleteCall(accessToken, [userId]);
message.success("User deleted successfully");
if (onDelete) {
onDelete();
}
onClose();
} catch (error) {
console.error("Error deleting user:", error);
message.error("Failed to delete user");
}
};
if (isLoading) {
return (
<div className="p-4">
<Button
icon={ArrowLeftIcon}
variant="light"
onClick={onClose}
className="mb-4"
>
Back to Users
</Button>
<Text>Loading user data...</Text>
</div>
);
}
if (!userData) {
return (
<div className="p-4">
<Button
icon={ArrowLeftIcon}
variant="light"
onClick={onClose}
className="mb-4"
>
Back to Users
</Button>
<Text>User not found</Text>
</div>
);
}
return (
<div className="p-4">
<div className="flex justify-between items-center mb-6">
<div>
<Button
icon={ArrowLeftIcon}
variant="light"
onClick={onClose}
className="mb-4"
>
Back to Users
</Button>
<Title>{userData.user_info?.user_email || "User"}</Title>
<Text className="text-gray-500 font-mono">{userData.user_id}</Text>
</div>
{userRole && rolesWithWriteAccess.includes(userRole) && (
<Button
icon={TrashIcon}
variant="secondary"
onClick={() => setIsDeleteModalOpen(true)}
className="flex items-center"
>
Delete User
</Button>
)}
</div>
{/* Delete Confirmation Modal */}
{isDeleteModalOpen && (
<div className="fixed z-10 inset-0 overflow-y-auto">
<div className="flex items-end justify-center min-h-screen pt-4 px-4 pb-20 text-center sm:block sm:p-0">
<div className="fixed inset-0 transition-opacity" aria-hidden="true">
<div className="absolute inset-0 bg-gray-500 opacity-75"></div>
</div>
<span className="hidden sm:inline-block sm:align-middle sm:h-screen" aria-hidden="true">&#8203;</span>
<div className="inline-block align-bottom bg-white rounded-lg text-left overflow-hidden shadow-xl transform transition-all sm:my-8 sm:align-middle sm:max-w-lg sm:w-full">
<div className="bg-white px-4 pt-5 pb-4 sm:p-6 sm:pb-4">
<div className="sm:flex sm:items-start">
<div className="mt-3 text-center sm:mt-0 sm:ml-4 sm:text-left">
<h3 className="text-lg leading-6 font-medium text-gray-900">
Delete User
</h3>
<div className="mt-2">
<p className="text-sm text-gray-500">
Are you sure you want to delete this user?
</p>
</div>
</div>
</div>
</div>
<div className="bg-gray-50 px-4 py-3 sm:px-6 sm:flex sm:flex-row-reverse">
<Button
onClick={handleDelete}
color="red"
className="ml-2"
>
Delete
</Button>
<Button onClick={() => setIsDeleteModalOpen(false)}>
Cancel
</Button>
</div>
</div>
</div>
</div>
)}
<TabGroup>
<TabList className="mb-4">
<Tab>Overview</Tab>
<Tab>Details</Tab>
</TabList>
<TabPanels>
{/* Overview Panel */}
<TabPanel>
<Grid numItems={1} numItemsSm={2} numItemsLg={3} className="gap-6">
<Card>
<Text>Spend</Text>
<div className="mt-2">
<Title>${Number(userData.user_info?.spend || 0).toFixed(4)}</Title>
<Text>of {userData.user_info?.max_budget !== null ? `$${userData.user_info.max_budget}` : "Unlimited"}</Text>
</div>
</Card>
<Card>
<Text>Teams</Text>
<div className="mt-2">
<Text>{userData.teams?.length || 0} teams</Text>
</div>
</Card>
<Card>
<Text>API Keys</Text>
<div className="mt-2">
<Text>{userData.keys?.length || 0} keys</Text>
</div>
</Card>
</Grid>
</TabPanel>
{/* Details Panel */}
<TabPanel>
<Card>
<div className="space-y-4">
<div>
<Text className="font-medium">User ID</Text>
<Text className="font-mono">{userData.user_id}</Text>
</div>
<div>
<Text className="font-medium">Email</Text>
<Text>{userData.user_info?.user_email || "Not Set"}</Text>
</div>
<div>
<Text className="font-medium">Role</Text>
<Text>{userData.user_info?.user_role || "Not Set"}</Text>
</div>
<div>
<Text className="font-medium">Created</Text>
<Text>{userData.user_info?.created_at ? new Date(userData.user_info.created_at).toLocaleString() : "Unknown"}</Text>
</div>
<div>
<Text className="font-medium">Last Updated</Text>
<Text>{userData.user_info?.updated_at ? new Date(userData.user_info.updated_at).toLocaleString() : "Unknown"}</Text>
</div>
<div>
<Text className="font-medium">Teams</Text>
<div className="flex flex-wrap gap-2 mt-1">
{userData.teams?.length && userData.teams?.length > 0 ? (
userData.teams?.map((team, index) => (
<span
key={index}
className="px-2 py-1 bg-blue-100 rounded text-xs"
>
{team.team_alias || team.team_id}
</span>
))
) : (
<Text>No teams</Text>
)}
</div>
</div>
<div>
<Text className="font-medium">API Keys</Text>
<div className="flex flex-wrap gap-2 mt-1">
{userData.keys?.length && userData.keys?.length > 0 ? (
userData.keys.map((key, index) => (
<span
key={index}
className="px-2 py-1 bg-green-100 rounded text-xs"
>
{key.key_alias || key.token}
</span>
))
) : (
<Text>No API keys</Text>
)}
</div>
</div>
<div>
<Text className="font-medium">Metadata</Text>
<pre className="bg-gray-100 p-2 rounded text-xs overflow-auto mt-1">
{JSON.stringify(userData.user_info?.metadata || {}, null, 2)}
</pre>
</div>
</div>
</Card>
</TabPanel>
</TabPanels>
</TabGroup>
</div>
);
}