forked from phoenix/litellm-mirror
commit 4021206ac2
Merge branch 'main' into litellm_dev_11_22_2024
26 changed files with 845 additions and 146 deletions
@@ -625,6 +625,48 @@ jobs:
          paths:
            - llm_translation_coverage.xml
            - llm_translation_coverage
  pass_through_unit_testing:
    docker:
      - image: cimg/python:3.11
        auth:
          username: ${DOCKERHUB_USERNAME}
          password: ${DOCKERHUB_PASSWORD}
    working_directory: ~/project

    steps:
      - checkout
      - run:
          name: Install Dependencies
          command: |
            python -m pip install --upgrade pip
            python -m pip install -r requirements.txt
            pip install "pytest==7.3.1"
            pip install "pytest-retry==1.6.3"
            pip install "pytest-cov==5.0.0"
            pip install "pytest-asyncio==0.21.1"
            pip install "respx==0.21.1"
      # Run pytest and generate JUnit XML report
      - run:
          name: Run tests
          command: |
            pwd
            ls
            python -m pytest -vv tests/pass_through_unit_tests --cov=litellm --cov-report=xml -x -s -v --junitxml=test-results/junit.xml --durations=5
          no_output_timeout: 120m
      - run:
          name: Rename the coverage files
          command: |
            mv coverage.xml pass_through_unit_tests_coverage.xml
            mv .coverage pass_through_unit_tests_coverage

      # Store test results
      - store_test_results:
          path: test-results
      - persist_to_workspace:
          root: .
          paths:
            - pass_through_unit_tests_coverage.xml
            - pass_through_unit_tests_coverage
  image_gen_testing:
    docker:
      - image: cimg/python:3.11
@@ -923,7 +965,7 @@ jobs:
          command: |
            pwd
            ls
            python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/image_gen_tests
            python -m pytest -s -vv tests/*.py -x --junitxml=test-results/junit.xml --durations=5 --ignore=tests/otel_tests --ignore=tests/pass_through_tests --ignore=tests/proxy_admin_ui_tests --ignore=tests/load_tests --ignore=tests/llm_translation --ignore=tests/image_gen_tests --ignore=tests/pass_through_unit_tests
          no_output_timeout: 120m

      # Store test results
@@ -1137,6 +1179,7 @@ jobs:
            pip install "PyGithub==1.59.1"
            pip install "google-cloud-aiplatform==1.59.0"
            pip install anthropic
      # Run pytest and generate JUnit XML report
      - run:
          name: Build Docker image
          command: docker build -t my-app:latest -f ./docker/Dockerfile.database .
@@ -1172,6 +1215,26 @@ jobs:
      - run:
          name: Wait for app to be ready
          command: dockerize -wait http://localhost:4000 -timeout 5m
      # New steps to run Node.js test
      - run:
          name: Install Node.js
          command: |
            curl -fsSL https://deb.nodesource.com/setup_18.x | sudo -E bash -
            sudo apt-get install -y nodejs
            node --version
            npm --version

      - run:
          name: Install Node.js dependencies
          command: |
            npm install @google-cloud/vertexai
            npm install --save-dev jest

      - run:
          name: Run Vertex AI tests
          command: |
            npx jest tests/pass_through_tests/test_vertex.test.js --verbose
          no_output_timeout: 30m
      - run:
          name: Run tests
          command: |
@@ -1179,7 +1242,6 @@ jobs:
            ls
            python -m pytest -vv tests/pass_through_tests/ -x --junitxml=test-results/junit.xml --durations=5
          no_output_timeout: 120m

      # Store test results
      - store_test_results:
          path: test-results
@@ -1205,7 +1267,7 @@ jobs:
            python -m venv venv
            . venv/bin/activate
            pip install coverage
            coverage combine llm_translation_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage
            coverage combine llm_translation_coverage logging_coverage litellm_router_coverage local_testing_coverage litellm_assistants_api_coverage auth_ui_unit_tests_coverage langfuse_coverage caching_coverage litellm_proxy_unit_tests_coverage image_gen_coverage pass_through_unit_tests_coverage
            coverage xml
      - codecov/upload:
          file: ./coverage.xml
@@ -1494,6 +1556,12 @@ workflows:
              only:
                - main
                - /litellm_.*/
      - pass_through_unit_testing:
          filters:
            branches:
              only:
                - main
                - /litellm_.*/
      - image_gen_testing:
          filters:
            branches:
@@ -1509,6 +1577,7 @@ workflows:
      - upload-coverage:
          requires:
            - llm_translation_testing
            - pass_through_unit_testing
            - image_gen_testing
            - logging_testing
            - litellm_router_testing
@@ -1549,6 +1618,7 @@ workflows:
            - load_testing
            - test_bad_database_url
            - llm_translation_testing
            - pass_through_unit_testing
            - image_gen_testing
            - logging_testing
            - litellm_router_testing
@@ -12,6 +12,71 @@ Looking for the Unified API (OpenAI format) for VertexAI ? [Go here - using vert

:::

Pass-through endpoints for Vertex AI - call provider-specific endpoints in their native format (no translation).

Just replace `https://REGION-aiplatform.googleapis.com` with `LITELLM_PROXY_BASE_URL/vertex-ai`


#### **Example Usage**

<Tabs>
<TabItem value="curl" label="curl">

```bash
curl http://localhost:4000/vertex-ai/publishers/google/models/gemini-1.0-pro:generateContent \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer sk-1234" \
  -d '{
    "contents":[{
      "role": "user",
      "parts":[{"text": "How are you doing today?"}]
    }]
  }'
```

</TabItem>
<TabItem value="js" label="Vertex Node.js SDK">

```javascript
const { VertexAI } = require('@google-cloud/vertexai');

const vertexAI = new VertexAI({
  project: 'your-project-id', // enter your vertex project id
  location: 'us-central1', // enter your vertex region
  apiEndpoint: "localhost:4000/vertex-ai" // <proxy-server-url>/vertex-ai # note, do not include 'https://' in the url
});

const model = vertexAI.getGenerativeModel({
  model: 'gemini-1.0-pro'
}, {
  customHeaders: {
    "x-litellm-api-key": "sk-1234" // Your litellm Virtual Key
  }
});

async function generateContent() {
  try {
    const prompt = {
      contents: [{
        role: 'user',
        parts: [{ text: 'How are you doing today?' }]
      }]
    };

    const response = await model.generateContent(prompt);
    console.log('Response:', response);
  } catch (error) {
    console.error('Error:', error);
  }
}

generateContent();
```

</TabItem>
</Tabs>
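
For reference, a minimal Python sketch of the same pass-through call. This is an illustration only, not part of the diff: it assumes the proxy runs at `http://localhost:4000` with virtual key `sk-1234`, as in the curl tab above.

```python
import requests  # pip install requests

# Same request as the curl tab, sent through the proxy's /vertex-ai route
resp = requests.post(
    "http://localhost:4000/vertex-ai/publishers/google/models/gemini-1.0-pro:generateContent",
    headers={
        "Content-Type": "application/json",
        "Authorization": "Bearer sk-1234",  # litellm virtual key
    },
    json={"contents": [{"role": "user", "parts": [{"text": "How are you doing today?"}]}]},
)
print(resp.json())
```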

## Supported API Endpoints

- Gemini API
@@ -1528,7 +1528,8 @@ class AzureChatCompletion(BaseLLM):
        prompt: Optional[str] = None,
    ) -> dict:
        client_session = (
            litellm.aclient_session or httpx.AsyncClient()
            litellm.aclient_session
            or get_async_httpx_client(llm_provider=litellm.LlmProviders.AZURE).client
        )  # handle dall-e-2 calls

        if "gateway.ai.cloudflare.com" in api_base:
@@ -8,8 +8,7 @@ from httpx import USE_CLIENT_DEFAULT, AsyncHTTPTransport, HTTPTransport

import litellm
from litellm.caching import InMemoryCache

from .types import httpxSpecialProvider
from litellm.types.llms.custom_http import *

if TYPE_CHECKING:
    from litellm import LlmProviders
@@ -1,11 +0,0 @@
from enum import Enum

import litellm


class httpxSpecialProvider(str, Enum):
    LoggingCallback = "logging_callback"
    GuardrailCallback = "guardrail_callback"
    Caching = "caching"
    Oauth2Check = "oauth2_check"
    SecretManager = "secret_manager"
@@ -14,6 +14,7 @@ import requests  # type: ignore

import litellm
from litellm import verbose_logger
from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
from litellm.secret_managers.main import get_secret_str
from litellm.types.utils import ModelInfo, ProviderField, StreamingChoices

@@ -456,7 +457,10 @@ def ollama_completion_stream(url, data, logging_obj):

async def ollama_async_streaming(url, data, model_response, encoding, logging_obj):
    try:
        client = httpx.AsyncClient()
        _async_http_client = get_async_httpx_client(
            llm_provider=litellm.LlmProviders.OLLAMA
        )
        client = _async_http_client.client
        async with client.stream(
            url=f"{url}", json=data, method="POST", timeout=litellm.request_timeout
        ) as response:
@@ -13,6 +13,7 @@ from pydantic import BaseModel

import litellm
from litellm import verbose_logger
from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
from litellm.types.llms.ollama import OllamaToolCall, OllamaToolCallFunction
from litellm.types.llms.openai import ChatCompletionAssistantToolCall
from litellm.types.utils import StreamingChoices
@@ -445,7 +446,10 @@ async def ollama_async_streaming(
    url, api_key, data, model_response, encoding, logging_obj
):
    try:
        client = httpx.AsyncClient()
        _async_http_client = get_async_httpx_client(
            llm_provider=litellm.LlmProviders.OLLAMA
        )
        client = _async_http_client.client
        _request = {
            "url": f"{url}",
            "json": data,
@@ -11,7 +11,28 @@ model_list:
      model: vertex_ai/claude-3-5-sonnet-v2
      vertex_ai_project: "adroit-crow-413218"
      vertex_ai_location: "us-east5"
  - model_name: fake-openai-endpoint
    litellm_params:
      model: openai/fake
      api_key: fake-key
      api_base: https://exampleopenaiendpoint-production.up.railway.app/

router_settings:
  model_group_alias:
    "gpt-4-turbo": # Aliased model name
      model: "gpt-4" # Actual model name in 'model_list'
      hidden: true

litellm_settings:
  success_callback: ["langfuse"]
  callbacks: ["prometheus"]
  default_team_settings:
    - team_id: team-1
      success_callback: ["langfuse"]
      failure_callback: ["langfuse"]
      langfuse_public_key: os.environ/LANGFUSE_PROJECT1_PUBLIC # Project 1
      langfuse_secret: os.environ/LANGFUSE_PROJECT1_SECRET # Project 1
    - team_id: team-2
      success_callback: ["langfuse"]
      failure_callback: ["langfuse"]
      langfuse_public_key: os.environ/LANGFUSE_PROJECT2_PUBLIC # Project 2
      langfuse_secret: os.environ/LANGFUSE_PROJECT2_SECRET # Project 2
      langfuse_host: https://us.cloud.langfuse.com
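
With the `model_group_alias` settings above, requests for the alias resolve to the underlying model from `model_list`. A minimal sketch using the OpenAI Python client pointed at the proxy (the base URL and key here are illustrative assumptions, not part of the diff):

```python
from openai import OpenAI

client = OpenAI(base_url="http://localhost:4000", api_key="sk-1234")

# "gpt-4-turbo" is only an alias; the router forwards it to "gpt-4" from
# model_list, and hidden: true keeps the alias out of model-listing endpoints.
resp = client.chat.completions.create(
    model="gpt-4-turbo",
    messages=[{"role": "user", "content": "hello"}],
)
print(resp.choices[0].message.content)
```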
@@ -96,7 +96,7 @@ class AnthropicPassthroughLoggingHandler:
        kwargs["response_cost"] = response_cost
        kwargs["model"] = model

        # Make standard logging object for Vertex AI
        # Make standard logging object for Anthropic
        standard_logging_object = get_standard_logging_object_payload(
            kwargs=kwargs,
            init_response_obj=litellm_model_response,
@@ -57,8 +57,14 @@ class VertexPassthroughLoggingHandler:
                encoding=None,
            )
        )
        logging_obj.model = litellm_model_response.model or model
        logging_obj.model_call_details["model"] = logging_obj.model
        kwargs = VertexPassthroughLoggingHandler._create_vertex_response_logging_payload_for_generate_content(
            litellm_model_response=litellm_model_response,
            model=model,
            kwargs=kwargs,
            start_time=start_time,
            end_time=end_time,
            logging_obj=logging_obj,
        )

        return {
            "result": litellm_model_response,
@@ -147,10 +153,14 @@ class VertexPassthroughLoggingHandler:
            verbose_proxy_logger.error(
                "Unable to build complete streaming response for Vertex passthrough endpoint, not logging..."
            )
            return {
                "result": None,
                "kwargs": kwargs,
            }
        kwargs = VertexPassthroughLoggingHandler._create_vertex_response_logging_payload_for_generate_content(
            litellm_model_response=complete_streaming_response,
            model=model,
            kwargs=kwargs,
            start_time=start_time,
            end_time=end_time,
            logging_obj=litellm_logging_obj,
        )

        return {
            "result": complete_streaming_response,
@@ -195,3 +205,47 @@ class VertexPassthroughLoggingHandler:
        if match:
            return match.group(1)
        return "unknown"

    @staticmethod
    def _create_vertex_response_logging_payload_for_generate_content(
        litellm_model_response: Union[
            litellm.ModelResponse, litellm.TextCompletionResponse
        ],
        model: str,
        kwargs: dict,
        start_time: datetime,
        end_time: datetime,
        logging_obj: LiteLLMLoggingObj,
    ):
        """
        Create the standard logging object for Vertex passthrough generateContent (streaming and non-streaming)

        """
        response_cost = litellm.completion_cost(
            completion_response=litellm_model_response,
            model=model,
        )
        kwargs["response_cost"] = response_cost
        kwargs["model"] = model

        # Make standard logging object for Vertex AI
        standard_logging_object = get_standard_logging_object_payload(
            kwargs=kwargs,
            init_response_obj=litellm_model_response,
            start_time=start_time,
            end_time=end_time,
            logging_obj=logging_obj,
            status="success",
        )

        # pretty print standard logging object
        verbose_proxy_logger.debug(
            "standard_logging_object= %s", json.dumps(standard_logging_object, indent=4)
        )
        kwargs["standard_logging_object"] = standard_logging_object

        # set litellm_call_id to logging response object
        litellm_model_response.id = logging_obj.litellm_call_id
        logging_obj.model = litellm_model_response.model or model
        logging_obj.model_call_details["model"] = logging_obj.model
        return kwargs
@@ -22,6 +22,7 @@ import litellm
from litellm._logging import verbose_proxy_logger
from litellm.integrations.custom_logger import CustomLogger
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
from litellm.llms.vertex_ai_and_google_ai_studio.gemini.vertex_and_google_ai_studio_gemini import (
    ModelResponseIterator,
)
@@ -35,8 +36,9 @@ from litellm.proxy._types import (
)
from litellm.proxy.auth.user_api_key_auth import user_api_key_auth
from litellm.secret_managers.main import get_secret_str
from litellm.types.llms.custom_http import httpxSpecialProvider

from .streaming_handler import chunk_processor
from .streaming_handler import PassThroughStreamingHandler
from .success_handler import PassThroughEndpointLogging
from .types import EndpointType, PassthroughStandardLoggingPayload

@@ -363,8 +365,11 @@ async def pass_through_request(  # noqa: PLR0915
        data=_parsed_body,
        call_type="pass_through_endpoint",
    )

    async_client = httpx.AsyncClient(timeout=600)
    async_client_obj = get_async_httpx_client(
        llm_provider=httpxSpecialProvider.PassThroughEndpoint,
        params={"timeout": 600},
    )
    async_client = async_client_obj.client

    litellm_call_id = str(uuid.uuid4())
@@ -448,7 +453,7 @@ async def pass_through_request(  # noqa: PLR0915
            )

            return StreamingResponse(
                chunk_processor(
                PassThroughStreamingHandler.chunk_processor(
                    response=response,
                    request_body=_parsed_body,
                    litellm_logging_obj=logging_obj,
@@ -491,7 +496,7 @@ async def pass_through_request(  # noqa: PLR0915
            )

            return StreamingResponse(
                chunk_processor(
                PassThroughStreamingHandler.chunk_processor(
                    response=response,
                    request_body=_parsed_body,
                    litellm_logging_obj=logging_obj,
@@ -33,93 +33,72 @@ from .success_handler import PassThroughEndpointLogging
from .types import EndpointType


async def chunk_processor(
    response: httpx.Response,
    request_body: Optional[dict],
    litellm_logging_obj: LiteLLMLoggingObj,
    endpoint_type: EndpointType,
    start_time: datetime,
    passthrough_success_handler_obj: PassThroughEndpointLogging,
    url_route: str,
):
    """
    - Yields chunks from the response
    - Collect non-empty chunks for post-processing (logging)
    """
    collected_chunks: List[str] = []  # List to store all chunks
    try:
        async for chunk in response.aiter_lines():
            verbose_proxy_logger.debug(f"Processing chunk: {chunk}")
            if not chunk:
                continue
class PassThroughStreamingHandler:

            # Handle SSE format - pass through the raw SSE format
            if isinstance(chunk, bytes):
                chunk = chunk.decode("utf-8")
    @staticmethod
    async def chunk_processor(
        response: httpx.Response,
        request_body: Optional[dict],
        litellm_logging_obj: LiteLLMLoggingObj,
        endpoint_type: EndpointType,
        start_time: datetime,
        passthrough_success_handler_obj: PassThroughEndpointLogging,
        url_route: str,
    ):
        """
        - Yields chunks from the response
        - Collect non-empty chunks for post-processing (logging)
        """
        try:
            raw_bytes: List[bytes] = []
            async for chunk in response.aiter_bytes():
                raw_bytes.append(chunk)
                yield chunk

            # Store the chunk for post-processing
            if chunk.strip():  # Only store non-empty chunks
                collected_chunks.append(chunk)
                yield f"{chunk}\n"
            # After all chunks are processed, handle post-processing
            end_time = datetime.now()

        # After all chunks are processed, handle post-processing
        end_time = datetime.now()
            await PassThroughStreamingHandler._route_streaming_logging_to_handler(
                litellm_logging_obj=litellm_logging_obj,
                passthrough_success_handler_obj=passthrough_success_handler_obj,
                url_route=url_route,
                request_body=request_body or {},
                endpoint_type=endpoint_type,
                start_time=start_time,
                raw_bytes=raw_bytes,
                end_time=end_time,
            )
        except Exception as e:
            verbose_proxy_logger.error(f"Error in chunk_processor: {str(e)}")
            raise

        await _route_streaming_logging_to_handler(
            litellm_logging_obj=litellm_logging_obj,
            passthrough_success_handler_obj=passthrough_success_handler_obj,
            url_route=url_route,
            request_body=request_body or {},
            endpoint_type=endpoint_type,
            start_time=start_time,
            all_chunks=collected_chunks,
            end_time=end_time,
    @staticmethod
    async def _route_streaming_logging_to_handler(
        litellm_logging_obj: LiteLLMLoggingObj,
        passthrough_success_handler_obj: PassThroughEndpointLogging,
        url_route: str,
        request_body: dict,
        endpoint_type: EndpointType,
        start_time: datetime,
        raw_bytes: List[bytes],
        end_time: datetime,
    ):
        """
        Route the logging for the collected chunks to the appropriate handler

        Supported endpoint types:
        - Anthropic
        - Vertex AI
        """
        all_chunks = PassThroughStreamingHandler._convert_raw_bytes_to_str_lines(
            raw_bytes
        )

    except Exception as e:
        verbose_proxy_logger.error(f"Error in chunk_processor: {str(e)}")
        raise


async def _route_streaming_logging_to_handler(
    litellm_logging_obj: LiteLLMLoggingObj,
    passthrough_success_handler_obj: PassThroughEndpointLogging,
    url_route: str,
    request_body: dict,
    endpoint_type: EndpointType,
    start_time: datetime,
    all_chunks: List[str],
    end_time: datetime,
):
    """
    Route the logging for the collected chunks to the appropriate handler

    Supported endpoint types:
    - Anthropic
    - Vertex AI
    """
    standard_logging_response_object: Optional[
        PassThroughEndpointLoggingResultValues
    ] = None
    kwargs: dict = {}
    if endpoint_type == EndpointType.ANTHROPIC:
        anthropic_passthrough_logging_handler_result = AnthropicPassthroughLoggingHandler._handle_logging_anthropic_collected_chunks(
            litellm_logging_obj=litellm_logging_obj,
            passthrough_success_handler_obj=passthrough_success_handler_obj,
            url_route=url_route,
            request_body=request_body,
            endpoint_type=endpoint_type,
            start_time=start_time,
            all_chunks=all_chunks,
            end_time=end_time,
        )
        standard_logging_response_object = anthropic_passthrough_logging_handler_result[
            "result"
        ]
        kwargs = anthropic_passthrough_logging_handler_result["kwargs"]
    elif endpoint_type == EndpointType.VERTEX_AI:
        vertex_passthrough_logging_handler_result = (
            VertexPassthroughLoggingHandler._handle_logging_vertex_collected_chunks(
        standard_logging_response_object: Optional[
            PassThroughEndpointLoggingResultValues
        ] = None
        kwargs: dict = {}
        if endpoint_type == EndpointType.ANTHROPIC:
            anthropic_passthrough_logging_handler_result = AnthropicPassthroughLoggingHandler._handle_logging_anthropic_collected_chunks(
                litellm_logging_obj=litellm_logging_obj,
                passthrough_success_handler_obj=passthrough_success_handler_obj,
                url_route=url_route,
@@ -129,29 +108,64 @@ async def _route_streaming_logging_to_handler(
            all_chunks=all_chunks,
            end_time=end_time,
        )
    )
    standard_logging_response_object = vertex_passthrough_logging_handler_result[
        "result"
    ]
    kwargs = vertex_passthrough_logging_handler_result["kwargs"]
            standard_logging_response_object = anthropic_passthrough_logging_handler_result[
                "result"
            ]
            kwargs = anthropic_passthrough_logging_handler_result["kwargs"]
        elif endpoint_type == EndpointType.VERTEX_AI:
            vertex_passthrough_logging_handler_result = (
                VertexPassthroughLoggingHandler._handle_logging_vertex_collected_chunks(
                    litellm_logging_obj=litellm_logging_obj,
                    passthrough_success_handler_obj=passthrough_success_handler_obj,
                    url_route=url_route,
                    request_body=request_body,
                    endpoint_type=endpoint_type,
                    start_time=start_time,
                    all_chunks=all_chunks,
                    end_time=end_time,
                )
            )
            standard_logging_response_object = vertex_passthrough_logging_handler_result[
                "result"
            ]
            kwargs = vertex_passthrough_logging_handler_result["kwargs"]

    if standard_logging_response_object is None:
        standard_logging_response_object = StandardPassThroughResponseObject(
            response=f"cannot parse chunks to standard response object. Chunks={all_chunks}"
        if standard_logging_response_object is None:
            standard_logging_response_object = StandardPassThroughResponseObject(
                response=f"cannot parse chunks to standard response object. Chunks={all_chunks}"
            )
        threading.Thread(
            target=litellm_logging_obj.success_handler,
            args=(
                standard_logging_response_object,
                start_time,
                end_time,
                False,
            ),
        ).start()
        await litellm_logging_obj.async_success_handler(
            result=standard_logging_response_object,
            start_time=start_time,
            end_time=end_time,
            cache_hit=False,
            **kwargs,
        )
    threading.Thread(
        target=litellm_logging_obj.success_handler,
        args=(
            standard_logging_response_object,
            start_time,
            end_time,
            False,
        ),
    ).start()
    await litellm_logging_obj.async_success_handler(
        result=standard_logging_response_object,
        start_time=start_time,
        end_time=end_time,
        cache_hit=False,
        **kwargs,
    )

    @staticmethod
    def _convert_raw_bytes_to_str_lines(raw_bytes: List[bytes]) -> List[str]:
        """
        Converts a list of raw bytes into a list of string lines, similar to aiter_lines()

        Args:
            raw_bytes: List of bytes chunks from aiter_bytes()

        Returns:
            List of string lines, with each line being a complete data: {} chunk
        """
        # Combine all bytes and decode to string
        combined_str = b"".join(raw_bytes).decode("utf-8")

        # Split by newlines and filter out empty lines
        lines = [line.strip() for line in combined_str.split("\n") if line.strip()]

        return lines
@@ -119,7 +119,6 @@ async def vertex_proxy_route(
    endpoint: str,
    request: Request,
    fastapi_response: Response,
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
):
    encoded_endpoint = httpx.URL(endpoint).path

@@ -127,6 +126,11 @@ async def vertex_proxy_route(

    verbose_proxy_logger.debug("requested endpoint %s", endpoint)
    headers: dict = {}
    api_key_to_use = get_litellm_virtual_key(request=request)
    user_api_key_dict = await user_api_key_auth(
        request=request,
        api_key=api_key_to_use,
    )

    vertex_project = None
    vertex_location = None

@@ -214,3 +218,18 @@ async def vertex_proxy_route(
    )

    return received_value


def get_litellm_virtual_key(request: Request) -> str:
    """
    Extract and format the API key from request headers.
    Prioritizes x-litellm-api-key over the Authorization header.

    The Vertex JS SDK sends the `Authorization` header; we use `x-litellm-api-key` to pass the litellm virtual key.
    """
    litellm_api_key = request.headers.get("x-litellm-api-key")
    if litellm_api_key:
        return f"Bearer {litellm_api_key}"
    return request.headers.get("Authorization", "")
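
A quick sketch of the precedence this helper implements (the request stub below is hypothetical; `fastapi.Request` exposes `headers` with the same `.get` interface):

```python
class _FakeRequest:  # hypothetical stand-in for fastapi.Request
    def __init__(self, headers: dict):
        self.headers = headers

assert get_litellm_virtual_key(_FakeRequest({"x-litellm-api-key": "sk-1234"})) == "Bearer sk-1234"
assert get_litellm_virtual_key(_FakeRequest({"Authorization": "Bearer abc"})) == "Bearer abc"
# x-litellm-api-key wins when both headers are present
assert get_litellm_virtual_key(
    _FakeRequest({"x-litellm-api-key": "sk-1234", "Authorization": "Bearer abc"})
) == "Bearer sk-1234"
```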
@@ -31,8 +31,8 @@ from litellm.llms.custom_httpx.http_handler import (
    _get_httpx_client,
    get_async_httpx_client,
)
from litellm.llms.custom_httpx.types import httpxSpecialProvider
from litellm.proxy._types import KeyManagementSystem
from litellm.types.llms.custom_http import httpxSpecialProvider


class AWSSecretsManagerV2(BaseAWSLLM):
20 litellm/types/llms/custom_http.py Normal file

@@ -0,0 +1,20 @@
from enum import Enum

import litellm


class httpxSpecialProvider(str, Enum):
    """
    Httpx Clients can be created for these litellm internal providers

    Example:
    - langsmith logging would need a custom async httpx client
    - pass through endpoint would need a custom async httpx client
    """

    LoggingCallback = "logging_callback"
    GuardrailCallback = "guardrail_callback"
    Caching = "caching"
    Oauth2Check = "oauth2_check"
    SecretManager = "secret_manager"
    PassThroughEndpoint = "pass_through_endpoint"
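
For context, this is the call shape with which the new `PassThroughEndpoint` member is consumed earlier in this diff (in `pass_through_request`); a cached client is fetched instead of constructing `httpx.AsyncClient()` per request:

```python
from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
from litellm.types.llms.custom_http import httpxSpecialProvider

# Reuse litellm's cached async client for the pass-through endpoint provider
async_client_obj = get_async_httpx_client(
    llm_provider=httpxSpecialProvider.PassThroughEndpoint,
    params={"timeout": 600},
)
client = async_client_obj.client  # the underlying httpx.AsyncClient
```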
@@ -5,9 +5,19 @@ ALLOWED_FILES = [
    # local files
    "../../litellm/__init__.py",
    "../../litellm/llms/custom_httpx/http_handler.py",
    "../../litellm/router_utils/client_initalization_utils.py",
    "../../litellm/llms/custom_httpx/http_handler.py",
    "../../litellm/llms/huggingface_restapi.py",
    "../../litellm/llms/base.py",
    "../../litellm/llms/custom_httpx/httpx_handler.py",
    # when running on ci/cd
    "./litellm/__init__.py",
    "./litellm/llms/custom_httpx/http_handler.py",
    "./litellm/router_utils/client_initalization_utils.py",
    "./litellm/llms/custom_httpx/http_handler.py",
    "./litellm/llms/huggingface_restapi.py",
    "./litellm/llms/base.py",
    "./litellm/llms/custom_httpx/httpx_handler.py",
]

warning_msg = "this is a serious violation that can impact latency. Creating Async clients per request can add +500ms per request"
@@ -43,6 +53,19 @@ def check_for_async_http_handler(file_path):
                raise ValueError(
                    f"found violation in file {file_path} line: {node.lineno}. Please use `get_async_httpx_client` instead. {warning_msg}"
                )
            # Check for attribute calls like httpx.AsyncClient()
            elif isinstance(node.func, ast.Attribute):
                full_name = ""
                current = node.func
                while isinstance(current, ast.Attribute):
                    full_name = "." + current.attr + full_name
                    current = current.value
                if isinstance(current, ast.Name):
                    full_name = current.id + full_name
                if full_name.lower() in [name.lower() for name in target_names]:
                    raise ValueError(
                        f"found violation in file {file_path} line: {node.lineno}. Please use `get_async_httpx_client` instead. {warning_msg}"
                    )
    return violations
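
To make the attribute-call detection above concrete, here is a minimal standalone sketch using only the stdlib `ast` module (the source string and function name are hypothetical; the real check walks whole files against a list of target names):

```python
import ast

SOURCE = "client = httpx.AsyncClient()"  # hypothetical input

def find_async_client_calls(source: str) -> list:
    violations = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call) and isinstance(node.func, ast.Attribute):
            # Rebuild the dotted name, e.g. "httpx.AsyncClient"
            full_name = ""
            current = node.func
            while isinstance(current, ast.Attribute):
                full_name = "." + current.attr + full_name
                current = current.value
            if isinstance(current, ast.Name):
                full_name = current.id + full_name
            if full_name.lower() == "httpx.asyncclient":
                violations.append((node.lineno, full_name))
    return violations

print(find_async_client_calls(SOURCE))  # -> [(1, 'httpx.AsyncClient')]
```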
23 tests/pass_through_tests/test_gemini.js Normal file

@@ -0,0 +1,23 @@
// const { GoogleGenerativeAI } = require("@google/generative-ai");

// const genAI = new GoogleGenerativeAI("sk-1234");
// const model = genAI.getGenerativeModel({ model: "gemini-1.5-flash" });

// const prompt = "Explain how AI works in 2 pages";

// async function run() {
//   try {
//     const result = await model.generateContentStream(prompt, { baseUrl: "http://localhost:4000/gemini" });
//     const response = await result.response;
//     console.log(response.text());
//     for await (const chunk of result.stream) {
//       const chunkText = chunk.text();
//       console.log(chunkText);
//       process.stdout.write(chunkText);
//     }
//   } catch (error) {
//     console.error("Error:", error);
//   }
// }

// run();
68 tests/pass_through_tests/test_local_vertex.js Normal file

@@ -0,0 +1,68 @@
const { VertexAI, RequestOptions } = require('@google-cloud/vertexai');


// Import fetch if the SDK uses it
const originalFetch = global.fetch || require('node-fetch');

// Monkey-patch the fetch used internally
global.fetch = async function patchedFetch(url, options) {
    // Modify the URL to use HTTP instead of HTTPS
    if (url.startsWith('https://localhost:4000')) {
        url = url.replace('https://', 'http://');
    }
    console.log('Patched fetch sending request to:', url);
    return originalFetch(url, options);
};

const vertexAI = new VertexAI({
    project: 'adroit-crow-413218',
    location: 'us-central1',
    apiEndpoint: "localhost:4000/vertex-ai"
});


// Use customHeaders in RequestOptions
const requestOptions = {
    customHeaders: new Headers({
        "x-litellm-api-key": "sk-1234"
    })
};

const generativeModel = vertexAI.getGenerativeModel(
    { model: 'gemini-1.0-pro' },
    requestOptions
);

async function streamingResponse() {
    try {
        const request = {
            contents: [{role: 'user', parts: [{text: 'How are you doing today tell me your name?'}]}],
        };
        const streamingResult = await generativeModel.generateContentStream(request);
        for await (const item of streamingResult.stream) {
            console.log('stream chunk: ', JSON.stringify(item));
        }
        const aggregatedResponse = await streamingResult.response;
        console.log('aggregated response: ', JSON.stringify(aggregatedResponse));
    } catch (error) {
        console.error('Error:', error);
    }
}


async function nonStreamingResponse() {
    try {
        const request = {
            contents: [{role: 'user', parts: [{text: 'How are you doing today tell me your name?'}]}],
        };
        const response = await generativeModel.generateContent(request);
        console.log('non streaming response: ', JSON.stringify(response));
    } catch (error) {
        console.error('Error:', error);
    }
}


streamingResponse();
nonStreamingResponse();
114 tests/pass_through_tests/test_vertex.test.js Normal file

@@ -0,0 +1,114 @@
const { VertexAI, RequestOptions } = require('@google-cloud/vertexai');
const fs = require('fs');
const path = require('path');
const os = require('os');
const { writeFileSync } = require('fs');


// Import fetch if the SDK uses it
const originalFetch = global.fetch || require('node-fetch');

// Monkey-patch the fetch used internally
global.fetch = async function patchedFetch(url, options) {
    // Modify the URL to use HTTP instead of HTTPS
    if (url.startsWith('https://localhost:4000')) {
        url = url.replace('https://', 'http://');
    }
    console.log('Patched fetch sending request to:', url);
    return originalFetch(url, options);
};

function loadVertexAiCredentials() {
    console.log("loading vertex ai credentials");
    const filepath = path.dirname(__filename);
    const vertexKeyPath = path.join(filepath, "vertex_key.json");

    // Initialize default empty service account data
    let serviceAccountKeyData = {};

    // Try to read existing vertex_key.json
    try {
        const content = fs.readFileSync(vertexKeyPath, 'utf8');
        if (content && content.trim()) {
            serviceAccountKeyData = JSON.parse(content);
        }
    } catch (error) {
        // File doesn't exist or is invalid, continue with empty object
    }

    // Update with environment variables
    const privateKeyId = process.env.VERTEX_AI_PRIVATE_KEY_ID || "";
    const privateKey = (process.env.VERTEX_AI_PRIVATE_KEY || "").replace(/\\n/g, "\n");

    serviceAccountKeyData.private_key_id = privateKeyId;
    serviceAccountKeyData.private_key = privateKey;

    // Create temporary file
    const tempFilePath = path.join(os.tmpdir(), `vertex-credentials-${Date.now()}.json`);
    writeFileSync(tempFilePath, JSON.stringify(serviceAccountKeyData, null, 2));

    // Set environment variable
    process.env.GOOGLE_APPLICATION_CREDENTIALS = tempFilePath;
}

// Run credential loading before tests
beforeAll(() => {
    loadVertexAiCredentials();
});


describe('Vertex AI Tests', () => {
    test('should successfully generate content from Vertex AI', async () => {
        const vertexAI = new VertexAI({
            project: 'adroit-crow-413218',
            location: 'us-central1',
            apiEndpoint: "localhost:4000/vertex-ai"
        });

        const customHeaders = new Headers({
            "x-litellm-api-key": "sk-1234"
        });

        const requestOptions = {
            customHeaders: customHeaders
        };

        const generativeModel = vertexAI.getGenerativeModel(
            { model: 'gemini-1.0-pro' },
            requestOptions
        );

        const request = {
            contents: [{role: 'user', parts: [{text: 'How are you doing today tell me your name?'}]}],
        };

        const streamingResult = await generativeModel.generateContentStream(request);

        // Add some assertions
        expect(streamingResult).toBeDefined();

        for await (const item of streamingResult.stream) {
            console.log('stream chunk:', JSON.stringify(item));
            expect(item).toBeDefined();
        }

        const aggregatedResponse = await streamingResult.response;
        console.log('aggregated response:', JSON.stringify(aggregatedResponse));
        expect(aggregatedResponse).toBeDefined();
    });


    test('should successfully generate non-streaming content from Vertex AI', async () => {
        const vertexAI = new VertexAI({project: 'adroit-crow-413218', location: 'us-central1', apiEndpoint: "localhost:4000/vertex-ai"});
        const customHeaders = new Headers({"x-litellm-api-key": "sk-1234"});
        const requestOptions = {customHeaders: customHeaders};
        const generativeModel = vertexAI.getGenerativeModel({model: 'gemini-1.0-pro'}, requestOptions);
        const request = {contents: [{role: 'user', parts: [{text: 'What is 2+2?'}]}]};

        const result = await generativeModel.generateContent(request);
        expect(result).toBeDefined();
        expect(result.response).toBeDefined();
        console.log('non-streaming response:', JSON.stringify(result.response));
    });
});
118 tests/pass_through_unit_tests/test_unit_test_streaming.py Normal file

@@ -0,0 +1,118 @@
import json
import os
import sys
from datetime import datetime
from unittest.mock import AsyncMock, Mock, patch, MagicMock

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

import httpx
import pytest
import litellm
from typing import AsyncGenerator
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.proxy.pass_through_endpoints.types import EndpointType
from litellm.proxy.pass_through_endpoints.success_handler import (
    PassThroughEndpointLogging,
)
from litellm.proxy.pass_through_endpoints.streaming_handler import (
    PassThroughStreamingHandler,
)


# Helper function to mock async iteration
async def aiter_mock(iterable):
    for item in iterable:
        yield item


@pytest.mark.asyncio
@pytest.mark.parametrize(
    "endpoint_type,url_route",
    [
        (
            EndpointType.VERTEX_AI,
            "v1/projects/adroit-crow-413218/locations/us-central1/publishers/google/models/gemini-1.0-pro:generateContent",
        ),
        (EndpointType.ANTHROPIC, "/v1/messages"),
    ],
)
async def test_chunk_processor_yields_raw_bytes(endpoint_type, url_route):
    """
    Test that the chunk_processor yields raw bytes

    This is CRITICAL for pass-through streaming with Vertex AI and Anthropic
    """
    # Mock inputs
    response = AsyncMock(spec=httpx.Response)
    raw_chunks = [
        b'{"id": "1", "content": "Hello"}',
        b'{"id": "2", "content": "World"}',
        b'\n\ndata: {"id": "3"}',  # Testing different byte formats
    ]

    # Mock aiter_bytes to return an async generator
    async def mock_aiter_bytes():
        for chunk in raw_chunks:
            yield chunk

    response.aiter_bytes = mock_aiter_bytes

    request_body = {"key": "value"}
    litellm_logging_obj = MagicMock()
    start_time = datetime.now()
    passthrough_success_handler_obj = MagicMock()

    # Capture yielded chunks and perform detailed assertions
    received_chunks = []
    async for chunk in PassThroughStreamingHandler.chunk_processor(
        response=response,
        request_body=request_body,
        litellm_logging_obj=litellm_logging_obj,
        endpoint_type=endpoint_type,
        start_time=start_time,
        passthrough_success_handler_obj=passthrough_success_handler_obj,
        url_route=url_route,
    ):
        # Assert each chunk is bytes
        assert isinstance(chunk, bytes), f"Chunk should be bytes, got {type(chunk)}"
        # Assert no decoding/encoding occurred (chunk should be exactly as input)
        assert (
            chunk in raw_chunks
        ), f"Chunk {chunk} was modified during processing. For pass-through streaming, chunks should be raw bytes"
        received_chunks.append(chunk)

    # Assert all chunks were processed
    assert len(received_chunks) == len(raw_chunks), "Not all chunks were processed"

    # collected chunks all together
    assert b"".join(received_chunks) == b"".join(
        raw_chunks
    ), "Collected chunks do not match raw chunks"


def test_convert_raw_bytes_to_str_lines():
    """
    Test that the _convert_raw_bytes_to_str_lines method correctly converts raw bytes to a list of strings
    """
    # Test case 1: Single chunk
    raw_bytes = [b'data: {"content": "Hello"}\n']
    result = PassThroughStreamingHandler._convert_raw_bytes_to_str_lines(raw_bytes)
    assert result == ['data: {"content": "Hello"}']

    # Test case 2: Multiple chunks
    raw_bytes = [b'data: {"content": "Hello"}\n', b'data: {"content": "World"}\n']
    result = PassThroughStreamingHandler._convert_raw_bytes_to_str_lines(raw_bytes)
    assert result == ['data: {"content": "Hello"}', 'data: {"content": "World"}']

    # Test case 3: Empty input
    raw_bytes = []
    result = PassThroughStreamingHandler._convert_raw_bytes_to_str_lines(raw_bytes)
    assert result == []

    # Test case 4: Chunks with empty lines
    raw_bytes = [b'data: {"content": "Hello"}\n\n', b'\ndata: {"content": "World"}\n']
    result = PassThroughStreamingHandler._convert_raw_bytes_to_str_lines(raw_bytes)
    assert result == ['data: {"content": "Hello"}', 'data: {"content": "World"}']
@@ -0,0 +1,84 @@
import json
import os
import sys
from datetime import datetime
from unittest.mock import AsyncMock, Mock, patch

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path


import httpx
import pytest
import litellm
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj


from litellm.proxy.vertex_ai_endpoints.vertex_endpoints import (
    get_litellm_virtual_key,
    vertex_proxy_route,
)


@pytest.mark.asyncio
async def test_get_litellm_virtual_key():
    """
    Test that the get_litellm_virtual_key function correctly handles the API key authentication
    """
    # Test with x-litellm-api-key
    mock_request = Mock()
    mock_request.headers = {"x-litellm-api-key": "test-key-123"}
    result = get_litellm_virtual_key(mock_request)
    assert result == "Bearer test-key-123"

    # Test with Authorization header
    mock_request.headers = {"Authorization": "Bearer auth-key-456"}
    result = get_litellm_virtual_key(mock_request)
    assert result == "Bearer auth-key-456"

    # Test with both headers (x-litellm-api-key should take precedence)
    mock_request.headers = {
        "x-litellm-api-key": "test-key-123",
        "Authorization": "Bearer auth-key-456",
    }
    result = get_litellm_virtual_key(mock_request)
    assert result == "Bearer test-key-123"


@pytest.mark.asyncio
async def test_vertex_proxy_route_api_key_auth():
    """
    Critical

    This is how the Vertex AI JS SDK will auth to the LiteLLM proxy
    """
    # Mock dependencies
    mock_request = Mock()
    mock_request.headers = {"x-litellm-api-key": "test-key-123"}
    mock_request.method = "POST"
    mock_response = Mock()

    with patch(
        "litellm.proxy.vertex_ai_endpoints.vertex_endpoints.user_api_key_auth"
    ) as mock_auth:
        mock_auth.return_value = {"api_key": "test-key-123"}

        with patch(
            "litellm.proxy.vertex_ai_endpoints.vertex_endpoints.create_pass_through_route"
        ) as mock_pass_through:
            mock_pass_through.return_value = AsyncMock(
                return_value={"status": "success"}
            )

            # Call the function
            result = await vertex_proxy_route(
                endpoint="v1/projects/test-project/locations/us-central1/publishers/google/models/gemini-1.5-pro:generateContent",
                request=mock_request,
                fastapi_response=mock_response,
            )

            # Verify user_api_key_auth was called with the correct Bearer token
            mock_auth.assert_called_once()
            call_args = mock_auth.call_args[1]
            assert call_args["api_key"] == "Bearer test-key-123"
@@ -39,13 +39,12 @@ async def list_organization(session, i):
        response_json = await response.json()

        print(f"Response {i} (Status code: {status}):")
        print(response_json)
        print()

        if status != 200:
            raise Exception(f"Request {i} did not return a 200 status code: {status}")

        return await response.json()
        return response_json


@pytest.mark.asyncio
@@ -61,6 +61,7 @@ async def chat_completion(session, key, model="azure-gpt-3.5", request_metadata=
        raise Exception(f"Request did not return a 200 status code: {status}")


@pytest.mark.skip(reason="flaky test - covered by simpler unit testing.")
@pytest.mark.asyncio
@pytest.mark.flaky(retries=12, delay=2)
async def test_aaateam_logging():
@@ -94,9 +95,12 @@ async def test_aaateam_logging():
    # Test - if the logs were sent to the correct team on langfuse
    import langfuse

    print(f"langfuse_public_key: {os.getenv('LANGFUSE_PROJECT1_PUBLIC')}")
    print(f"langfuse_secret_key: {os.getenv('LANGFUSE_HOST')}")
    langfuse_client = langfuse.Langfuse(
        public_key=os.getenv("LANGFUSE_PROJECT1_PUBLIC"),
        secret_key=os.getenv("LANGFUSE_PROJECT1_SECRET"),
        host="https://us.cloud.langfuse.com",
    )

    await asyncio.sleep(30)
@@ -177,6 +181,7 @@ async def test_team_2logging():
    langfuse_client_1 = langfuse.Langfuse(
        public_key=os.getenv("LANGFUSE_PROJECT1_PUBLIC"),
        secret_key=os.getenv("LANGFUSE_PROJECT1_SECRET"),
        host="https://us.cloud.langfuse.com",
    )

    generations_team_1 = langfuse_client_1.get_generations(
6 ui/litellm-dashboard/package-lock.json generated

@@ -1852,9 +1852,9 @@
      }
    },
    "node_modules/cross-spawn": {
      "version": "7.0.3",
      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz",
      "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==",
      "version": "7.0.6",
      "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
      "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
      "dependencies": {
        "path-key": "^3.1.0",
        "shebang-command": "^2.0.0",