Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 10:14:26 +00:00)
Merge branch 'main' into litellm_dev_04_08_2025_p1
Commit a206b0088c
111 changed files with 3507 additions and 662 deletions
|
@ -610,6 +610,8 @@ jobs:
|
|||
name: Install Dependencies
|
||||
command: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install wheel
|
||||
pip install --upgrade pip wheel setuptools
|
||||
python -m pip install -r requirements.txt
|
||||
pip install "pytest==7.3.1"
|
||||
pip install "respx==0.21.1"
|
||||
|
@ -1125,6 +1127,7 @@ jobs:
|
|||
name: Install Dependencies
|
||||
command: |
|
||||
python -m pip install --upgrade pip
|
||||
python -m pip install wheel setuptools
|
||||
python -m pip install -r requirements.txt
|
||||
pip install "pytest==7.3.1"
|
||||
pip install "pytest-retry==1.6.3"
|
||||
|
|
|
@ -2,6 +2,10 @@ apiVersion: v1
|
|||
kind: Service
|
||||
metadata:
|
||||
name: {{ include "litellm.fullname" . }}
|
||||
{{- with .Values.service.annotations }}
|
||||
annotations:
|
||||
{{- toYaml . | nindent 4 }}
|
||||
{{- end }}
|
||||
labels:
|
||||
{{- include "litellm.labels" . | nindent 4 }}
|
||||
spec:
|
||||
|
|
|
@ -438,6 +438,179 @@ assert isinstance(
|
|||
```
|
||||
|
||||
|
||||
### Google Search Tool
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="sdk" label="SDK">
|
||||
|
||||
```python
|
||||
from litellm import completion
|
||||
import os
|
||||
|
||||
os.environ["GEMINI_API_KEY"] = ".."
|
||||
|
||||
tools = [{"googleSearch": {}}] # 👈 ADD GOOGLE SEARCH
|
||||
|
||||
response = completion(
|
||||
model="gemini/gemini-2.0-flash",
|
||||
messages=[{"role": "user", "content": "What is the weather in San Francisco?"}],
|
||||
tools=tools,
|
||||
)
|
||||
|
||||
print(response)
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="proxy" label="PROXY">
|
||||
|
||||
1. Setup config.yaml
|
||||
```yaml
|
||||
model_list:
|
||||
- model_name: gemini-2.0-flash
|
||||
litellm_params:
|
||||
model: gemini/gemini-2.0-flash
|
||||
api_key: os.environ/GEMINI_API_KEY
|
||||
```
|
||||
|
||||
2. Start Proxy
|
||||
```bash
|
||||
$ litellm --config /path/to/config.yaml
|
||||
```
|
||||
|
||||
3. Make Request!
|
||||
```bash
|
||||
curl -X POST 'http://0.0.0.0:4000/chat/completions' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'Authorization: Bearer sk-1234' \
|
||||
-d '{
|
||||
"model": "gemini-2.0-flash",
|
||||
"messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
|
||||
"tools": [{"googleSearch": {}}]
|
||||
}
|
||||
'
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
### Google Search Retrieval
|
||||
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="sdk" label="SDK">
|
||||
|
||||
```python
|
||||
from litellm import completion
|
||||
import os
|
||||
|
||||
os.environ["GEMINI_API_KEY"] = ".."
|
||||
|
||||
tools = [{"googleSearchRetrieval": {}}] # 👈 ADD GOOGLE SEARCH
|
||||
|
||||
response = completion(
|
||||
model="gemini/gemini-2.0-flash",
|
||||
messages=[{"role": "user", "content": "What is the weather in San Francisco?"}],
|
||||
tools=tools,
|
||||
)
|
||||
|
||||
print(response)
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="proxy" label="PROXY">
|
||||
|
||||
1. Setup config.yaml
|
||||
```yaml
|
||||
model_list:
|
||||
- model_name: gemini-2.0-flash
|
||||
litellm_params:
|
||||
model: gemini/gemini-2.0-flash
|
||||
api_key: os.environ/GEMINI_API_KEY
|
||||
```
|
||||
|
||||
2. Start Proxy
|
||||
```bash
|
||||
$ litellm --config /path/to/config.yaml
|
||||
```
|
||||
|
||||
3. Make Request!
|
||||
```bash
|
||||
curl -X POST 'http://0.0.0.0:4000/chat/completions' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'Authorization: Bearer sk-1234' \
|
||||
-d '{
|
||||
"model": "gemini-2.0-flash",
|
||||
"messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
|
||||
"tools": [{"googleSearchRetrieval": {}}]
|
||||
}
|
||||
'
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
||||
### Code Execution Tool
|
||||
|
||||
|
||||
<Tabs>
|
||||
<TabItem value="sdk" label="SDK">
|
||||
|
||||
```python
|
||||
from litellm import completion
|
||||
import os
|
||||
|
||||
os.environ["GEMINI_API_KEY"] = ".."
|
||||
|
||||
tools = [{"codeExecution": {}}] # 👈 ADD GOOGLE SEARCH
|
||||
|
||||
response = completion(
|
||||
model="gemini/gemini-2.0-flash",
|
||||
messages=[{"role": "user", "content": "What is the weather in San Francisco?"}],
|
||||
tools=tools,
|
||||
)
|
||||
|
||||
print(response)
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
<TabItem value="proxy" label="PROXY">
|
||||
|
||||
1. Setup config.yaml
|
||||
```yaml
|
||||
model_list:
|
||||
- model_name: gemini-2.0-flash
|
||||
litellm_params:
|
||||
model: gemini/gemini-2.0-flash
|
||||
api_key: os.environ/GEMINI_API_KEY
|
||||
```
|
||||
|
||||
2. Start Proxy
|
||||
```bash
|
||||
$ litellm --config /path/to/config.yaml
|
||||
```
|
||||
|
||||
3. Make Request!
|
||||
```bash
|
||||
curl -X POST 'http://0.0.0.0:4000/chat/completions' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-H 'Authorization: Bearer sk-1234' \
|
||||
-d '{
|
||||
"model": "gemini-2.0-flash",
|
||||
"messages": [{"role": "user", "content": "What is the weather in San Francisco?"}],
|
||||
"tools": [{"codeExecution": {}}]
|
||||
}
|
||||
'
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
## JSON Mode
|
||||
|
||||
<Tabs>
|
||||
|
|
|
@ -398,6 +398,8 @@ curl http://localhost:4000/v1/chat/completions \
|
|||
</TabItem>
|
||||
</Tabs>
|
||||
|
||||
You can also use the `enterpriseWebSearch` tool for an [enterprise compliant search](https://cloud.google.com/vertex-ai/generative-ai/docs/grounding/web-grounding-enterprise).
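A minimal SDK sketch of how this might look, assuming `enterpriseWebSearch` is passed the same way as the other grounding tools shown above (the model name and credential setup here are illustrative):

```python
from litellm import completion

# Hypothetical example: pass the enterpriseWebSearch tool like any other grounding tool.
# Assumes Vertex AI credentials are already configured in your environment.
tools = [{"enterpriseWebSearch": {}}]

response = completion(
    model="vertex_ai/gemini-2.0-flash",
    messages=[{"role": "user", "content": "What is the weather in San Francisco?"}],
    tools=tools,
)

print(response)
```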
|
||||
|
||||
#### **Moving from Vertex AI SDK to LiteLLM (GROUNDING)**
|
||||
|
||||
|
||||
|
|
|
@ -449,6 +449,7 @@ router_settings:
|
|||
| MICROSOFT_CLIENT_ID | Client ID for Microsoft services
|
||||
| MICROSOFT_CLIENT_SECRET | Client secret for Microsoft services
|
||||
| MICROSOFT_TENANT | Tenant ID for Microsoft Azure
|
||||
| MICROSOFT_SERVICE_PRINCIPAL_ID | Service Principal ID for Microsoft Enterprise Application. (This is an advanced feature if you want litellm to auto-assign members to Litellm Teams based on their Microsoft Entra ID Groups)
|
||||
| NO_DOCS | Flag to disable documentation generation
|
||||
| NO_PROXY | List of addresses to bypass proxy
|
||||
| OAUTH_TOKEN_INFO_ENDPOINT | Endpoint for OAuth token info retrieval
|
||||
|
|
|
@ -161,6 +161,89 @@ Here's the available UI roles for a LiteLLM Internal User:
|
|||
- `internal_user`: can login, view/create/delete their own keys, view their spend. **Cannot** add new users.
|
||||
- `internal_user_viewer`: can login, view their own keys, view their own spend. **Cannot** create/delete keys, add new users.
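As a reference, here is a hypothetical sketch of assigning one of these roles when creating a user via the proxy API (it assumes a `/user/new` endpoint that accepts a `user_role` field with the values listed above):

```bash
curl -X POST '<PROXY_BASE_URL>/user/new' \
-H 'Authorization: Bearer <PROXY_MASTER_KEY>' \
-H 'Content-Type: application/json' \
-d '{
    "user_email": "viewer@example.com",
    "user_role": "internal_user_viewer"
}'
```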
|
||||
|
||||
## Auto-add SSO users to teams
|
||||
|
||||
This walks through setting up SSO auto-add for **Okta and Google SSO**.
|
||||
|
||||
### Okta, Google SSO
|
||||
|
||||
1. Specify the JWT field that contains the team IDs that the user belongs to.
|
||||
|
||||
```yaml
|
||||
general_settings:
|
||||
master_key: sk-1234
|
||||
litellm_jwtauth:
|
||||
team_ids_jwt_field: "groups" # 👈 CAN BE ANY FIELD
|
||||
```
|
||||
|
||||
This assumes your SSO token looks like the example below. **If you need to inspect the JWT fields LiteLLM receives from your SSO provider, follow the instructions [here](#debugging-sso-jwt-fields).**
|
||||
|
||||
```
|
||||
{
|
||||
...,
|
||||
"groups": ["team_id_1", "team_id_2"]
|
||||
}
|
||||
```
|
||||
|
||||
2. Create the teams on LiteLLM
|
||||
|
||||
```bash
|
||||
curl -X POST '<PROXY_BASE_URL>/team/new' \
|
||||
-H 'Authorization: Bearer <PROXY_MASTER_KEY>' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"team_alias": "team_1",
|
||||
"team_id": "team_id_1" # 👈 MUST BE THE SAME AS THE SSO GROUP ID
|
||||
}'
|
||||
```
|
||||
|
||||
3. Test the SSO flow
|
||||
|
||||
Here's a walkthrough of [how it works](https://www.loom.com/share/8959be458edf41fd85937452c29a33f3?sid=7ebd6d37-569a-4023-866e-e0cde67cb23e)
|
||||
|
||||
### Microsoft Entra ID SSO group assignment
|
||||
|
||||
This walks through setting up SSO auto-add for **Microsoft Entra ID**.
|
||||
|
||||
Follow along with this video for a walkthrough of how to set this up with Microsoft Entra ID.
|
||||
|
||||
|
||||
<iframe width="840" height="500" src="https://www.loom.com/embed/ea711323aa9a496d84a01fd7b2a12f54?sid=c53e238c-5bfd-4135-b8fb-b5b1a08632cf" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>
|
||||
|
||||
|
||||
### Debugging SSO JWT fields
|
||||
|
||||
If you need to inspect the JWT fields received from your SSO provider by LiteLLM, follow these instructions. This guide walks you through setting up a debug callback to view the JWT data during the SSO process.
|
||||
|
||||
|
||||
<Image img={require('../../img/debug_sso.png')} style={{ width: '500px', height: 'auto' }} />
|
||||
<br />
|
||||
|
||||
1. Add `/sso/debug/callback` as a redirect URL in your SSO provider
|
||||
|
||||
In your SSO provider's settings, add the following URL as a new redirect (callback) URL:
|
||||
|
||||
```bash showLineNumbers title="Redirect URL"
|
||||
http://<proxy_base_url>/sso/debug/callback
|
||||
```
|
||||
|
||||
|
||||
2. Navigate to the debug login page in your browser
|
||||
|
||||
Navigate to the following URL in your browser:
|
||||
|
||||
```bash showLineNumbers title="URL to navigate to"
|
||||
https://<proxy_base_url>/sso/debug/login
|
||||
```
|
||||
|
||||
This will initiate the standard SSO flow. You will be redirected to your SSO provider's login screen, and after successful authentication, you will be redirected back to LiteLLM's debug callback route.
|
||||
|
||||
|
||||
3. View the JWT fields
|
||||
|
||||
Once redirected, you should see a page called "SSO Debug Information". This page displays the JWT fields received from your SSO provider (as shown in the image above).
|
||||
|
||||
|
||||
## Advanced
|
||||
### Setting custom logout URLs
|
||||
|
||||
|
@ -196,40 +279,6 @@ This budget does not apply to keys created under non-default teams.
|
|||
|
||||
[**Go Here**](./team_budgets.md)
|
||||
|
||||
### Auto-add SSO users to teams
|
||||
|
||||
1. Specify the JWT field that contains the team ids, that the user belongs to.
|
||||
|
||||
```yaml
|
||||
general_settings:
|
||||
master_key: sk-1234
|
||||
litellm_jwtauth:
|
||||
team_ids_jwt_field: "groups" # 👈 CAN BE ANY FIELD
|
||||
```
|
||||
|
||||
This is assuming your SSO token looks like this:
|
||||
```
|
||||
{
|
||||
...,
|
||||
"groups": ["team_id_1", "team_id_2"]
|
||||
}
|
||||
```
|
||||
|
||||
2. Create the teams on LiteLLM
|
||||
|
||||
```bash
|
||||
curl -X POST '<PROXY_BASE_URL>/team/new' \
|
||||
-H 'Authorization: Bearer <PROXY_MASTER_KEY>' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-D '{
|
||||
"team_alias": "team_1",
|
||||
"team_id": "team_id_1" # 👈 MUST BE THE SAME AS THE SSO GROUP ID
|
||||
}'
|
||||
```
|
||||
|
||||
3. Test the SSO flow
|
||||
|
||||
Here's a walkthrough of [how it works](https://www.loom.com/share/8959be458edf41fd85937452c29a33f3?sid=7ebd6d37-569a-4023-866e-e0cde67cb23e)
|
||||
|
||||
### Restrict Users from creating personal keys
|
||||
|
||||
|
|
BIN
docs/my-website/img/debug_sso.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 167 KiB |
161
docs/my-website/src/components/TransformRequestPlayground.tsx
Normal file
|
@ -0,0 +1,161 @@
|
|||
import React, { useState } from 'react';
|
||||
import styles from './transform_request.module.css';
|
||||
|
||||
const DEFAULT_REQUEST = {
|
||||
"model": "bedrock/gpt-4",
|
||||
"messages": [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are a helpful assistant."
|
||||
},
|
||||
{
|
||||
"role": "user",
|
||||
"content": "Explain quantum computing in simple terms"
|
||||
}
|
||||
],
|
||||
"temperature": 0.7,
|
||||
"max_tokens": 500,
|
||||
"stream": true
|
||||
};
|
||||
|
||||
type ViewMode = 'split' | 'request' | 'transformed';
|
||||
|
||||
const TransformRequestPlayground: React.FC = () => {
|
||||
const [request, setRequest] = useState(JSON.stringify(DEFAULT_REQUEST, null, 2));
|
||||
const [transformedRequest, setTransformedRequest] = useState('');
|
||||
const [viewMode, setViewMode] = useState<ViewMode>('split');
|
||||
|
||||
const handleTransform = async () => {
|
||||
try {
|
||||
// Here you would make the actual API call to transform the request
|
||||
// For now, we'll just set a sample response
|
||||
const sampleResponse = `curl -X POST \\
|
||||
https://api.openai.com/v1/chat/completions \\
|
||||
-H 'Authorization: Bearer sk-xxx' \\
|
||||
-H 'Content-Type: application/json' \\
|
||||
-d '{
|
||||
"model": "gpt-4",
|
||||
"messages": [
|
||||
{
|
||||
"role": "system",
|
||||
"content": "You are a helpful assistant."
|
||||
}
|
||||
],
|
||||
"temperature": 0.7
|
||||
}'`;
|
||||
setTransformedRequest(sampleResponse);
|
||||
} catch (error) {
|
||||
console.error('Error transforming request:', error);
|
||||
}
|
||||
};
|
||||
|
||||
const handleCopy = () => {
|
||||
navigator.clipboard.writeText(transformedRequest);
|
||||
};
|
||||
|
||||
const renderContent = () => {
|
||||
switch (viewMode) {
|
||||
case 'request':
|
||||
return (
|
||||
<div className={styles.panel}>
|
||||
<div className={styles['panel-header']}>
|
||||
<h2>Original Request</h2>
|
||||
<p>The request you would send to LiteLLM /chat/completions endpoint.</p>
|
||||
</div>
|
||||
<textarea
|
||||
className={styles['code-input']}
|
||||
value={request}
|
||||
onChange={(e) => setRequest(e.target.value)}
|
||||
spellCheck={false}
|
||||
/>
|
||||
<div className={styles['panel-footer']}>
|
||||
<button className={styles['transform-button']} onClick={handleTransform}>
|
||||
Transform →
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
case 'transformed':
|
||||
return (
|
||||
<div className={styles.panel}>
|
||||
<div className={styles['panel-header']}>
|
||||
<h2>Transformed Request</h2>
|
||||
<p>How LiteLLM transforms your request for the specified provider.</p>
|
||||
<p className={styles.note}>Note: Sensitive headers are not shown.</p>
|
||||
</div>
|
||||
<div className={styles['code-output-container']}>
|
||||
<pre className={styles['code-output']}>{transformedRequest}</pre>
|
||||
<button className={styles['copy-button']} onClick={handleCopy}>
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
default:
|
||||
return (
|
||||
<>
|
||||
<div className={styles.panel}>
|
||||
<div className={styles['panel-header']}>
|
||||
<h2>Original Request</h2>
|
||||
<p>The request you would send to LiteLLM /chat/completions endpoint.</p>
|
||||
</div>
|
||||
<textarea
|
||||
className={styles['code-input']}
|
||||
value={request}
|
||||
onChange={(e) => setRequest(e.target.value)}
|
||||
spellCheck={false}
|
||||
/>
|
||||
<div className={styles['panel-footer']}>
|
||||
<button className={styles['transform-button']} onClick={handleTransform}>
|
||||
Transform →
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
<div className={styles.panel}>
|
||||
<div className={styles['panel-header']}>
|
||||
<h2>Transformed Request</h2>
|
||||
<p>How LiteLLM transforms your request for the specified provider.</p>
|
||||
<p className={styles.note}>Note: Sensitive headers are not shown.</p>
|
||||
</div>
|
||||
<div className={styles['code-output-container']}>
|
||||
<pre className={styles['code-output']}>{transformedRequest}</pre>
|
||||
<button className={styles['copy-button']} onClick={handleCopy}>
|
||||
Copy
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
</>
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
return (
|
||||
<div className={styles['transform-playground']}>
|
||||
<div className={styles['view-toggle']}>
|
||||
<button
|
||||
className={viewMode === 'split' ? styles.active : ''}
|
||||
onClick={() => setViewMode('split')}
|
||||
>
|
||||
Split View
|
||||
</button>
|
||||
<button
|
||||
className={viewMode === 'request' ? styles.active : ''}
|
||||
onClick={() => setViewMode('request')}
|
||||
>
|
||||
Request
|
||||
</button>
|
||||
<button
|
||||
className={viewMode === 'transformed' ? styles.active : ''}
|
||||
onClick={() => setViewMode('transformed')}
|
||||
>
|
||||
Transformed
|
||||
</button>
|
||||
</div>
|
||||
<div className={styles['playground-container']}>
|
||||
{renderContent()}
|
||||
</div>
|
||||
</div>
|
||||
);
|
||||
};
|
||||
|
||||
export default TransformRequestPlayground;
|
|
@ -110,5 +110,8 @@ def get_litellm_params(
|
|||
"azure_password": kwargs.get("azure_password"),
|
||||
"max_retries": max_retries,
|
||||
"timeout": kwargs.get("timeout"),
|
||||
"bucket_name": kwargs.get("bucket_name"),
|
||||
"vertex_credentials": kwargs.get("vertex_credentials"),
|
||||
"vertex_project": kwargs.get("vertex_project"),
|
||||
}
|
||||
return litellm_params
|
||||
|
|
|
@ -90,35 +90,45 @@ def _generic_cost_per_character(
|
|||
return prompt_cost, completion_cost
|
||||
|
||||
|
||||
def _get_prompt_token_base_cost(model_info: ModelInfo, usage: Usage) -> float:
|
||||
def _get_token_base_cost(model_info: ModelInfo, usage: Usage) -> Tuple[float, float]:
|
||||
"""
|
||||
Return the prompt and completion base costs for a given model and usage.
|
||||
|
||||
If input_tokens > 128k and `input_cost_per_token_above_128k_tokens` is set, then we use the `input_cost_per_token_above_128k_tokens` field.
|
||||
If input_tokens > threshold and `input_cost_per_token_above_[x]k_tokens` or `input_cost_per_token_above_[x]_tokens` is set,
|
||||
then we use the corresponding threshold cost.
|
||||
"""
|
||||
input_cost_per_token_above_128k_tokens = model_info.get(
|
||||
"input_cost_per_token_above_128k_tokens"
|
||||
)
|
||||
if _is_above_128k(usage.prompt_tokens) and input_cost_per_token_above_128k_tokens:
|
||||
return input_cost_per_token_above_128k_tokens
|
||||
return model_info["input_cost_per_token"]
|
||||
prompt_base_cost = model_info["input_cost_per_token"]
|
||||
completion_base_cost = model_info["output_cost_per_token"]
|
||||
|
||||
## CHECK IF ABOVE THRESHOLD
|
||||
threshold: Optional[float] = None
|
||||
for key, value in sorted(model_info.items(), reverse=True):
|
||||
if key.startswith("input_cost_per_token_above_") and value is not None:
|
||||
try:
|
||||
# Handle both formats: _above_128k_tokens and _above_128_tokens
|
||||
threshold_str = key.split("_above_")[1].split("_tokens")[0]
|
||||
threshold = float(threshold_str.replace("k", "")) * (
|
||||
1000 if "k" in threshold_str else 1
|
||||
)
|
||||
if usage.prompt_tokens > threshold:
|
||||
prompt_base_cost = cast(
|
||||
float,
|
||||
model_info.get(key, prompt_base_cost),
|
||||
)
|
||||
completion_base_cost = cast(
|
||||
float,
|
||||
model_info.get(
|
||||
f"output_cost_per_token_above_{threshold_str}_tokens",
|
||||
completion_base_cost,
|
||||
),
|
||||
)
|
||||
break
|
||||
except (IndexError, ValueError):
|
||||
continue
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
def _get_completion_token_base_cost(model_info: ModelInfo, usage: Usage) -> float:
|
||||
"""
|
||||
Return prompt cost for a given model and usage.
|
||||
|
||||
If input_tokens > 128k and `input_cost_per_token_above_128k_tokens` is set, then we use the `input_cost_per_token_above_128k_tokens` field.
|
||||
"""
|
||||
output_cost_per_token_above_128k_tokens = model_info.get(
|
||||
"output_cost_per_token_above_128k_tokens"
|
||||
)
|
||||
if (
|
||||
_is_above_128k(usage.completion_tokens)
|
||||
and output_cost_per_token_above_128k_tokens
|
||||
):
|
||||
return output_cost_per_token_above_128k_tokens
|
||||
return model_info["output_cost_per_token"]
|
||||
return prompt_base_cost, completion_base_cost
|
||||
|
||||
|
||||
def calculate_cost_component(
|
||||
|
@ -215,7 +225,9 @@ def generic_cost_per_token(
|
|||
if text_tokens == 0:
|
||||
text_tokens = usage.prompt_tokens - cache_hit_tokens - audio_tokens
|
||||
|
||||
prompt_base_cost = _get_prompt_token_base_cost(model_info=model_info, usage=usage)
|
||||
prompt_base_cost, completion_base_cost = _get_token_base_cost(
|
||||
model_info=model_info, usage=usage
|
||||
)
|
||||
|
||||
prompt_cost = float(text_tokens) * prompt_base_cost
|
||||
|
||||
|
@ -253,9 +265,6 @@ def generic_cost_per_token(
|
|||
)
|
||||
|
||||
## CALCULATE OUTPUT COST
|
||||
completion_base_cost = _get_completion_token_base_cost(
|
||||
model_info=model_info, usage=usage
|
||||
)
|
||||
text_tokens = usage.completion_tokens
|
||||
audio_tokens = 0
|
||||
if usage.completion_tokens_details is not None:
|
||||
|
|
|
@ -2,7 +2,10 @@
|
|||
Common utility functions used for translating messages across providers
|
||||
"""
|
||||
|
||||
from typing import Dict, List, Literal, Optional, Union, cast
|
||||
import io
|
||||
import mimetypes
|
||||
from os import PathLike
|
||||
from typing import Dict, List, Literal, Mapping, Optional, Union, cast
|
||||
|
||||
from litellm.types.llms.openai import (
|
||||
AllMessageValues,
|
||||
|
@ -10,7 +13,13 @@ from litellm.types.llms.openai import (
|
|||
ChatCompletionFileObject,
|
||||
ChatCompletionUserMessage,
|
||||
)
|
||||
from litellm.types.utils import Choices, ModelResponse, StreamingChoices
|
||||
from litellm.types.utils import (
|
||||
Choices,
|
||||
ExtractedFileData,
|
||||
FileTypes,
|
||||
ModelResponse,
|
||||
StreamingChoices,
|
||||
)
|
||||
|
||||
DEFAULT_USER_CONTINUE_MESSAGE = ChatCompletionUserMessage(
|
||||
content="Please continue.", role="user"
|
||||
|
@ -350,6 +359,68 @@ def update_messages_with_model_file_ids(
|
|||
return messages
|
||||
|
||||
|
||||
def extract_file_data(file_data: FileTypes) -> ExtractedFileData:
|
||||
"""
|
||||
Extracts and processes file data from various input formats.
|
||||
|
||||
Args:
|
||||
file_data: Can be a tuple of (filename, content, [content_type], [headers]) or direct file content
|
||||
|
||||
Returns:
|
||||
ExtractedFileData containing:
|
||||
- filename: Name of the file if provided
|
||||
- content: The file content in bytes
|
||||
- content_type: MIME type of the file
|
||||
- headers: Any additional headers
|
||||
"""
|
||||
# Parse the file_data based on its type
|
||||
filename = None
|
||||
file_content = None
|
||||
content_type = None
|
||||
file_headers: Mapping[str, str] = {}
|
||||
|
||||
if isinstance(file_data, tuple):
|
||||
if len(file_data) == 2:
|
||||
filename, file_content = file_data
|
||||
elif len(file_data) == 3:
|
||||
filename, file_content, content_type = file_data
|
||||
elif len(file_data) == 4:
|
||||
filename, file_content, content_type, file_headers = file_data
|
||||
else:
|
||||
file_content = file_data
|
||||
# Convert content to bytes
|
||||
if isinstance(file_content, (str, PathLike)):
|
||||
# If it's a path, open and read the file
|
||||
with open(file_content, "rb") as f:
|
||||
content = f.read()
|
||||
elif isinstance(file_content, io.IOBase):
|
||||
# If it's a file-like object
|
||||
content = file_content.read()
|
||||
|
||||
if isinstance(content, str):
|
||||
content = content.encode("utf-8")
|
||||
# Reset file pointer to beginning
|
||||
file_content.seek(0)
|
||||
elif isinstance(file_content, bytes):
|
||||
content = file_content
|
||||
else:
|
||||
raise ValueError(f"Unsupported file content type: {type(file_content)}")
|
||||
|
||||
# Use provided content type or guess based on filename
|
||||
if not content_type:
|
||||
content_type = (
|
||||
mimetypes.guess_type(filename)[0]
|
||||
if filename
|
||||
else "application/octet-stream"
|
||||
)
|
||||
|
||||
return ExtractedFileData(
|
||||
filename=filename,
|
||||
content=content,
|
||||
content_type=content_type,
|
||||
headers=file_headers,
|
||||
)
|
||||
|
||||
def unpack_defs(schema, defs):
|
||||
properties = schema.get("properties", None)
|
||||
if properties is None:
|
||||
|
@ -381,3 +452,4 @@ def unpack_defs(schema, defs):
|
|||
unpack_defs(ref, defs)
|
||||
value["items"] = ref
|
||||
continue
|
||||
|
||||
|
|
|
@ -50,6 +50,7 @@ class AiohttpOpenAIChatConfig(OpenAILikeChatConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -301,6 +301,7 @@ class AnthropicChatCompletion(BaseLLM):
|
|||
model=model,
|
||||
messages=messages,
|
||||
optional_params={**optional_params, "is_vertex_request": is_vertex_request},
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
|
||||
config = ProviderConfigManager.get_provider_chat_config(
|
||||
|
|
|
@ -876,6 +876,7 @@ class AnthropicConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> Dict:
|
||||
|
|
|
@ -87,6 +87,7 @@ class AnthropicTextConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -293,6 +293,7 @@ class AzureOpenAIConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -39,6 +39,7 @@ class AzureAIStudioConfig(OpenAIConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -262,6 +262,7 @@ class BaseConfig(ABC):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
from abc import abstractmethod
|
||||
from typing import TYPE_CHECKING, Any, List, Optional
|
||||
from typing import TYPE_CHECKING, Any, List, Optional, Union
|
||||
|
||||
import httpx
|
||||
|
||||
|
@ -33,23 +33,22 @@ class BaseFilesConfig(BaseConfig):
|
|||
) -> List[OpenAICreateFileRequestOptionalParams]:
|
||||
pass
|
||||
|
||||
def get_complete_url(
|
||||
def get_complete_file_url(
|
||||
self,
|
||||
api_base: Optional[str],
|
||||
api_key: Optional[str],
|
||||
model: str,
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
stream: Optional[bool] = None,
|
||||
) -> str:
|
||||
"""
|
||||
OPTIONAL
|
||||
|
||||
Get the complete url for the request
|
||||
|
||||
Some providers need `model` in `api_base`
|
||||
"""
|
||||
return api_base or ""
|
||||
data: CreateFileRequest,
|
||||
):
|
||||
return self.get_complete_url(
|
||||
api_base=api_base,
|
||||
api_key=api_key,
|
||||
model=model,
|
||||
optional_params=optional_params,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
|
||||
@abstractmethod
|
||||
def transform_create_file_request(
|
||||
|
@ -58,7 +57,7 @@ class BaseFilesConfig(BaseConfig):
|
|||
create_file_data: CreateFileRequest,
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
) -> dict:
|
||||
) -> Union[dict, str, bytes]:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
|
|
|
@ -65,6 +65,7 @@ class BaseImageVariationConfig(BaseConfig, ABC):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -833,6 +833,7 @@ class AmazonConverseConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -442,6 +442,7 @@ class AmazonInvokeConfig(BaseConfig, BaseAWSLLM):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -118,6 +118,7 @@ class ClarifaiConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -60,6 +60,7 @@ class CloudflareChatConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -118,6 +118,7 @@ class CohereChatConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -101,6 +101,7 @@ class CohereTextConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -229,6 +229,7 @@ class BaseLLMAIOHTTPHandler:
|
|||
model=model,
|
||||
messages=messages,
|
||||
optional_params=optional_params,
|
||||
litellm_params=litellm_params,
|
||||
api_base=api_base,
|
||||
)
|
||||
|
||||
|
@ -498,6 +499,7 @@ class BaseLLMAIOHTTPHandler:
|
|||
model=model,
|
||||
messages=[{"role": "user", "content": "test"}],
|
||||
optional_params=optional_params,
|
||||
litellm_params=litellm_params,
|
||||
api_base=api_base,
|
||||
)
|
||||
|
||||
|
|
|
@ -192,7 +192,7 @@ class AsyncHTTPHandler:
|
|||
async def post(
|
||||
self,
|
||||
url: str,
|
||||
data: Optional[Union[dict, str]] = None, # type: ignore
|
||||
data: Optional[Union[dict, str, bytes]] = None, # type: ignore
|
||||
json: Optional[dict] = None,
|
||||
params: Optional[dict] = None,
|
||||
headers: Optional[dict] = None,
|
||||
|
@ -427,7 +427,7 @@ class AsyncHTTPHandler:
|
|||
self,
|
||||
url: str,
|
||||
client: httpx.AsyncClient,
|
||||
data: Optional[Union[dict, str]] = None, # type: ignore
|
||||
data: Optional[Union[dict, str, bytes]] = None, # type: ignore
|
||||
json: Optional[dict] = None,
|
||||
params: Optional[dict] = None,
|
||||
headers: Optional[dict] = None,
|
||||
|
@ -527,7 +527,7 @@ class HTTPHandler:
|
|||
def post(
|
||||
self,
|
||||
url: str,
|
||||
data: Optional[Union[dict, str]] = None,
|
||||
data: Optional[Union[dict, str, bytes]] = None,
|
||||
json: Optional[Union[dict, str, List]] = None,
|
||||
params: Optional[dict] = None,
|
||||
headers: Optional[dict] = None,
|
||||
|
@ -573,7 +573,6 @@ class HTTPHandler:
|
|||
setattr(e, "text", error_text)
|
||||
|
||||
setattr(e, "status_code", e.response.status_code)
|
||||
|
||||
raise e
|
||||
except Exception as e:
|
||||
raise e
|
||||
|
|
|
@ -247,6 +247,7 @@ class BaseLLMHTTPHandler:
|
|||
messages=messages,
|
||||
optional_params=optional_params,
|
||||
api_base=api_base,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
|
||||
api_base = provider_config.get_complete_url(
|
||||
|
@ -625,6 +626,7 @@ class BaseLLMHTTPHandler:
|
|||
model=model,
|
||||
messages=[],
|
||||
optional_params=optional_params,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
|
||||
api_base = provider_config.get_complete_url(
|
||||
|
@ -896,6 +898,7 @@ class BaseLLMHTTPHandler:
|
|||
model=model,
|
||||
messages=[],
|
||||
optional_params=optional_params,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
|
||||
if client is None or not isinstance(client, HTTPHandler):
|
||||
|
@ -1228,15 +1231,19 @@ class BaseLLMHTTPHandler:
|
|||
model="",
|
||||
messages=[],
|
||||
optional_params={},
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
|
||||
api_base = provider_config.get_complete_url(
|
||||
api_base = provider_config.get_complete_file_url(
|
||||
api_base=api_base,
|
||||
api_key=api_key,
|
||||
model="",
|
||||
optional_params={},
|
||||
litellm_params=litellm_params,
|
||||
data=create_file_data,
|
||||
)
|
||||
if api_base is None:
|
||||
raise ValueError("api_base is required for create_file")
|
||||
|
||||
# Get the transformed request data for both steps
|
||||
transformed_request = provider_config.transform_create_file_request(
|
||||
|
@ -1263,48 +1270,57 @@ class BaseLLMHTTPHandler:
|
|||
else:
|
||||
sync_httpx_client = client
|
||||
|
||||
try:
|
||||
# Step 1: Initial request to get upload URL
|
||||
initial_response = sync_httpx_client.post(
|
||||
url=api_base,
|
||||
headers={
|
||||
**headers,
|
||||
**transformed_request["initial_request"]["headers"],
|
||||
},
|
||||
data=json.dumps(transformed_request["initial_request"]["data"]),
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# Extract upload URL from response headers
|
||||
upload_url = initial_response.headers.get("X-Goog-Upload-URL")
|
||||
|
||||
if not upload_url:
|
||||
raise ValueError("Failed to get upload URL from initial request")
|
||||
|
||||
# Step 2: Upload the actual file
|
||||
if isinstance(transformed_request, str) or isinstance(
|
||||
transformed_request, bytes
|
||||
):
|
||||
upload_response = sync_httpx_client.post(
|
||||
url=upload_url,
|
||||
headers=transformed_request["upload_request"]["headers"],
|
||||
data=transformed_request["upload_request"]["data"],
|
||||
url=api_base,
|
||||
headers=headers,
|
||||
data=transformed_request,
|
||||
timeout=timeout,
|
||||
)
|
||||
else:
|
||||
try:
|
||||
# Step 1: Initial request to get upload URL
|
||||
initial_response = sync_httpx_client.post(
|
||||
url=api_base,
|
||||
headers={
|
||||
**headers,
|
||||
**transformed_request["initial_request"]["headers"],
|
||||
},
|
||||
data=json.dumps(transformed_request["initial_request"]["data"]),
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
return provider_config.transform_create_file_response(
|
||||
model=None,
|
||||
raw_response=upload_response,
|
||||
logging_obj=logging_obj,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
# Extract upload URL from response headers
|
||||
upload_url = initial_response.headers.get("X-Goog-Upload-URL")
|
||||
|
||||
except Exception as e:
|
||||
raise self._handle_error(
|
||||
e=e,
|
||||
provider_config=provider_config,
|
||||
)
|
||||
if not upload_url:
|
||||
raise ValueError("Failed to get upload URL from initial request")
|
||||
|
||||
# Step 2: Upload the actual file
|
||||
upload_response = sync_httpx_client.post(
|
||||
url=upload_url,
|
||||
headers=transformed_request["upload_request"]["headers"],
|
||||
data=transformed_request["upload_request"]["data"],
|
||||
timeout=timeout,
|
||||
)
|
||||
except Exception as e:
|
||||
raise self._handle_error(
|
||||
e=e,
|
||||
provider_config=provider_config,
|
||||
)
|
||||
|
||||
return provider_config.transform_create_file_response(
|
||||
model=None,
|
||||
raw_response=upload_response,
|
||||
logging_obj=logging_obj,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
|
||||
async def async_create_file(
|
||||
self,
|
||||
transformed_request: dict,
|
||||
transformed_request: Union[bytes, str, dict],
|
||||
litellm_params: dict,
|
||||
provider_config: BaseFilesConfig,
|
||||
headers: dict,
|
||||
|
@ -1323,45 +1339,54 @@ class BaseLLMHTTPHandler:
|
|||
else:
|
||||
async_httpx_client = client
|
||||
|
||||
try:
|
||||
# Step 1: Initial request to get upload URL
|
||||
initial_response = await async_httpx_client.post(
|
||||
url=api_base,
|
||||
headers={
|
||||
**headers,
|
||||
**transformed_request["initial_request"]["headers"],
|
||||
},
|
||||
data=json.dumps(transformed_request["initial_request"]["data"]),
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
# Extract upload URL from response headers
|
||||
upload_url = initial_response.headers.get("X-Goog-Upload-URL")
|
||||
|
||||
if not upload_url:
|
||||
raise ValueError("Failed to get upload URL from initial request")
|
||||
|
||||
# Step 2: Upload the actual file
|
||||
if isinstance(transformed_request, str) or isinstance(
|
||||
transformed_request, bytes
|
||||
):
|
||||
upload_response = await async_httpx_client.post(
|
||||
url=upload_url,
|
||||
headers=transformed_request["upload_request"]["headers"],
|
||||
data=transformed_request["upload_request"]["data"],
|
||||
url=api_base,
|
||||
headers=headers,
|
||||
data=transformed_request,
|
||||
timeout=timeout,
|
||||
)
|
||||
else:
|
||||
try:
|
||||
# Step 1: Initial request to get upload URL
|
||||
initial_response = await async_httpx_client.post(
|
||||
url=api_base,
|
||||
headers={
|
||||
**headers,
|
||||
**transformed_request["initial_request"]["headers"],
|
||||
},
|
||||
data=json.dumps(transformed_request["initial_request"]["data"]),
|
||||
timeout=timeout,
|
||||
)
|
||||
|
||||
return provider_config.transform_create_file_response(
|
||||
model=None,
|
||||
raw_response=upload_response,
|
||||
logging_obj=logging_obj,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
# Extract upload URL from response headers
|
||||
upload_url = initial_response.headers.get("X-Goog-Upload-URL")
|
||||
|
||||
except Exception as e:
|
||||
verbose_logger.exception(f"Error creating file: {e}")
|
||||
raise self._handle_error(
|
||||
e=e,
|
||||
provider_config=provider_config,
|
||||
)
|
||||
if not upload_url:
|
||||
raise ValueError("Failed to get upload URL from initial request")
|
||||
|
||||
# Step 2: Upload the actual file
|
||||
upload_response = await async_httpx_client.post(
|
||||
url=upload_url,
|
||||
headers=transformed_request["upload_request"]["headers"],
|
||||
data=transformed_request["upload_request"]["data"],
|
||||
timeout=timeout,
|
||||
)
|
||||
except Exception as e:
|
||||
verbose_logger.exception(f"Error creating file: {e}")
|
||||
raise self._handle_error(
|
||||
e=e,
|
||||
provider_config=provider_config,
|
||||
)
|
||||
|
||||
return provider_config.transform_create_file_response(
|
||||
model=None,
|
||||
raw_response=upload_response,
|
||||
logging_obj=logging_obj,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
|
||||
def list_files(self):
|
||||
"""
|
||||
|
|
|
@ -116,6 +116,7 @@ class DatabricksConfig(DatabricksBase, OpenAILikeChatConfig, AnthropicConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -171,6 +171,7 @@ class DeepgramAudioTranscriptionConfig(BaseAudioTranscriptionConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -41,6 +41,7 @@ class FireworksAIMixin:
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -20,6 +20,7 @@ class GeminiModelInfo(BaseLLMModelInfo):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -4,11 +4,12 @@ Supports writing files to Google AI Studio Files API.
|
|||
For vertex ai, check out the vertex_ai/files/handler.py file.
|
||||
"""
|
||||
import time
|
||||
from typing import List, Mapping, Optional
|
||||
from typing import List, Optional
|
||||
|
||||
import httpx
|
||||
|
||||
from litellm._logging import verbose_logger
|
||||
from litellm.litellm_core_utils.prompt_templates.common_utils import extract_file_data
|
||||
from litellm.llms.base_llm.files.transformation import (
|
||||
BaseFilesConfig,
|
||||
LiteLLMLoggingObj,
|
||||
|
@ -91,66 +92,28 @@ class GoogleAIStudioFilesHandler(GeminiModelInfo, BaseFilesConfig):
|
|||
if file_data is None:
|
||||
raise ValueError("File data is required")
|
||||
|
||||
# Parse the file_data based on its type
|
||||
filename = None
|
||||
file_content = None
|
||||
content_type = None
|
||||
file_headers: Mapping[str, str] = {}
|
||||
|
||||
if isinstance(file_data, tuple):
|
||||
if len(file_data) == 2:
|
||||
filename, file_content = file_data
|
||||
elif len(file_data) == 3:
|
||||
filename, file_content, content_type = file_data
|
||||
elif len(file_data) == 4:
|
||||
filename, file_content, content_type, file_headers = file_data
|
||||
else:
|
||||
file_content = file_data
|
||||
|
||||
# Handle the file content based on its type
|
||||
import io
|
||||
from os import PathLike
|
||||
|
||||
# Convert content to bytes
|
||||
if isinstance(file_content, (str, PathLike)):
|
||||
# If it's a path, open and read the file
|
||||
with open(file_content, "rb") as f:
|
||||
content = f.read()
|
||||
elif isinstance(file_content, io.IOBase):
|
||||
# If it's a file-like object
|
||||
content = file_content.read()
|
||||
if isinstance(content, str):
|
||||
content = content.encode("utf-8")
|
||||
elif isinstance(file_content, bytes):
|
||||
content = file_content
|
||||
else:
|
||||
raise ValueError(f"Unsupported file content type: {type(file_content)}")
|
||||
# Use the common utility function to extract file data
|
||||
extracted_data = extract_file_data(file_data)
|
||||
|
||||
# Get file size
|
||||
file_size = len(content)
|
||||
|
||||
# Use provided content type or guess based on filename
|
||||
if not content_type:
|
||||
import mimetypes
|
||||
|
||||
content_type = (
|
||||
mimetypes.guess_type(filename)[0]
|
||||
if filename
|
||||
else "application/octet-stream"
|
||||
)
|
||||
file_size = len(extracted_data["content"])
|
||||
|
||||
# Step 1: Initial resumable upload request
|
||||
headers = {
|
||||
"X-Goog-Upload-Protocol": "resumable",
|
||||
"X-Goog-Upload-Command": "start",
|
||||
"X-Goog-Upload-Header-Content-Length": str(file_size),
|
||||
"X-Goog-Upload-Header-Content-Type": content_type,
|
||||
"X-Goog-Upload-Header-Content-Type": extracted_data["content_type"],
|
||||
"Content-Type": "application/json",
|
||||
}
|
||||
headers.update(file_headers) # Add any custom headers
|
||||
headers.update(extracted_data["headers"]) # Add any custom headers
|
||||
|
||||
# Initial metadata request body
|
||||
initial_data = {"file": {"display_name": filename or str(int(time.time()))}}
|
||||
initial_data = {
|
||||
"file": {
|
||||
"display_name": extracted_data["filename"] or str(int(time.time()))
|
||||
}
|
||||
}
|
||||
|
||||
# Step 2: Actual file upload data
|
||||
upload_headers = {
|
||||
|
@ -161,7 +124,10 @@ class GoogleAIStudioFilesHandler(GeminiModelInfo, BaseFilesConfig):
|
|||
|
||||
return {
|
||||
"initial_request": {"headers": headers, "data": initial_data},
|
||||
"upload_request": {"headers": upload_headers, "data": content},
|
||||
"upload_request": {
|
||||
"headers": upload_headers,
|
||||
"data": extracted_data["content"],
|
||||
},
|
||||
}
|
||||
|
||||
def transform_create_file_response(
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
import logging
|
||||
import os
|
||||
from typing import TYPE_CHECKING, Any, List, Optional, Union
|
||||
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
|
||||
|
||||
import httpx
|
||||
|
||||
|
@ -18,7 +18,6 @@ from litellm.llms.base_llm.chat.transformation import BaseLLMException
|
|||
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
|
||||
from ..common_utils import HuggingFaceError, _fetch_inference_provider_mapping
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
BASE_URL = "https://router.huggingface.co"
|
||||
|
@ -34,7 +33,8 @@ class HuggingFaceChatConfig(OpenAIGPTConfig):
|
|||
headers: dict,
|
||||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
optional_params: Dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
@ -51,7 +51,9 @@ class HuggingFaceChatConfig(OpenAIGPTConfig):
|
|||
def get_error_class(
|
||||
self, error_message: str, status_code: int, headers: Union[dict, httpx.Headers]
|
||||
) -> BaseLLMException:
|
||||
return HuggingFaceError(status_code=status_code, message=error_message, headers=headers)
|
||||
return HuggingFaceError(
|
||||
status_code=status_code, message=error_message, headers=headers
|
||||
)
|
||||
|
||||
def get_base_url(self, model: str, base_url: Optional[str]) -> Optional[str]:
|
||||
"""
|
||||
|
@ -82,7 +84,9 @@ class HuggingFaceChatConfig(OpenAIGPTConfig):
|
|||
if api_base is not None:
|
||||
complete_url = api_base
|
||||
elif os.getenv("HF_API_BASE") or os.getenv("HUGGINGFACE_API_BASE"):
|
||||
complete_url = str(os.getenv("HF_API_BASE")) or str(os.getenv("HUGGINGFACE_API_BASE"))
|
||||
complete_url = str(os.getenv("HF_API_BASE")) or str(
|
||||
os.getenv("HUGGINGFACE_API_BASE")
|
||||
)
|
||||
elif model.startswith(("http://", "https://")):
|
||||
complete_url = model
|
||||
# 4. Default construction with provider
|
||||
|
@ -138,4 +142,8 @@ class HuggingFaceChatConfig(OpenAIGPTConfig):
|
|||
)
|
||||
mapped_model = provider_mapping["providerId"]
|
||||
messages = self._transform_messages(messages=messages, model=mapped_model)
|
||||
return dict(ChatCompletionRequest(model=mapped_model, messages=messages, **optional_params))
|
||||
return dict(
|
||||
ChatCompletionRequest(
|
||||
model=mapped_model, messages=messages, **optional_params
|
||||
)
|
||||
)
|
||||
|
|
|
@ -1,15 +1,6 @@
|
|||
import json
|
||||
import os
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Literal,
|
||||
Optional,
|
||||
Union,
|
||||
get_args,
|
||||
)
|
||||
from typing import Any, Callable, Dict, List, Literal, Optional, Union, get_args
|
||||
|
||||
import httpx
|
||||
|
||||
|
@ -35,8 +26,9 @@ hf_tasks_embeddings = Literal[ # pipeline tags + hf tei endpoints - https://hug
|
|||
]
|
||||
|
||||
|
||||
|
||||
def get_hf_task_embedding_for_model(model: str, task_type: Optional[str], api_base: str) -> Optional[str]:
|
||||
def get_hf_task_embedding_for_model(
|
||||
model: str, task_type: Optional[str], api_base: str
|
||||
) -> Optional[str]:
|
||||
if task_type is not None:
|
||||
if task_type in get_args(hf_tasks_embeddings):
|
||||
return task_type
|
||||
|
@ -57,7 +49,9 @@ def get_hf_task_embedding_for_model(model: str, task_type: Optional[str], api_ba
|
|||
return pipeline_tag
|
||||
|
||||
|
||||
async def async_get_hf_task_embedding_for_model(model: str, task_type: Optional[str], api_base: str) -> Optional[str]:
|
||||
async def async_get_hf_task_embedding_for_model(
|
||||
model: str, task_type: Optional[str], api_base: str
|
||||
) -> Optional[str]:
|
||||
if task_type is not None:
|
||||
if task_type in get_args(hf_tasks_embeddings):
|
||||
return task_type
|
||||
|
@ -116,7 +110,9 @@ class HuggingFaceEmbedding(BaseLLM):
|
|||
input: List,
|
||||
optional_params: dict,
|
||||
) -> dict:
|
||||
hf_task = await async_get_hf_task_embedding_for_model(model=model, task_type=task_type, api_base=HF_HUB_URL)
|
||||
hf_task = await async_get_hf_task_embedding_for_model(
|
||||
model=model, task_type=task_type, api_base=HF_HUB_URL
|
||||
)
|
||||
|
||||
data = self._transform_input_on_pipeline_tag(input=input, pipeline_tag=hf_task)
|
||||
|
||||
|
@ -173,7 +169,9 @@ class HuggingFaceEmbedding(BaseLLM):
|
|||
task_type = optional_params.pop("input_type", None)
|
||||
|
||||
if call_type == "sync":
|
||||
hf_task = get_hf_task_embedding_for_model(model=model, task_type=task_type, api_base=HF_HUB_URL)
|
||||
hf_task = get_hf_task_embedding_for_model(
|
||||
model=model, task_type=task_type, api_base=HF_HUB_URL
|
||||
)
|
||||
elif call_type == "async":
|
||||
return self._async_transform_input(
|
||||
model=model, task_type=task_type, embed_url=embed_url, input=input
|
||||
|
@ -325,6 +323,7 @@ class HuggingFaceEmbedding(BaseLLM):
|
|||
input: list,
|
||||
model_response: EmbeddingResponse,
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
logging_obj: LiteLLMLoggingObj,
|
||||
encoding: Callable,
|
||||
api_key: Optional[str] = None,
|
||||
|
@ -341,9 +340,12 @@ class HuggingFaceEmbedding(BaseLLM):
|
|||
model=model,
|
||||
optional_params=optional_params,
|
||||
messages=[],
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
task_type = optional_params.pop("input_type", None)
|
||||
task = get_hf_task_embedding_for_model(model=model, task_type=task_type, api_base=HF_HUB_URL)
|
||||
task = get_hf_task_embedding_for_model(
|
||||
model=model, task_type=task_type, api_base=HF_HUB_URL
|
||||
)
|
||||
# print_verbose(f"{model}, {task}")
|
||||
embed_url = ""
|
||||
if "https" in model:
|
||||
|
@ -355,7 +357,9 @@ class HuggingFaceEmbedding(BaseLLM):
|
|||
elif "HUGGINGFACE_API_BASE" in os.environ:
|
||||
embed_url = os.getenv("HUGGINGFACE_API_BASE", "")
|
||||
else:
|
||||
embed_url = f"https://router.huggingface.co/hf-inference/pipeline/{task}/{model}"
|
||||
embed_url = (
|
||||
f"https://router.huggingface.co/hf-inference/pipeline/{task}/{model}"
|
||||
)
|
||||
|
||||
## ROUTING ##
|
||||
if aembedding is True:
|
||||
|
|
|
@ -355,6 +355,7 @@ class HuggingFaceEmbeddingConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: Dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> Dict:
|
||||
|
|
|
@ -36,6 +36,7 @@ def completion(
|
|||
model=model,
|
||||
messages=messages,
|
||||
optional_params=optional_params,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
|
||||
## Load Config
|
||||
|
|
|
@ -93,6 +93,7 @@ class NLPCloudConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -353,6 +353,7 @@ class OllamaConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -32,6 +32,7 @@ def completion(
|
|||
model=model,
|
||||
messages=messages,
|
||||
optional_params=optional_params,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
if "https" in model:
|
||||
completion_url = model
|
||||
|
@ -123,6 +124,7 @@ def embedding(
|
|||
model=model,
|
||||
messages=[],
|
||||
optional_params=optional_params,
|
||||
litellm_params={},
|
||||
)
|
||||
response = litellm.module_level_client.post(
|
||||
embeddings_url, headers=headers, json=data
|
||||
|
|
|
@ -88,6 +88,7 @@ class OobaboogaConfig(OpenAIGPTConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -321,6 +321,7 @@ class OpenAIGPTConfig(BaseLLMModelInfo, BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -286,6 +286,7 @@ class OpenAIConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -53,6 +53,7 @@ class OpenAIWhisperAudioTranscriptionConfig(BaseAudioTranscriptionConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -131,6 +131,7 @@ class PetalsConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -228,10 +228,10 @@ class PredibaseChatCompletion:
|
|||
api_key: str,
|
||||
logging_obj,
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
tenant_id: str,
|
||||
timeout: Union[float, httpx.Timeout],
|
||||
acompletion=None,
|
||||
litellm_params=None,
|
||||
logger_fn=None,
|
||||
headers: dict = {},
|
||||
) -> Union[ModelResponse, CustomStreamWrapper]:
|
||||
|
@ -241,6 +241,7 @@ class PredibaseChatCompletion:
|
|||
messages=messages,
|
||||
optional_params=optional_params,
|
||||
model=model,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
completion_url = ""
|
||||
input_text = ""
|
||||
|
|
|
@ -164,6 +164,7 @@ class PredibaseConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -141,6 +141,7 @@ def completion(
|
|||
model=model,
|
||||
messages=messages,
|
||||
optional_params=optional_params,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
# Start a prediction and get the prediction URL
|
||||
version_id = replicate_config.model_to_version_id(model)
|
||||
|
|
|
@ -312,6 +312,7 @@ class ReplicateConfig(BaseConfig):
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -96,6 +96,7 @@ class SagemakerLLM(BaseAWSLLM):
|
|||
model: str,
|
||||
data: dict,
|
||||
messages: List[AllMessageValues],
|
||||
litellm_params: dict,
|
||||
optional_params: dict,
|
||||
aws_region_name: str,
|
||||
extra_headers: Optional[dict] = None,
|
||||
|
@ -122,6 +123,7 @@ class SagemakerLLM(BaseAWSLLM):
|
|||
model=model,
|
||||
messages=messages,
|
||||
optional_params=optional_params,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
request = AWSRequest(
|
||||
method="POST", url=api_base, data=encoded_data, headers=headers
|
||||
|
@ -198,6 +200,7 @@ class SagemakerLLM(BaseAWSLLM):
|
|||
data=data,
|
||||
messages=messages,
|
||||
optional_params=optional_params,
|
||||
litellm_params=litellm_params,
|
||||
credentials=credentials,
|
||||
aws_region_name=aws_region_name,
|
||||
)
|
||||
|
@ -274,6 +277,7 @@ class SagemakerLLM(BaseAWSLLM):
|
|||
"model": model,
|
||||
"data": _data,
|
||||
"optional_params": optional_params,
|
||||
"litellm_params": litellm_params,
|
||||
"credentials": credentials,
|
||||
"aws_region_name": aws_region_name,
|
||||
"messages": messages,
|
||||
|
@ -426,6 +430,7 @@ class SagemakerLLM(BaseAWSLLM):
|
|||
"model": model,
|
||||
"data": data,
|
||||
"optional_params": optional_params,
|
||||
"litellm_params": litellm_params,
|
||||
"credentials": credentials,
|
||||
"aws_region_name": aws_region_name,
|
||||
"messages": messages,
|
||||
|
@ -496,6 +501,7 @@ class SagemakerLLM(BaseAWSLLM):
|
|||
"model": model,
|
||||
"data": data,
|
||||
"optional_params": optional_params,
|
||||
"litellm_params": litellm_params,
|
||||
"credentials": credentials,
|
||||
"aws_region_name": aws_region_name,
|
||||
"messages": messages,
|
||||
|
|
|
@@ -263,6 +263,7 @@ class SagemakerConfig(BaseConfig):
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:

@@ -92,6 +92,7 @@ class SnowflakeConfig(OpenAIGPTConfig):
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:

@@ -37,6 +37,7 @@ class TopazImageVariationConfig(BaseImageVariationConfig):
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:

@@ -48,6 +48,7 @@ class TritonConfig(BaseConfig):
        model: str,
        messages: List[AllMessageValues],
        optional_params: Dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> Dict:

@@ -42,6 +42,7 @@ class TritonEmbeddingConfig(BaseEmbeddingConfig):
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
@@ -1,3 +1,4 @@
import asyncio
from typing import Any, Coroutine, Optional, Union

import httpx

@@ -11,9 +12,9 @@ from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
from litellm.types.llms.openai import CreateFileRequest, OpenAIFileObject
from litellm.types.llms.vertex_ai import VERTEX_CREDENTIALS_TYPES

from .transformation import VertexAIFilesTransformation
from .transformation import VertexAIJsonlFilesTransformation

vertex_ai_files_transformation = VertexAIFilesTransformation()
vertex_ai_files_transformation = VertexAIJsonlFilesTransformation()


class VertexAIFilesHandler(GCSBucketBase):

@@ -92,5 +93,15 @@ class VertexAIFilesHandler(GCSBucketBase):
                timeout=timeout,
                max_retries=max_retries,
            )

            return None  # type: ignore
        else:
            return asyncio.run(
                self.async_create_file(
                    create_file_data=create_file_data,
                    api_base=api_base,
                    vertex_credentials=vertex_credentials,
                    vertex_project=vertex_project,
                    vertex_location=vertex_location,
                    timeout=timeout,
                    max_retries=max_retries,
                )
            )
@@ -1,7 +1,17 @@
import json
import os
import time
import uuid
from typing import Any, Dict, List, Optional, Tuple, Union

from httpx import Headers, Response

from litellm.litellm_core_utils.prompt_templates.common_utils import extract_file_data
from litellm.llms.base_llm.chat.transformation import BaseLLMException
from litellm.llms.base_llm.files.transformation import (
    BaseFilesConfig,
    LiteLLMLoggingObj,
)
from litellm.llms.vertex_ai.common_utils import (
    _convert_vertex_datetime_to_openai_datetime,
)
@@ -10,14 +20,317 @@ from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
    VertexGeminiConfig,
)
from litellm.types.llms.openai import (
    AllMessageValues,
    CreateFileRequest,
    FileTypes,
    OpenAICreateFileRequestOptionalParams,
    OpenAIFileObject,
    PathLike,
)
from litellm.types.llms.vertex_ai import GcsBucketResponse
from litellm.types.utils import ExtractedFileData, LlmProviders

from ..common_utils import VertexAIError
from ..vertex_llm_base import VertexBase


class VertexAIFilesTransformation(VertexGeminiConfig):
class VertexAIFilesConfig(VertexBase, BaseFilesConfig):
    """
    Config for VertexAI Files
    """

    def __init__(self):
        self.jsonl_transformation = VertexAIJsonlFilesTransformation()
        super().__init__()

    @property
    def custom_llm_provider(self) -> LlmProviders:
        return LlmProviders.VERTEX_AI

    def validate_environment(
        self,
        headers: dict,
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
        if not api_key:
            api_key, _ = self.get_access_token(
                credentials=litellm_params.get("vertex_credentials"),
                project_id=litellm_params.get("vertex_project"),
            )
            if not api_key:
                raise ValueError("api_key is required")
        headers["Authorization"] = f"Bearer {api_key}"
        return headers

    def _get_content_from_openai_file(self, openai_file_content: FileTypes) -> str:
        """
        Helper to extract content from various OpenAI file types and return as string.

        Handles:
        - Direct content (str, bytes, IO[bytes])
        - Tuple formats: (filename, content, [content_type], [headers])
        - PathLike objects
        """
        content: Union[str, bytes] = b""
        # Extract file content from tuple if necessary
        if isinstance(openai_file_content, tuple):
            # Take the second element which is always the file content
            file_content = openai_file_content[1]
        else:
            file_content = openai_file_content

        # Handle different file content types
        if isinstance(file_content, str):
            # String content can be used directly
            content = file_content
        elif isinstance(file_content, bytes):
            # Bytes content can be decoded
            content = file_content
        elif isinstance(file_content, PathLike):  # PathLike
            with open(str(file_content), "rb") as f:
                content = f.read()
        elif hasattr(file_content, "read"):  # IO[bytes]
            # File-like objects need to be read
            content = file_content.read()

        # Ensure content is string
        if isinstance(content, bytes):
            content = content.decode("utf-8")

        return content

    def _get_gcs_object_name_from_batch_jsonl(
        self,
        openai_jsonl_content: List[Dict[str, Any]],
    ) -> str:
        """
        Gets a unique GCS object name for the VertexAI batch prediction job

        named as: litellm-vertex-{model}-{uuid}
        """
        _model = openai_jsonl_content[0].get("body", {}).get("model", "")
        if "publishers/google/models" not in _model:
            _model = f"publishers/google/models/{_model}"
        object_name = f"litellm-vertex-files/{_model}/{uuid.uuid4()}"
        return object_name

    def get_object_name(
        self, extracted_file_data: ExtractedFileData, purpose: str
    ) -> str:
        """
        Get the object name for the request
        """
        extracted_file_data_content = extracted_file_data.get("content")

        if extracted_file_data_content is None:
            raise ValueError("file content is required")

        if purpose == "batch":
            ## 1. If jsonl, check if there's a model name
            file_content = self._get_content_from_openai_file(
                extracted_file_data_content
            )

            # Split into lines and parse each line as JSON
            openai_jsonl_content = [
                json.loads(line) for line in file_content.splitlines() if line.strip()
            ]
            if len(openai_jsonl_content) > 0:
                return self._get_gcs_object_name_from_batch_jsonl(openai_jsonl_content)

        ## 2. If not jsonl, return the filename
        filename = extracted_file_data.get("filename")
        if filename:
            return filename
        ## 3. If no file name, return timestamp
        return str(int(time.time()))

    def get_complete_file_url(
        self,
        api_base: Optional[str],
        api_key: Optional[str],
        model: str,
        optional_params: Dict,
        litellm_params: Dict,
        data: CreateFileRequest,
    ) -> str:
        """
        Get the complete url for the request
        """
        bucket_name = litellm_params.get("bucket_name") or os.getenv("GCS_BUCKET_NAME")
        if not bucket_name:
            raise ValueError("GCS bucket_name is required")
        file_data = data.get("file")
        purpose = data.get("purpose")
        if file_data is None:
            raise ValueError("file is required")
        if purpose is None:
            raise ValueError("purpose is required")
        extracted_file_data = extract_file_data(file_data)
        object_name = self.get_object_name(extracted_file_data, purpose)
        endpoint = (
            f"upload/storage/v1/b/{bucket_name}/o?uploadType=media&name={object_name}"
        )
        api_base = api_base or "https://storage.googleapis.com"
        if not api_base:
            raise ValueError("api_base is required")

        return f"{api_base}/{endpoint}"

    def get_supported_openai_params(
        self, model: str
    ) -> List[OpenAICreateFileRequestOptionalParams]:
        return []

    def map_openai_params(
        self,
        non_default_params: dict,
        optional_params: dict,
        model: str,
        drop_params: bool,
    ) -> dict:
        return optional_params

    def _map_openai_to_vertex_params(
        self,
        openai_request_body: Dict[str, Any],
    ) -> Dict[str, Any]:
        """
        wrapper to call VertexGeminiConfig.map_openai_params
        """
        from litellm.llms.vertex_ai.gemini.vertex_and_google_ai_studio_gemini import (
            VertexGeminiConfig,
        )

        config = VertexGeminiConfig()
        _model = openai_request_body.get("model", "")
        vertex_params = config.map_openai_params(
            model=_model,
            non_default_params=openai_request_body,
            optional_params={},
            drop_params=False,
        )
        return vertex_params

    def _transform_openai_jsonl_content_to_vertex_ai_jsonl_content(
        self, openai_jsonl_content: List[Dict[str, Any]]
    ) -> List[Dict[str, Any]]:
        """
        Transforms OpenAI JSONL content to VertexAI JSONL content

        jsonl body for vertex is {"request": <request_body>}
        Example Vertex jsonl
        {"request":{"contents": [{"role": "user", "parts": [{"text": "What is the relation between the following video and image samples?"}, {"fileData": {"fileUri": "gs://cloud-samples-data/generative-ai/video/animals.mp4", "mimeType": "video/mp4"}}, {"fileData": {"fileUri": "gs://cloud-samples-data/generative-ai/image/cricket.jpeg", "mimeType": "image/jpeg"}}]}]}}
        {"request":{"contents": [{"role": "user", "parts": [{"text": "Describe what is happening in this video."}, {"fileData": {"fileUri": "gs://cloud-samples-data/generative-ai/video/another_video.mov", "mimeType": "video/mov"}}]}]}}
        """

        vertex_jsonl_content = []
        for _openai_jsonl_content in openai_jsonl_content:
            openai_request_body = _openai_jsonl_content.get("body") or {}
            vertex_request_body = _transform_request_body(
                messages=openai_request_body.get("messages", []),
                model=openai_request_body.get("model", ""),
                optional_params=self._map_openai_to_vertex_params(openai_request_body),
                custom_llm_provider="vertex_ai",
                litellm_params={},
                cached_content=None,
            )
            vertex_jsonl_content.append({"request": vertex_request_body})
        return vertex_jsonl_content
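The docstring above shows the target format: each OpenAI batch line's `body` is re-issued as a Vertex-style request body and wrapped under a `"request"` key. A standalone sketch of that wrapping for a single JSONL line (the Vertex body shown is hand-written for illustration; in LiteLLM it comes from `_transform_request_body(...)`):

```python
import json

# One line of an OpenAI-style batch .jsonl file
openai_line = {
    "custom_id": "task-1",
    "method": "POST",
    "url": "/v1/chat/completions",
    "body": {
        "model": "gemini-2.0-flash-001",
        "messages": [{"role": "user", "content": "Describe this video."}],
    },
}

# Vertex batch prediction expects {"request": <vertex request body>} per line.
vertex_line = {
    "request": {
        "contents": [
            {"role": "user", "parts": [{"text": "Describe this video."}]}
        ]
    }
}

print(json.dumps(vertex_line))
```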
    def transform_create_file_request(
        self,
        model: str,
        create_file_data: CreateFileRequest,
        optional_params: dict,
        litellm_params: dict,
    ) -> Union[bytes, str, dict]:
        """
        2 Cases:
        1. Handle basic file upload
        2. Handle batch file upload (.jsonl)
        """
        file_data = create_file_data.get("file")
        if file_data is None:
            raise ValueError("file is required")
        extracted_file_data = extract_file_data(file_data)
        extracted_file_data_content = extracted_file_data.get("content")
        if (
            create_file_data.get("purpose") == "batch"
            and extracted_file_data.get("content_type") == "application/jsonl"
            and extracted_file_data_content is not None
        ):
            ## 1. If jsonl, check if there's a model name
            file_content = self._get_content_from_openai_file(
                extracted_file_data_content
            )

            # Split into lines and parse each line as JSON
            openai_jsonl_content = [
                json.loads(line) for line in file_content.splitlines() if line.strip()
            ]
            vertex_jsonl_content = (
                self._transform_openai_jsonl_content_to_vertex_ai_jsonl_content(
                    openai_jsonl_content
                )
            )
            return json.dumps(vertex_jsonl_content)
        elif isinstance(extracted_file_data_content, bytes):
            return extracted_file_data_content
        else:
            raise ValueError("Unsupported file content type")

    def transform_create_file_response(
        self,
        model: Optional[str],
        raw_response: Response,
        logging_obj: LiteLLMLoggingObj,
        litellm_params: dict,
    ) -> OpenAIFileObject:
        """
        Transform VertexAI File upload response into OpenAI-style FileObject
        """
        response_json = raw_response.json()

        try:
            response_object = GcsBucketResponse(**response_json)  # type: ignore
        except Exception as e:
            raise VertexAIError(
                status_code=raw_response.status_code,
                message=f"Error reading GCS bucket response: {e}",
                headers=raw_response.headers,
            )

        gcs_id = response_object.get("id", "")
        # Remove the last numeric ID from the path
        gcs_id = "/".join(gcs_id.split("/")[:-1]) if gcs_id else ""

        return OpenAIFileObject(
            purpose=response_object.get("purpose", "batch"),
            id=f"gs://{gcs_id}",
            filename=response_object.get("name", ""),
            created_at=_convert_vertex_datetime_to_openai_datetime(
                vertex_datetime=response_object.get("timeCreated", "")
            ),
            status="uploaded",
            bytes=int(response_object.get("size", 0)),
            object="file",
        )

    def get_error_class(
        self, error_message: str, status_code: int, headers: Union[Dict, Headers]
    ) -> BaseLLMException:
        return VertexAIError(
            status_code=status_code, message=error_message, headers=headers
        )


class VertexAIJsonlFilesTransformation(VertexGeminiConfig):
    """
    Transforms OpenAI /v1/files/* requests to VertexAI /v1/files/* requests
    """
@@ -240,6 +240,7 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
        gtool_func_declarations = []
        googleSearch: Optional[dict] = None
        googleSearchRetrieval: Optional[dict] = None
        enterpriseWebSearch: Optional[dict] = None
        code_execution: Optional[dict] = None
        # remove 'additionalProperties' from tools
        value = _remove_additional_properties(value)

@@ -273,6 +274,8 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
                googleSearch = tool["googleSearch"]
            elif tool.get("googleSearchRetrieval", None) is not None:
                googleSearchRetrieval = tool["googleSearchRetrieval"]
            elif tool.get("enterpriseWebSearch", None) is not None:
                enterpriseWebSearch = tool["enterpriseWebSearch"]
            elif tool.get("code_execution", None) is not None:
                code_execution = tool["code_execution"]
            elif openai_function_object is not None:

@@ -299,6 +302,8 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
            _tools["googleSearch"] = googleSearch
        if googleSearchRetrieval is not None:
            _tools["googleSearchRetrieval"] = googleSearchRetrieval
        if enterpriseWebSearch is not None:
            _tools["enterpriseWebSearch"] = enterpriseWebSearch
        if code_execution is not None:
            _tools["code_execution"] = code_execution
        return [_tools]
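The tool-mapping hunks above fold the recognized built-in tool entries into a single dict and return it as a one-element list. A sketch of the resulting Vertex `tools` payload when a caller passes the newly supported `enterpriseWebSearch` entry (values illustrative):

```python
# OpenAI-style request tools, including the newly supported entry
tools = [{"enterpriseWebSearch": {}}]

# After the mapping shown above, Vertex receives one merged tools object:
vertex_tools = [
    {
        "enterpriseWebSearch": {},
        # googleSearch, googleSearchRetrieval, code_execution and
        # function_declarations would be folded into this same dict if present
    }
]
```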
@@ -900,6 +905,7 @@ class VertexGeminiConfig(VertexAIBaseConfig, BaseConfig):
        model: str,
        messages: List[AllMessageValues],
        optional_params: Dict,
        litellm_params: Dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> Dict:

@@ -1017,7 +1023,7 @@ class VertexLLM(VertexBase):
        logging_obj,
        stream,
        optional_params: dict,
        litellm_params=None,
        litellm_params: dict,
        logger_fn=None,
        api_base: Optional[str] = None,
        client: Optional[AsyncHTTPHandler] = None,

@@ -1058,6 +1064,7 @@ class VertexLLM(VertexBase):
            model=model,
            messages=messages,
            optional_params=optional_params,
            litellm_params=litellm_params,
        )

        ## LOGGING

@@ -1144,6 +1151,7 @@ class VertexLLM(VertexBase):
            model=model,
            messages=messages,
            optional_params=optional_params,
            litellm_params=litellm_params,
        )

        request_body = await async_transform_request_body(**data)  # type: ignore

@@ -1317,6 +1325,7 @@ class VertexLLM(VertexBase):
            model=model,
            messages=messages,
            optional_params=optional_params,
            litellm_params=litellm_params,
        )

        ## TRANSFORMATION ##
@@ -94,6 +94,7 @@ class VertexMultimodalEmbedding(VertexLLM):
            optional_params=optional_params,
            api_key=auth_header,
            api_base=api_base,
            litellm_params=litellm_params,
        )

        ## LOGGING
@@ -47,6 +47,7 @@ class VertexAIMultimodalEmbeddingConfig(BaseEmbeddingConfig):
        model: str,
        messages: List[AllMessageValues],
        optional_params: dict,
        litellm_params: dict,
        api_key: Optional[str] = None,
        api_base: Optional[str] = None,
    ) -> dict:
@@ -10,7 +10,6 @@ from typing import TYPE_CHECKING, Any, Dict, Literal, Optional, Tuple

from litellm._logging import verbose_logger
from litellm.litellm_core_utils.asyncify import asyncify
from litellm.llms.base import BaseLLM
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
from litellm.types.llms.vertex_ai import VERTEX_CREDENTIALS_TYPES

@@ -22,7 +21,7 @@ else:
    GoogleCredentialsObject = Any


class VertexBase(BaseLLM):
class VertexBase:
    def __init__(self) -> None:
        super().__init__()
        self.access_token: Optional[str] = None
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> dict:
|
||||
|
|
|
@ -49,6 +49,7 @@ class WatsonXChatHandler(OpenAILikeChatHandler):
|
|||
messages=messages,
|
||||
optional_params=optional_params,
|
||||
api_key=api_key,
|
||||
litellm_params=litellm_params,
|
||||
)
|
||||
|
||||
## UPDATE PAYLOAD (optional params)
|
||||
|
|
|
@ -165,6 +165,7 @@ class IBMWatsonXMixin:
|
|||
model: str,
|
||||
messages: List[AllMessageValues],
|
||||
optional_params: Dict,
|
||||
litellm_params: dict,
|
||||
api_key: Optional[str] = None,
|
||||
api_base: Optional[str] = None,
|
||||
) -> Dict:
|
||||
|
|
|
@ -3616,6 +3616,7 @@ def embedding( # noqa: PLR0915
|
|||
optional_params=optional_params,
|
||||
client=client,
|
||||
aembedding=aembedding,
|
||||
litellm_params=litellm_params_dict,
|
||||
)
|
||||
elif custom_llm_provider == "bedrock":
|
||||
if isinstance(input, str):
|
||||
|
|
|
@ -380,6 +380,7 @@
|
|||
"supports_tool_choice": true,
|
||||
"supports_native_streaming": false,
|
||||
"supported_modalities": ["text", "image"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"supported_endpoints": ["/v1/responses", "/v1/batch"]
|
||||
},
|
||||
"o1-pro-2025-03-19": {
|
||||
|
@ -401,6 +402,7 @@
|
|||
"supports_tool_choice": true,
|
||||
"supports_native_streaming": false,
|
||||
"supported_modalities": ["text", "image"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"supported_endpoints": ["/v1/responses", "/v1/batch"]
|
||||
},
|
||||
"o1": {
|
||||
|
@@ -2409,25 +2411,26 @@
    "max_tokens": 4096,
    "max_input_tokens": 131072,
    "max_output_tokens": 4096,
    "input_cost_per_token": 0,
    "output_cost_per_token": 0,
    "input_cost_per_token": 0.000000075,
    "output_cost_per_token": 0.0000003,
    "litellm_provider": "azure_ai",
    "mode": "chat",
    "supports_function_calling": true,
    "source": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/models-featured#microsoft"
    "source": "https://techcommunity.microsoft.com/blog/Azure-AI-Services-blog/announcing-new-phi-pricing-empowering-your-business-with-small-language-models/4395112"
  },
  "azure_ai/Phi-4-multimodal-instruct": {
    "max_tokens": 4096,
    "max_input_tokens": 131072,
    "max_output_tokens": 4096,
    "input_cost_per_token": 0,
    "output_cost_per_token": 0,
    "input_cost_per_token": 0.00000008,
    "input_cost_per_audio_token": 0.000004,
    "output_cost_per_token": 0.00032,
    "litellm_provider": "azure_ai",
    "mode": "chat",
    "supports_audio_input": true,
    "supports_function_calling": true,
    "supports_vision": true,
    "source": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/models-featured#microsoft"
    "source": "https://techcommunity.microsoft.com/blog/Azure-AI-Services-blog/announcing-new-phi-pricing-empowering-your-business-with-small-language-models/4395112"
  },
  "azure_ai/Phi-4": {
    "max_tokens": 16384,

@@ -3467,7 +3470,7 @@
    "input_cost_per_token": 0.0000008,
    "output_cost_per_token": 0.000004,
    "cache_creation_input_token_cost": 0.000001,
    "cache_read_input_token_cost": 0.0000008,
    "cache_read_input_token_cost": 0.00000008,
    "litellm_provider": "anthropic",
    "mode": "chat",
    "supports_function_calling": true,
@ -4511,20 +4514,10 @@
|
|||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_image": 0,
|
||||
"input_cost_per_video_per_second": 0,
|
||||
"input_cost_per_audio_per_second": 0,
|
||||
"input_cost_per_token": 0,
|
||||
"input_cost_per_character": 0,
|
||||
"input_cost_per_token_above_128k_tokens": 0,
|
||||
"input_cost_per_character_above_128k_tokens": 0,
|
||||
"input_cost_per_image_above_128k_tokens": 0,
|
||||
"input_cost_per_video_per_second_above_128k_tokens": 0,
|
||||
"input_cost_per_audio_per_second_above_128k_tokens": 0,
|
||||
"output_cost_per_token": 0,
|
||||
"output_cost_per_character": 0,
|
||||
"output_cost_per_token_above_128k_tokens": 0,
|
||||
"output_cost_per_character_above_128k_tokens": 0,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_token_above_200k_tokens": 0.0000025,
|
||||
"output_cost_per_token": 0.00001,
|
||||
"output_cost_per_token_above_200k_tokens": 0.000015,
|
||||
"litellm_provider": "vertex_ai-language-models",
|
||||
"mode": "chat",
|
||||
"supports_system_messages": true,
|
||||
|
@ -4535,6 +4528,9 @@
|
|||
"supports_pdf_input": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_tool_choice": true,
|
||||
"supported_endpoints": ["/v1/chat/completions", "/v1/completions"],
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"gemini-2.0-pro-exp-02-05": {
|
||||
|
@ -4547,20 +4543,10 @@
|
|||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_image": 0,
|
||||
"input_cost_per_video_per_second": 0,
|
||||
"input_cost_per_audio_per_second": 0,
|
||||
"input_cost_per_token": 0,
|
||||
"input_cost_per_character": 0,
|
||||
"input_cost_per_token_above_128k_tokens": 0,
|
||||
"input_cost_per_character_above_128k_tokens": 0,
|
||||
"input_cost_per_image_above_128k_tokens": 0,
|
||||
"input_cost_per_video_per_second_above_128k_tokens": 0,
|
||||
"input_cost_per_audio_per_second_above_128k_tokens": 0,
|
||||
"output_cost_per_token": 0,
|
||||
"output_cost_per_character": 0,
|
||||
"output_cost_per_token_above_128k_tokens": 0,
|
||||
"output_cost_per_character_above_128k_tokens": 0,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_token_above_200k_tokens": 0.0000025,
|
||||
"output_cost_per_token": 0.00001,
|
||||
"output_cost_per_token_above_200k_tokens": 0.000015,
|
||||
"litellm_provider": "vertex_ai-language-models",
|
||||
"mode": "chat",
|
||||
"supports_system_messages": true,
|
||||
|
@ -4571,6 +4557,9 @@
|
|||
"supports_pdf_input": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_tool_choice": true,
|
||||
"supported_endpoints": ["/v1/chat/completions", "/v1/completions"],
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"gemini-2.0-flash-exp": {
|
||||
|
@ -4604,6 +4593,8 @@
|
|||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text", "image"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
|
@ -4628,6 +4619,8 @@
|
|||
"supports_response_schema": true,
|
||||
"supports_audio_output": true,
|
||||
"supports_tool_choice": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text", "image"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
|
||||
},
|
||||
"gemini-2.0-flash-thinking-exp": {
|
||||
|
@ -4661,6 +4654,8 @@
|
|||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text", "image"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
|
@ -4695,6 +4690,8 @@
|
|||
"supports_vision": true,
|
||||
"supports_response_schema": false,
|
||||
"supports_audio_output": false,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text", "image"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
|
@ -4720,6 +4717,7 @@
|
|||
"supports_audio_output": true,
|
||||
"supports_audio_input": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text", "image"],
|
||||
"supports_tool_choice": true,
|
||||
"source": "https://ai.google.dev/pricing#2_0flash"
|
||||
},
|
||||
|
@ -4742,6 +4740,32 @@
|
|||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
"gemini-2.0-flash-lite-001": {
|
||||
"max_input_tokens": 1048576,
|
||||
"max_output_tokens": 8192,
|
||||
"max_images_per_prompt": 3000,
|
||||
"max_videos_per_prompt": 10,
|
||||
"max_video_length": 1,
|
||||
"max_audio_length_hours": 8.4,
|
||||
"max_audio_per_prompt": 1,
|
||||
"max_pdf_size_mb": 50,
|
||||
"input_cost_per_audio_token": 0.000000075,
|
||||
"input_cost_per_token": 0.000000075,
|
||||
"output_cost_per_token": 0.0000003,
|
||||
"litellm_provider": "vertex_ai-language-models",
|
||||
"mode": "chat",
|
||||
"supports_system_messages": true,
|
||||
"supports_function_calling": true,
|
||||
"supports_vision": true,
|
||||
"supports_response_schema": true,
|
||||
"supports_audio_output": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
|
@ -4807,6 +4831,7 @@
|
|||
"supports_audio_output": true,
|
||||
"supports_audio_input": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text", "image"],
|
||||
"supports_tool_choice": true,
|
||||
"source": "https://ai.google.dev/pricing#2_0flash"
|
||||
},
|
||||
|
@ -4832,6 +4857,8 @@
|
|||
"supports_response_schema": true,
|
||||
"supports_audio_output": true,
|
||||
"supports_tool_choice": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.0-flash-lite"
|
||||
},
|
||||
"gemini/gemini-2.0-flash-001": {
|
||||
|
@ -4857,6 +4884,8 @@
|
|||
"supports_response_schema": true,
|
||||
"supports_audio_output": false,
|
||||
"supports_tool_choice": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text", "image"],
|
||||
"source": "https://ai.google.dev/pricing#2_0flash"
|
||||
},
|
||||
"gemini/gemini-2.5-pro-preview-03-25": {
|
||||
|
@ -4871,9 +4900,9 @@
|
|||
"max_pdf_size_mb": 30,
|
||||
"input_cost_per_audio_token": 0.0000007,
|
||||
"input_cost_per_token": 0.00000125,
|
||||
"input_cost_per_token_above_128k_tokens": 0.0000025,
|
||||
"input_cost_per_token_above_200k_tokens": 0.0000025,
|
||||
"output_cost_per_token": 0.0000010,
|
||||
"output_cost_per_token_above_128k_tokens": 0.000015,
|
||||
"output_cost_per_token_above_200k_tokens": 0.000015,
|
||||
"litellm_provider": "gemini",
|
||||
"mode": "chat",
|
||||
"rpm": 10000,
|
||||
|
@ -4884,6 +4913,8 @@
|
|||
"supports_response_schema": true,
|
||||
"supports_audio_output": false,
|
||||
"supports_tool_choice": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview"
|
||||
},
|
||||
"gemini/gemini-2.0-flash-exp": {
|
||||
|
@ -4919,6 +4950,8 @@
|
|||
"supports_audio_output": true,
|
||||
"tpm": 4000000,
|
||||
"rpm": 10,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text", "image"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
|
@ -4945,6 +4978,8 @@
|
|||
"supports_response_schema": true,
|
||||
"supports_audio_output": false,
|
||||
"supports_tool_choice": true,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash-lite"
|
||||
},
|
||||
"gemini/gemini-2.0-flash-thinking-exp": {
|
||||
|
@ -4980,6 +5015,8 @@
|
|||
"supports_audio_output": true,
|
||||
"tpm": 4000000,
|
||||
"rpm": 10,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text", "image"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
|
@ -5016,6 +5053,8 @@
|
|||
"supports_audio_output": true,
|
||||
"tpm": 4000000,
|
||||
"rpm": 10,
|
||||
"supported_modalities": ["text", "image", "audio", "video"],
|
||||
"supported_output_modalities": ["text", "image"],
|
||||
"source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
|
||||
"supports_tool_choice": true
|
||||
},
|
||||
|
|
|
@@ -29,14 +29,14 @@ model_list:
      model: databricks/databricks-claude-3-7-sonnet
      api_key: os.environ/DATABRICKS_API_KEY
      api_base: os.environ/DATABRICKS_API_BASE
  - model_name: "gemini/gemini-2.0-flash"
  - model_name: "llmaas-meta/llama-3.1-8b-instruct"
    litellm_params:
      model: gemini/gemini-2.0-flash
      api_key: os.environ/GEMINI_API_KEY
  - model_name: claude-3-5-sonnet-20240620
    litellm_params:
      model: anthropic/claude-3-5-sonnet-20240620
      api_key: os.environ/ANTHROPIC_API_KEY
      model: nvidia_nim/meta/llama-3.3-70b-instruct
      api_key: "invalid"
      api_base: "http://0.0.0.0:8090"
    model_info:
      input_cost_per_token: "100"
      output_cost_per_token: "100"

litellm_settings:
  num_retries: 0
@@ -1625,6 +1625,7 @@ class LiteLLM_UserTable(LiteLLMPydanticObjectBase):
    model_max_budget: Optional[Dict] = {}
    model_spend: Optional[Dict] = {}
    user_email: Optional[str] = None
    user_alias: Optional[str] = None
    models: list = []
    tpm_limit: Optional[int] = None
    rpm_limit: Optional[int] = None
@@ -51,9 +51,6 @@ def decrypt_value_helper(value: str):
        # if it's not str - do not decrypt it, return the value
        return value
    except Exception as e:
        import traceback

        traceback.print_stack()
        verbose_proxy_logger.error(
            f"Error decrypting value, Did your master_key/salt key change recently? \nError: {str(e)}\nSet permanent salt key - https://docs.litellm.ai/docs/proxy/prod#5-set-litellm-salt-key"
        )
litellm/proxy/common_utils/html_forms/jwt_display_template.py (new file, 284 lines)

@@ -0,0 +1,284 @@
|
|||
# JWT display template for SSO debug callback
|
||||
jwt_display_template = """
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8">
|
||||
<title>LiteLLM SSO Debug - JWT Information</title>
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
||||
<style>
|
||||
body {
|
||||
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif;
|
||||
background-color: #f8fafc;
|
||||
margin: 0;
|
||||
padding: 20px;
|
||||
display: flex;
|
||||
justify-content: center;
|
||||
align-items: center;
|
||||
min-height: 100vh;
|
||||
color: #333;
|
||||
}
|
||||
|
||||
.container {
|
||||
background-color: #fff;
|
||||
padding: 40px;
|
||||
border-radius: 8px;
|
||||
box-shadow: 0 4px 12px rgba(0, 0, 0, 0.1);
|
||||
width: 800px;
|
||||
max-width: 100%;
|
||||
}
|
||||
|
||||
.logo-container {
|
||||
text-align: center;
|
||||
margin-bottom: 30px;
|
||||
}
|
||||
|
||||
.logo {
|
||||
font-size: 24px;
|
||||
font-weight: 600;
|
||||
color: #1e293b;
|
||||
}
|
||||
|
||||
h2 {
|
||||
margin: 0 0 10px;
|
||||
color: #1e293b;
|
||||
font-size: 28px;
|
||||
font-weight: 600;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.subtitle {
|
||||
color: #64748b;
|
||||
margin: 0 0 20px;
|
||||
font-size: 16px;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.info-box {
|
||||
background-color: #f1f5f9;
|
||||
border-radius: 6px;
|
||||
padding: 20px;
|
||||
margin-bottom: 30px;
|
||||
border-left: 4px solid #2563eb;
|
||||
}
|
||||
|
||||
.success-box {
|
||||
background-color: #f0fdf4;
|
||||
border-radius: 6px;
|
||||
padding: 20px;
|
||||
margin-bottom: 30px;
|
||||
border-left: 4px solid #16a34a;
|
||||
}
|
||||
|
||||
.info-header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
margin-bottom: 12px;
|
||||
color: #1e40af;
|
||||
font-weight: 600;
|
||||
font-size: 16px;
|
||||
}
|
||||
|
||||
.success-header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
margin-bottom: 12px;
|
||||
color: #166534;
|
||||
font-weight: 600;
|
||||
font-size: 16px;
|
||||
}
|
||||
|
||||
.info-header svg, .success-header svg {
|
||||
margin-right: 8px;
|
||||
}
|
||||
|
||||
.data-container {
|
||||
margin-top: 20px;
|
||||
}
|
||||
|
||||
.data-row {
|
||||
display: flex;
|
||||
border-bottom: 1px solid #e2e8f0;
|
||||
padding: 12px 0;
|
||||
}
|
||||
|
||||
.data-row:last-child {
|
||||
border-bottom: none;
|
||||
}
|
||||
|
||||
.data-label {
|
||||
font-weight: 500;
|
||||
color: #334155;
|
||||
width: 180px;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
.data-value {
|
||||
color: #475569;
|
||||
word-break: break-all;
|
||||
}
|
||||
|
||||
.jwt-container {
|
||||
background-color: #f8fafc;
|
||||
border-radius: 6px;
|
||||
padding: 15px;
|
||||
margin-top: 20px;
|
||||
overflow-x: auto;
|
||||
border: 1px solid #e2e8f0;
|
||||
}
|
||||
|
||||
.jwt-text {
|
||||
font-family: monospace;
|
||||
white-space: pre-wrap;
|
||||
word-break: break-all;
|
||||
margin: 0;
|
||||
color: #334155;
|
||||
}
|
||||
|
||||
.back-button {
|
||||
display: inline-block;
|
||||
background-color: #6466E9;
|
||||
color: #fff;
|
||||
text-decoration: none;
|
||||
padding: 10px 16px;
|
||||
border-radius: 6px;
|
||||
font-weight: 500;
|
||||
margin-top: 20px;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
.back-button:hover {
|
||||
background-color: #4138C2;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
.buttons {
|
||||
display: flex;
|
||||
gap: 10px;
|
||||
margin-top: 20px;
|
||||
}
|
||||
|
||||
.copy-button {
|
||||
background-color: #e2e8f0;
|
||||
color: #334155;
|
||||
border: none;
|
||||
padding: 8px 12px;
|
||||
border-radius: 4px;
|
||||
cursor: pointer;
|
||||
font-size: 14px;
|
||||
display: flex;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.copy-button:hover {
|
||||
background-color: #cbd5e1;
|
||||
}
|
||||
|
||||
.copy-button svg {
|
||||
margin-right: 6px;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="container">
|
||||
<div class="logo-container">
|
||||
<div class="logo">
|
||||
🚅 LiteLLM
|
||||
</div>
|
||||
</div>
|
||||
<h2>SSO Debug Information</h2>
|
||||
<p class="subtitle">Results from the SSO authentication process.</p>
|
||||
|
||||
<div class="success-box">
|
||||
<div class="success-header">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
|
||||
<path d="M22 11.08V12a10 10 0 1 1-5.93-9.14"></path>
|
||||
<polyline points="22 4 12 14.01 9 11.01"></polyline>
|
||||
</svg>
|
||||
Authentication Successful
|
||||
</div>
|
||||
<p>The SSO authentication completed successfully. Below is the information returned by the provider.</p>
|
||||
</div>
|
||||
|
||||
<div class="data-container" id="userData">
|
||||
<!-- Data will be inserted here by JavaScript -->
|
||||
</div>
|
||||
|
||||
<div class="info-box">
|
||||
<div class="info-header">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
|
||||
<circle cx="12" cy="12" r="10"></circle>
|
||||
<line x1="12" y1="16" x2="12" y2="12"></line>
|
||||
<line x1="12" y1="8" x2="12.01" y2="8"></line>
|
||||
</svg>
|
||||
JSON Representation
|
||||
</div>
|
||||
<div class="jwt-container">
|
||||
<pre class="jwt-text" id="jsonData">Loading...</pre>
|
||||
</div>
|
||||
<div class="buttons">
|
||||
<button class="copy-button" onclick="copyToClipboard('jsonData')">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" width="14" height="14" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2" stroke-linecap="round" stroke-linejoin="round">
|
||||
<rect x="9" y="9" width="13" height="13" rx="2" ry="2"></rect>
|
||||
<path d="M5 15H4a2 2 0 0 1-2-2V4a2 2 0 0 1 2-2h9a2 2 0 0 1 2 2v1"></path>
|
||||
</svg>
|
||||
Copy to Clipboard
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<a href="/sso/debug/login" class="back-button">
|
||||
Try Another SSO Login
|
||||
</a>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
// This will be populated with the actual data from the server
|
||||
const userData = SSO_DATA;
|
||||
|
||||
function renderUserData() {
|
||||
const container = document.getElementById('userData');
|
||||
const jsonDisplay = document.getElementById('jsonData');
|
||||
|
||||
// Format JSON with indentation for display
|
||||
jsonDisplay.textContent = JSON.stringify(userData, null, 2);
|
||||
|
||||
// Clear container
|
||||
container.innerHTML = '';
|
||||
|
||||
// Add each key-value pair to the UI
|
||||
for (const [key, value] of Object.entries(userData)) {
|
||||
if (typeof value !== 'object' || value === null) {
|
||||
const row = document.createElement('div');
|
||||
row.className = 'data-row';
|
||||
|
||||
const label = document.createElement('div');
|
||||
label.className = 'data-label';
|
||||
label.textContent = key;
|
||||
|
||||
const dataValue = document.createElement('div');
|
||||
dataValue.className = 'data-value';
|
||||
dataValue.textContent = value !== null ? value : 'null';
|
||||
|
||||
row.appendChild(label);
|
||||
row.appendChild(dataValue);
|
||||
container.appendChild(row);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
function copyToClipboard(elementId) {
|
||||
const text = document.getElementById(elementId).textContent;
|
||||
navigator.clipboard.writeText(text).then(() => {
|
||||
alert('Copied to clipboard!');
|
||||
}).catch(err => {
|
||||
console.error('Could not copy text: ', err);
|
||||
});
|
||||
}
|
||||
|
||||
// Render the data when the page loads
|
||||
document.addEventListener('DOMContentLoaded', renderUserData);
|
||||
</script>
|
||||
</body>
|
||||
</html>
|
||||
"""
|
|
@@ -1434,7 +1434,7 @@ async def get_user_daily_activity(
        default=1, description="Page number for pagination", ge=1
    ),
    page_size: int = fastapi.Query(
        default=50, description="Items per page", ge=1, le=100
        default=50, description="Items per page", ge=1, le=1000
    ),
    user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
) -> SpendAnalyticsPaginatedResponse:

File diff suppressed because it is too large
@@ -4,16 +4,26 @@ import json
import uuid
from base64 import b64encode
from datetime import datetime
from typing import Dict, List, Optional, Union
from typing import Dict, List, Optional, Tuple, Union
from urllib.parse import parse_qs, urlencode, urlparse

import httpx
from fastapi import APIRouter, Depends, HTTPException, Request, Response, status
from fastapi import (
    APIRouter,
    Depends,
    HTTPException,
    Request,
    Response,
    UploadFile,
    status,
)
from fastapi.responses import StreamingResponse
from starlette.datastructures import UploadFile as StarletteUploadFile

import litellm
from litellm._logging import verbose_proxy_logger
from litellm.integrations.custom_logger import CustomLogger
from litellm.litellm_core_utils.safe_json_dumps import safe_dumps
from litellm.llms.custom_httpx.http_handler import get_async_httpx_client
from litellm.proxy._types import (
    ConfigFieldInfo,
@@ -358,6 +368,92 @@ class HttpPassThroughEndpointHelpers:
        )
        return response

    @staticmethod
    async def non_streaming_http_request_handler(
        request: Request,
        async_client: httpx.AsyncClient,
        url: httpx.URL,
        headers: dict,
        requested_query_params: Optional[dict] = None,
        _parsed_body: Optional[dict] = None,
    ) -> httpx.Response:
        """
        Handle non-streaming HTTP requests

        Handles special cases when GET requests, multipart/form-data requests, and generic httpx requests
        """
        if request.method == "GET":
            response = await async_client.request(
                method=request.method,
                url=url,
                headers=headers,
                params=requested_query_params,
            )
        elif HttpPassThroughEndpointHelpers.is_multipart(request) is True:
            return await HttpPassThroughEndpointHelpers.make_multipart_http_request(
                request=request,
                async_client=async_client,
                url=url,
                headers=headers,
                requested_query_params=requested_query_params,
            )
        else:
            # Generic httpx method
            response = await async_client.request(
                method=request.method,
                url=url,
                headers=headers,
                params=requested_query_params,
                json=_parsed_body,
            )
        return response

    @staticmethod
    def is_multipart(request: Request) -> bool:
        """Check if the request is a multipart/form-data request"""
        return "multipart/form-data" in request.headers.get("content-type", "")

    @staticmethod
    async def _build_request_files_from_upload_file(
        upload_file: Union[UploadFile, StarletteUploadFile],
    ) -> Tuple[Optional[str], bytes, Optional[str]]:
        """Build a request files dict from an UploadFile object"""
        file_content = await upload_file.read()
        return (upload_file.filename, file_content, upload_file.content_type)

    @staticmethod
    async def make_multipart_http_request(
        request: Request,
        async_client: httpx.AsyncClient,
        url: httpx.URL,
        headers: dict,
        requested_query_params: Optional[dict] = None,
    ) -> httpx.Response:
        """Process multipart/form-data requests, handling both files and form fields"""
        form_data = await request.form()
        files = {}
        form_data_dict = {}

        for field_name, field_value in form_data.items():
            if isinstance(field_value, (StarletteUploadFile, UploadFile)):
                files[field_name] = (
                    await HttpPassThroughEndpointHelpers._build_request_files_from_upload_file(
                        upload_file=field_value
                    )
                )
            else:
                form_data_dict[field_name] = field_value

        response = await async_client.request(
            method=request.method,
            url=url,
            headers=headers,
            params=requested_query_params,
            files=files,
            data=form_data_dict,
        )
        return response


async def pass_through_request(  # noqa: PLR0915
    request: Request,
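The multipart helper above rebuilds an inbound form on the outbound side: file parts are forwarded through httpx's `files=` argument as `(filename, content, content_type)` tuples, and plain form fields travel in `data=`. A standalone sketch of that outbound call (the target URL and field names are illustrative):

```python
import httpx


async def forward_multipart() -> httpx.Response:
    async with httpx.AsyncClient() as client:
        # same tuple shape as _build_request_files_from_upload_file returns
        return await client.request(
            method="POST",
            url="https://example.com/upstream/audio/transcriptions",  # illustrative target
            files={"file": ("clip.wav", b"<binary audio bytes>", "audio/wav")},
            data={"model": "whisper-1"},  # non-file form fields go in data=
        )
```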
@@ -424,7 +520,7 @@ async def pass_through_request(  # noqa: PLR0915
    start_time = datetime.now()
    logging_obj = Logging(
        model="unknown",
        messages=[{"role": "user", "content": json.dumps(_parsed_body)}],
        messages=[{"role": "user", "content": safe_dumps(_parsed_body)}],
        stream=False,
        call_type="pass_through_endpoint",
        start_time=start_time,

@@ -453,7 +549,6 @@ async def pass_through_request(  # noqa: PLR0915
    logging_obj.model_call_details["litellm_call_id"] = litellm_call_id

    # combine url with query params for logging

    requested_query_params: Optional[dict] = (
        query_params or request.query_params.__dict__
    )

@@ -474,7 +569,7 @@ async def pass_through_request(  # noqa: PLR0915
        logging_url = str(url) + "?" + requested_query_params_str

    logging_obj.pre_call(
        input=[{"role": "user", "content": json.dumps(_parsed_body)}],
        input=[{"role": "user", "content": safe_dumps(_parsed_body)}],
        api_key="",
        additional_args={
            "complete_input_dict": _parsed_body,

@@ -525,22 +620,16 @@ async def pass_through_request(  # noqa: PLR0915
    )
    verbose_proxy_logger.debug("request body: {}".format(_parsed_body))

    if request.method == "GET":
        response = await async_client.request(
            method=request.method,
    response = (
        await HttpPassThroughEndpointHelpers.non_streaming_http_request_handler(
            request=request,
            async_client=async_client,
            url=url,
            headers=headers,
            params=requested_query_params,
            requested_query_params=requested_query_params,
            _parsed_body=_parsed_body,
        )
    else:
        response = await async_client.request(
            method=request.method,
            url=url,
            headers=headers,
            params=requested_query_params,
            json=_parsed_body,
        )

    )
    verbose_proxy_logger.debug("response.headers= %s", response.headers)

    if _is_streaming_response(response) is True:
@@ -10,11 +10,12 @@ model_list:
      api_key: fake-key

litellm_settings:
  require_auth_for_metrics_endpoint: true

  callbacks: ["prometheus"]
  service_callback: ["prometheus_system"]

router_settings:
  enable_tag_filtering: True # 👈 Key Change
default_team_settings:
  - team_id: test_dev
    success_callback: ["langfuse", "s3"]
    langfuse_secret: secret-test-key
    langfuse_public_key: public-test-key
  - team_id: my_workflows
    success_callback: ["langfuse", "s3"]
    langfuse_secret: secret-workflows-key
    langfuse_public_key: public-workflows-key
@@ -139,6 +139,7 @@ from litellm.litellm_core_utils.core_helpers import (
)
from litellm.litellm_core_utils.credential_accessor import CredentialAccessor
from litellm.litellm_core_utils.litellm_logging import Logging as LiteLLMLoggingObj
from litellm.litellm_core_utils.sensitive_data_masker import SensitiveDataMasker
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler, HTTPHandler
from litellm.proxy._experimental.mcp_server.server import router as mcp_router
from litellm.proxy._experimental.mcp_server.tool_registry import (

@@ -387,6 +388,7 @@ global_max_parallel_request_retries_env: Optional[str] = os.getenv(
    "LITELLM_GLOBAL_MAX_PARALLEL_REQUEST_RETRIES"
)
proxy_state = ProxyState()
SENSITIVE_DATA_MASKER = SensitiveDataMasker()
if global_max_parallel_request_retries_env is None:
    global_max_parallel_request_retries: int = 3
else:

@@ -1397,7 +1399,9 @@ class ProxyConfig:
        team_config: dict = {}
        for team in all_teams_config:
            if "team_id" not in team:
                raise Exception(f"team_id missing from team: {team}")
                raise Exception(
                    f"team_id missing from team: {SENSITIVE_DATA_MASKER.mask_dict(team)}"
                )
            if team_id == team["team_id"]:
                team_config = team
                break
@@ -14,6 +14,9 @@ def get_instance_fn(value: str, config_file_path: Optional[str] = None) -> Any:
        module_name = ".".join(parts[:-1])
        instance_name = parts[-1]

        # Security: Check if the module name contains any dangerous modules that can execute arbitrary code
        security_checks(module_name=module_name)

        # If config_file_path is provided, use it to determine the module spec and load the module
        if config_file_path is not None:
            directory = os.path.dirname(config_file_path)

@@ -47,6 +50,35 @@ def get_instance_fn(value: str, config_file_path: Optional[str] = None) -> Any:
        raise e


def security_checks(
    module_name: str,
):
    """
    This function checks if the module name contains any dangerous modules that can execute arbitrary code.

    Reference: https://huntr.com/bounties/1d98bebb-6cf4-46c9-87c3-d3b1972973b5
    """
    DANGEROUS_MODULES = [
        "os",
        "sys",
        "subprocess",
        "shutil",
        "socket",
        "multiprocessing",
        "threading",
        "ctypes",
        "pickle",
        "marshal",
        "builtins",
        "__builtin__",
    ]
    # Security: Check if the module name contains any dangerous modules
    if any(dangerous in module_name.lower() for dangerous in DANGEROUS_MODULES):
        raise ImportError(
            f"Importing from module {module_name} is not allowed for security reasons"
        )


def validate_custom_validate_return_type(
    fn: Optional[Callable[..., Any]]
) -> Optional[Callable[..., Literal[True]]]:
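The check above is a plain substring test against the dotted module path, so an ordinary callback module passes while anything routed through `os`, `subprocess`, etc. is rejected before import. A standalone sketch that mirrors the same logic (module paths here are made up for illustration; the real helper lives in the proxy utils shown in the hunk):

```python
DANGEROUS_MODULES = ["os", "sys", "subprocess", "shutil", "socket", "pickle"]


def is_allowed(module_name: str) -> bool:
    # mirrors the substring check added in the diff
    return not any(dangerous in module_name.lower() for dangerous in DANGEROUS_MODULES)


print(is_allowed("my_hooks.custom_logger"))  # True  - ordinary callback module
print(is_allowed("subprocess"))              # False - blocked outright
print(is_allowed("my_pkg.os_helpers"))       # False - substring match also blocks this
```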
@@ -19,6 +19,7 @@ class httpxSpecialProvider(str, Enum):
    SecretManager = "secret_manager"
    PassThroughEndpoint = "pass_through_endpoint"
    PromptFactory = "prompt_factory"
    SSO_HANDLER = "sso_handler"


VerifyTypes = Union[str, bool, ssl.SSLContext]
@@ -187,6 +187,7 @@ class Tools(TypedDict, total=False):
    function_declarations: List[FunctionDeclaration]
    googleSearch: dict
    googleSearchRetrieval: dict
    enterpriseWebSearch: dict
    code_execution: dict
    retrieval: Retrieval
@@ -497,6 +498,51 @@ class OutputConfig(TypedDict, total=False):
    gcsDestination: GcsDestination


class GcsBucketResponse(TypedDict):
    """
    TypedDict for GCS bucket upload response

    Attributes:
        kind: The kind of item this is. For objects, this is always storage#object
        id: The ID of the object
        selfLink: The link to this object
        mediaLink: The link to download the object
        name: The name of the object
        bucket: The name of the bucket containing this object
        generation: The content generation of this object
        metageneration: The metadata generation of this object
        contentType: The content type of the object
        storageClass: The storage class of the object
        size: The size of the object in bytes
        md5Hash: The MD5 hash of the object
        crc32c: The CRC32c checksum of the object
        etag: The ETag of the object
        timeCreated: The creation time of the object
        updated: The last update time of the object
        timeStorageClassUpdated: The time the storage class was last updated
        timeFinalized: The time the object was finalized
    """

    kind: Literal["storage#object"]
    id: str
    selfLink: str
    mediaLink: str
    name: str
    bucket: str
    generation: str
    metageneration: str
    contentType: str
    storageClass: str
    size: str
    md5Hash: str
    crc32c: str
    etag: str
    timeCreated: str
    updated: str
    timeStorageClassUpdated: str
    timeFinalized: str


class VertexAIBatchPredictionJob(TypedDict):
    displayName: str
    model: str
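`GcsBucketResponse.id` comes back from GCS as `<bucket>/<object name>/<generation number>`; the files transformation earlier in this PR drops the trailing generation and prefixes `gs://` to build the OpenAI-style file id. A small sketch with illustrative values:

```python
# Illustrative subset of a GCS upload response (sizes are strings in the API)
response_object = {
    "kind": "storage#object",
    "id": "my-bucket/litellm-vertex-files/gemini-2.0-flash-001/abc123/1744072345",
    "name": "litellm-vertex-files/gemini-2.0-flash-001/abc123",
    "bucket": "my-bucket",
    "size": "2048",
    "timeCreated": "2025-04-08T00:00:00.000Z",
}

# Same id handling as transform_create_file_response: strip the generation suffix
gcs_id = "/".join(response_object["id"].split("/")[:-1])
file_id = f"gs://{gcs_id}"
print(file_id)  # prints the object path without the trailing generation number
```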
litellm/types/proxy/management_endpoints/ui_sso.py (new file, 27 lines)

@@ -0,0 +1,27 @@
from typing import List, Optional, TypedDict


class MicrosoftGraphAPIUserGroupDirectoryObject(TypedDict, total=False):
    """Model for Microsoft Graph API directory object"""

    odata_type: Optional[str]
    id: Optional[str]
    deletedDateTime: Optional[str]
    description: Optional[str]
    displayName: Optional[str]
    roleTemplateId: Optional[str]


class MicrosoftGraphAPIUserGroupResponse(TypedDict, total=False):
    """Model for Microsoft Graph API user groups response"""

    odata_context: Optional[str]
    odata_nextLink: Optional[str]
    value: Optional[List[MicrosoftGraphAPIUserGroupDirectoryObject]]


class MicrosoftServicePrincipalTeam(TypedDict, total=False):
    """Model for Microsoft Service Principal Team"""

    principalDisplayName: Optional[str]
    principalId: Optional[str]
@@ -2,7 +2,7 @@ import json
import time
import uuid
from enum import Enum
from typing import Any, Dict, List, Literal, Optional, Tuple, Union
from typing import Any, Dict, List, Literal, Mapping, Optional, Tuple, Union

from aiohttp import FormData
from openai._models import BaseModel as OpenAIObject

@@ -120,6 +120,9 @@ class ModelInfoBase(ProviderSpecificModelInfo, total=False):
    input_cost_per_character: Optional[float]  # only for vertex ai models
    input_cost_per_audio_token: Optional[float]
    input_cost_per_token_above_128k_tokens: Optional[float]  # only for vertex ai models
    input_cost_per_token_above_200k_tokens: Optional[
        float
    ]  # only for vertex ai gemini-2.5-pro models
    input_cost_per_character_above_128k_tokens: Optional[
        float
    ]  # only for vertex ai models

@@ -136,6 +139,9 @@ class ModelInfoBase(ProviderSpecificModelInfo, total=False):
    output_cost_per_token_above_128k_tokens: Optional[
        float
    ]  # only for vertex ai models
    output_cost_per_token_above_200k_tokens: Optional[
        float
    ]  # only for vertex ai gemini-2.5-pro models
    output_cost_per_character_above_128k_tokens: Optional[
        float
    ]  # only for vertex ai models

@@ -2170,3 +2176,20 @@ class CreateCredentialItem(CredentialBase):
        if not values.get("credential_values") and not values.get("model_id"):
            raise ValueError("Either credential_values or model_id must be set")
        return values


class ExtractedFileData(TypedDict):
    """
    TypedDict for storing processed file data

    Attributes:
        filename: Name of the file if provided
        content: The file content in bytes
        content_type: MIME type of the file
        headers: Any additional headers for the file
    """

    filename: Optional[str]
    content: bytes
    content_type: Optional[str]
    headers: Mapping[str, str]
|
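For illustration only, a hypothetical instance of the new TypedDict; the field values below are made up.

```python
from litellm.types.utils import ExtractedFileData

# ExtractedFileData is a plain TypedDict, so instances are ordinary dicts.
file_data: ExtractedFileData = {
    "filename": "example.jsonl",
    "content": b'{"custom_id": "request-1"}\n',
    "content_type": "application/json",
    "headers": {},
}
```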
@@ -4532,6 +4532,9 @@ def _get_model_info_helper(  # noqa: PLR0915
            input_cost_per_token_above_128k_tokens=_model_info.get(
                "input_cost_per_token_above_128k_tokens", None
            ),
            input_cost_per_token_above_200k_tokens=_model_info.get(
                "input_cost_per_token_above_200k_tokens", None
            ),
            input_cost_per_query=_model_info.get("input_cost_per_query", None),
            input_cost_per_second=_model_info.get("input_cost_per_second", None),
            input_cost_per_audio_token=_model_info.get(
@@ -4556,6 +4559,9 @@ def _get_model_info_helper(  # noqa: PLR0915
            output_cost_per_character_above_128k_tokens=_model_info.get(
                "output_cost_per_character_above_128k_tokens", None
            ),
            output_cost_per_token_above_200k_tokens=_model_info.get(
                "output_cost_per_token_above_200k_tokens", None
            ),
            output_cost_per_second=_model_info.get("output_cost_per_second", None),
            output_cost_per_image=_model_info.get("output_cost_per_image", None),
            output_vector_size=_model_info.get("output_vector_size", None),
@@ -6517,6 +6523,10 @@ class ProviderConfigManager:
            )

            return GoogleAIStudioFilesHandler()
        elif LlmProviders.VERTEX_AI == provider:
            from litellm.llms.vertex_ai.files.transformation import VertexAIFilesConfig

            return VertexAIFilesConfig()
        return None
@@ -380,6 +380,7 @@
        "supports_tool_choice": true,
        "supports_native_streaming": false,
        "supported_modalities": ["text", "image"],
        "supported_output_modalities": ["text"],
        "supported_endpoints": ["/v1/responses", "/v1/batch"]
    },
    "o1-pro-2025-03-19": {
@@ -401,6 +402,7 @@
        "supports_tool_choice": true,
        "supports_native_streaming": false,
        "supported_modalities": ["text", "image"],
        "supported_output_modalities": ["text"],
        "supported_endpoints": ["/v1/responses", "/v1/batch"]
    },
    "o1": {
@@ -2409,25 +2411,26 @@
        "max_tokens": 4096,
        "max_input_tokens": 131072,
        "max_output_tokens": 4096,
-        "input_cost_per_token": 0,
-        "output_cost_per_token": 0,
+        "input_cost_per_token": 0.000000075,
+        "output_cost_per_token": 0.0000003,
        "litellm_provider": "azure_ai",
        "mode": "chat",
        "supports_function_calling": true,
-        "source": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/models-featured#microsoft"
+        "source": "https://techcommunity.microsoft.com/blog/Azure-AI-Services-blog/announcing-new-phi-pricing-empowering-your-business-with-small-language-models/4395112"
    },
    "azure_ai/Phi-4-multimodal-instruct": {
        "max_tokens": 4096,
        "max_input_tokens": 131072,
        "max_output_tokens": 4096,
-        "input_cost_per_token": 0,
-        "output_cost_per_token": 0,
+        "input_cost_per_token": 0.00000008,
+        "input_cost_per_audio_token": 0.000004,
+        "output_cost_per_token": 0.00032,
        "litellm_provider": "azure_ai",
        "mode": "chat",
        "supports_audio_input": true,
        "supports_function_calling": true,
        "supports_vision": true,
-        "source": "https://learn.microsoft.com/en-us/azure/ai-foundry/concepts/models-featured#microsoft"
+        "source": "https://techcommunity.microsoft.com/blog/Azure-AI-Services-blog/announcing-new-phi-pricing-empowering-your-business-with-small-language-models/4395112"
    },
    "azure_ai/Phi-4": {
        "max_tokens": 16384,
@@ -3467,7 +3470,7 @@
        "input_cost_per_token": 0.0000008,
        "output_cost_per_token": 0.000004,
        "cache_creation_input_token_cost": 0.000001,
-        "cache_read_input_token_cost": 0.0000008,
+        "cache_read_input_token_cost": 0.00000008,
        "litellm_provider": "anthropic",
        "mode": "chat",
        "supports_function_calling": true,
@@ -4511,20 +4514,10 @@
        "max_audio_length_hours": 8.4,
        "max_audio_per_prompt": 1,
        "max_pdf_size_mb": 30,
-        "input_cost_per_image": 0,
-        "input_cost_per_video_per_second": 0,
-        "input_cost_per_audio_per_second": 0,
-        "input_cost_per_token": 0,
-        "input_cost_per_character": 0,
-        "input_cost_per_token_above_128k_tokens": 0,
-        "input_cost_per_character_above_128k_tokens": 0,
-        "input_cost_per_image_above_128k_tokens": 0,
-        "input_cost_per_video_per_second_above_128k_tokens": 0,
-        "input_cost_per_audio_per_second_above_128k_tokens": 0,
-        "output_cost_per_token": 0,
-        "output_cost_per_character": 0,
-        "output_cost_per_token_above_128k_tokens": 0,
-        "output_cost_per_character_above_128k_tokens": 0,
+        "input_cost_per_token": 0.00000125,
+        "input_cost_per_token_above_200k_tokens": 0.0000025,
+        "output_cost_per_token": 0.00001,
+        "output_cost_per_token_above_200k_tokens": 0.000015,
        "litellm_provider": "vertex_ai-language-models",
        "mode": "chat",
        "supports_system_messages": true,
@@ -4535,6 +4528,9 @@
        "supports_pdf_input": true,
        "supports_response_schema": true,
        "supports_tool_choice": true,
        "supported_endpoints": ["/v1/chat/completions", "/v1/completions"],
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
    },
    "gemini-2.0-pro-exp-02-05": {
@@ -4547,20 +4543,10 @@
        "max_audio_length_hours": 8.4,
        "max_audio_per_prompt": 1,
        "max_pdf_size_mb": 30,
-        "input_cost_per_image": 0,
-        "input_cost_per_video_per_second": 0,
-        "input_cost_per_audio_per_second": 0,
-        "input_cost_per_token": 0,
-        "input_cost_per_character": 0,
-        "input_cost_per_token_above_128k_tokens": 0,
-        "input_cost_per_character_above_128k_tokens": 0,
-        "input_cost_per_image_above_128k_tokens": 0,
-        "input_cost_per_video_per_second_above_128k_tokens": 0,
-        "input_cost_per_audio_per_second_above_128k_tokens": 0,
-        "output_cost_per_token": 0,
-        "output_cost_per_character": 0,
-        "output_cost_per_token_above_128k_tokens": 0,
-        "output_cost_per_character_above_128k_tokens": 0,
+        "input_cost_per_token": 0.00000125,
+        "input_cost_per_token_above_200k_tokens": 0.0000025,
+        "output_cost_per_token": 0.00001,
+        "output_cost_per_token_above_200k_tokens": 0.000015,
        "litellm_provider": "vertex_ai-language-models",
        "mode": "chat",
        "supports_system_messages": true,
@@ -4571,6 +4557,9 @@
        "supports_pdf_input": true,
        "supports_response_schema": true,
        "supports_tool_choice": true,
        "supported_endpoints": ["/v1/chat/completions", "/v1/completions"],
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
    },
    "gemini-2.0-flash-exp": {
@@ -4604,6 +4593,8 @@
        "supports_vision": true,
        "supports_response_schema": true,
        "supports_audio_output": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text", "image"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing",
        "supports_tool_choice": true
    },
@@ -4628,6 +4619,8 @@
        "supports_response_schema": true,
        "supports_audio_output": true,
        "supports_tool_choice": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text", "image"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/pricing"
    },
    "gemini-2.0-flash-thinking-exp": {
@@ -4661,6 +4654,8 @@
        "supports_vision": true,
        "supports_response_schema": true,
        "supports_audio_output": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text", "image"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
        "supports_tool_choice": true
    },
@@ -4695,6 +4690,8 @@
        "supports_vision": true,
        "supports_response_schema": false,
        "supports_audio_output": false,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text", "image"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
        "supports_tool_choice": true
    },
@@ -4720,6 +4717,7 @@
        "supports_audio_output": true,
        "supports_audio_input": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text", "image"],
        "supports_tool_choice": true,
        "source": "https://ai.google.dev/pricing#2_0flash"
    },
@@ -4742,6 +4740,32 @@
        "supports_vision": true,
        "supports_response_schema": true,
        "supports_audio_output": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
        "supports_tool_choice": true
    },
    "gemini-2.0-flash-lite-001": {
        "max_input_tokens": 1048576,
        "max_output_tokens": 8192,
        "max_images_per_prompt": 3000,
        "max_videos_per_prompt": 10,
        "max_video_length": 1,
        "max_audio_length_hours": 8.4,
        "max_audio_per_prompt": 1,
        "max_pdf_size_mb": 50,
        "input_cost_per_audio_token": 0.000000075,
        "input_cost_per_token": 0.000000075,
        "output_cost_per_token": 0.0000003,
        "litellm_provider": "vertex_ai-language-models",
        "mode": "chat",
        "supports_system_messages": true,
        "supports_function_calling": true,
        "supports_vision": true,
        "supports_response_schema": true,
        "supports_audio_output": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
        "supports_tool_choice": true
    },
@@ -4807,6 +4831,7 @@
        "supports_audio_output": true,
        "supports_audio_input": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text", "image"],
        "supports_tool_choice": true,
        "source": "https://ai.google.dev/pricing#2_0flash"
    },
@@ -4832,6 +4857,8 @@
        "supports_response_schema": true,
        "supports_audio_output": true,
        "supports_tool_choice": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text"],
        "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.0-flash-lite"
    },
    "gemini/gemini-2.0-flash-001": {
@@ -4857,6 +4884,8 @@
        "supports_response_schema": true,
        "supports_audio_output": false,
        "supports_tool_choice": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text", "image"],
        "source": "https://ai.google.dev/pricing#2_0flash"
    },
    "gemini/gemini-2.5-pro-preview-03-25": {
@@ -4871,9 +4900,9 @@
        "max_pdf_size_mb": 30,
        "input_cost_per_audio_token": 0.0000007,
        "input_cost_per_token": 0.00000125,
-        "input_cost_per_token_above_128k_tokens": 0.0000025,
+        "input_cost_per_token_above_200k_tokens": 0.0000025,
        "output_cost_per_token": 0.0000010,
-        "output_cost_per_token_above_128k_tokens": 0.000015,
+        "output_cost_per_token_above_200k_tokens": 0.000015,
        "litellm_provider": "gemini",
        "mode": "chat",
        "rpm": 10000,
@@ -4884,6 +4913,8 @@
        "supports_response_schema": true,
        "supports_audio_output": false,
        "supports_tool_choice": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text"],
        "source": "https://ai.google.dev/gemini-api/docs/pricing#gemini-2.5-pro-preview"
    },
    "gemini/gemini-2.0-flash-exp": {
@@ -4919,6 +4950,8 @@
        "supports_audio_output": true,
        "tpm": 4000000,
        "rpm": 10,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text", "image"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
        "supports_tool_choice": true
    },
@@ -4945,6 +4978,8 @@
        "supports_response_schema": true,
        "supports_audio_output": false,
        "supports_tool_choice": true,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash-lite"
    },
    "gemini/gemini-2.0-flash-thinking-exp": {
@@ -4980,6 +5015,8 @@
        "supports_audio_output": true,
        "tpm": 4000000,
        "rpm": 10,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text", "image"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
        "supports_tool_choice": true
    },
@@ -5016,6 +5053,8 @@
        "supports_audio_output": true,
        "tpm": 4000000,
        "rpm": 10,
        "supported_modalities": ["text", "image", "audio", "video"],
        "supported_output_modalities": ["text", "image"],
        "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#gemini-2.0-flash",
        "supports_tool_choice": true
    },
@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
-version = "1.65.4"
+version = "1.65.5"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT"
@@ -118,7 +118,7 @@ requires = ["poetry-core", "wheel"]
build-backend = "poetry.core.masonry.api"

[tool.commitizen]
-version = "1.65.4"
+version = "1.65.5"
version_files = [
    "pyproject.toml:^version"
]
@@ -423,25 +423,35 @@ mock_vertex_batch_response = {

@pytest.mark.asyncio
-async def test_avertex_batch_prediction():
-    with patch(
+async def test_avertex_batch_prediction(monkeypatch):
+    monkeypatch.setenv("GCS_BUCKET_NAME", "litellm-local")
+    from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+
+    client = AsyncHTTPHandler()
+
+    async def mock_side_effect(*args, **kwargs):
+        print("args", args, "kwargs", kwargs)
+        url = kwargs.get("url", "")
+        if "files" in url:
+            mock_response.json.return_value = mock_file_response
+        elif "batch" in url:
+            mock_response.json.return_value = mock_vertex_batch_response
+        mock_response.status_code = 200
+        return mock_response
+
+    with patch.object(
+        client, "post", side_effect=mock_side_effect
+    ) as mock_post, patch(
        "litellm.llms.custom_httpx.http_handler.AsyncHTTPHandler.post"
-    ) as mock_post:
+    ) as mock_global_post:
        # Configure mock responses
        mock_response = MagicMock()
        mock_response.raise_for_status.return_value = None

-        # Set up different responses for different API calls
-        async def mock_side_effect(*args, **kwargs):
-            url = kwargs.get("url", "")
-            if "files" in url:
-                mock_response.json.return_value = mock_file_response
-            elif "batch" in url:
-                mock_response.json.return_value = mock_vertex_batch_response
-            mock_response.status_code = 200
-            return mock_response
-
-        mock_post.side_effect = mock_side_effect
+        mock_global_post.side_effect = mock_side_effect

        # load_vertex_ai_credentials()
        litellm.set_verbose = True
@@ -455,6 +465,7 @@ async def test_avertex_batch_prediction():
            file=open(file_path, "rb"),
            purpose="batch",
            custom_llm_provider="vertex_ai",
+            client=client
        )
        print("Response from creating file=", file_obj)
@@ -0,0 +1,50 @@
import json
import os
import sys

import pytest
from fastapi.testclient import TestClient

import litellm
from litellm.litellm_core_utils.llm_cost_calc.tool_call_cost_tracking import (
    StandardBuiltInToolCostTracking,
)
from litellm.types.llms.openai import FileSearchTool, WebSearchOptions
from litellm.types.utils import ModelInfo, ModelResponse, StandardBuiltInToolsParams

sys.path.insert(
    0, os.path.abspath("../../..")
)  # Adds the parent directory to the system path

from litellm.litellm_core_utils.llm_cost_calc.utils import generic_cost_per_token
from litellm.types.utils import Usage


def test_generic_cost_per_token_above_200k_tokens():
    model = "gemini-2.5-pro-exp-03-25"
    custom_llm_provider = "vertex_ai"
    os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
    litellm.model_cost = litellm.get_model_cost_map(url="")

    model_cost_map = litellm.model_cost[model]
    prompt_tokens = 220 * 1e6
    completion_tokens = 150
    usage = Usage(
        prompt_tokens=prompt_tokens,
        completion_tokens=completion_tokens,
        total_tokens=prompt_tokens + completion_tokens,
    )
    prompt_cost, completion_cost = generic_cost_per_token(
        model=model,
        usage=usage,
        custom_llm_provider=custom_llm_provider,
    )
    assert round(prompt_cost, 10) == round(
        model_cost_map["input_cost_per_token_above_200k_tokens"] * usage.prompt_tokens,
        10,
    )
    assert round(completion_cost, 10) == round(
        model_cost_map["output_cost_per_token_above_200k_tokens"]
        * usage.completion_tokens,
        10,
    )
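As a quick sanity check, plugging the 200k-tier rates from the pricing hunks above into the token counts used in this test gives:

```python
# Rates from the *_above_200k_tokens entries added in the pricing JSON above.
input_rate_above_200k = 0.0000025   # $ per prompt token
output_rate_above_200k = 0.000015   # $ per completion token

prompt_tokens = 220 * 1e6           # 220M tokens, far above the 200k threshold
completion_tokens = 150

print(prompt_tokens * input_rate_above_200k)       # 550.0   -> expected prompt cost
print(completion_tokens * output_rate_above_200k)  # 0.00225 -> expected completion cost
```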
@@ -1,3 +1,4 @@
import asyncio
import json
import os
import sys
@@ -5,15 +6,23 @@ from typing import Optional, cast
from unittest.mock import MagicMock, patch

import pytest
from fastapi import Request
from fastapi.testclient import TestClient

sys.path.insert(
-    0, os.path.abspath("../../..")
+    0, os.path.abspath("../../../")
)  # Adds the parent directory to the system path

from litellm.proxy.auth.handle_jwt import JWTHandler
from litellm.proxy.management_endpoints.types import CustomOpenID
-from litellm.proxy.management_endpoints.ui_sso import MicrosoftSSOHandler
+from litellm.proxy.management_endpoints.ui_sso import (
+    GoogleSSOHandler,
+    MicrosoftSSOHandler,
+)
+from litellm.types.proxy.management_endpoints.ui_sso import (
+    MicrosoftGraphAPIUserGroupDirectoryObject,
+    MicrosoftGraphAPIUserGroupResponse,
+)


def test_microsoft_sso_handler_openid_from_response():
@@ -27,23 +36,14 @@ def test_microsoft_sso_handler_openid_from_response():
        "surname": "User",
        "some_other_field": "value",
    }

-    # Create a mock JWTHandler that returns predetermined team IDs
-    mock_jwt_handler = MagicMock(spec=JWTHandler)
    expected_team_ids = ["team1", "team2"]
-    mock_jwt_handler.get_team_ids_from_jwt.return_value = expected_team_ids

    # Act
    # Call the method being tested
    result = MicrosoftSSOHandler.openid_from_response(
-        response=mock_response, jwt_handler=mock_jwt_handler
+        response=mock_response, team_ids=expected_team_ids
    )

    # Assert
-    # Verify the JWT handler was called with the correct parameters
-    mock_jwt_handler.get_team_ids_from_jwt.assert_called_once_with(
-        cast(dict, mock_response)
-    )

    # Check that the result is a CustomOpenID object with the expected values
    assert isinstance(result, CustomOpenID)
@@ -59,13 +59,9 @@ def test_microsoft_sso_handler_openid_from_response():
def test_microsoft_sso_handler_with_empty_response():
    # Arrange
    # Test with None response
-    mock_jwt_handler = MagicMock(spec=JWTHandler)
-    mock_jwt_handler.get_team_ids_from_jwt.return_value = []

    # Act
-    result = MicrosoftSSOHandler.openid_from_response(
-        response=None, jwt_handler=mock_jwt_handler
-    )
+    result = MicrosoftSSOHandler.openid_from_response(response=None, team_ids=[])

    # Assert
    assert isinstance(result, CustomOpenID)
@ -77,5 +73,309 @@ def test_microsoft_sso_handler_with_empty_response():
|
|||
assert result.last_name is None
|
||||
assert result.team_ids == []
|
||||
|
||||
# Make sure the JWT handler was called with an empty dict
|
||||
mock_jwt_handler.get_team_ids_from_jwt.assert_called_once_with({})
|
||||
|
||||
def test_get_microsoft_callback_response():
|
||||
# Arrange
|
||||
mock_request = MagicMock(spec=Request)
|
||||
mock_response = {
|
||||
"mail": "microsoft_user@example.com",
|
||||
"displayName": "Microsoft User",
|
||||
"id": "msft123",
|
||||
"givenName": "Microsoft",
|
||||
"surname": "User",
|
||||
}
|
||||
|
||||
future = asyncio.Future()
|
||||
future.set_result(mock_response)
|
||||
|
||||
with patch.dict(
|
||||
os.environ,
|
||||
{"MICROSOFT_CLIENT_SECRET": "mock_secret", "MICROSOFT_TENANT": "mock_tenant"},
|
||||
):
|
||||
with patch(
|
||||
"fastapi_sso.sso.microsoft.MicrosoftSSO.verify_and_process",
|
||||
return_value=future,
|
||||
):
|
||||
# Act
|
||||
result = asyncio.run(
|
||||
MicrosoftSSOHandler.get_microsoft_callback_response(
|
||||
request=mock_request,
|
||||
microsoft_client_id="mock_client_id",
|
||||
redirect_url="http://mock_redirect_url",
|
||||
)
|
||||
)
|
||||
|
||||
# Assert
|
||||
assert isinstance(result, CustomOpenID)
|
||||
assert result.email == "microsoft_user@example.com"
|
||||
assert result.display_name == "Microsoft User"
|
||||
assert result.provider == "microsoft"
|
||||
assert result.id == "msft123"
|
||||
assert result.first_name == "Microsoft"
|
||||
assert result.last_name == "User"
|
||||
|
||||
|
||||
def test_get_microsoft_callback_response_raw_sso_response():
|
||||
# Arrange
|
||||
mock_request = MagicMock(spec=Request)
|
||||
mock_response = {
|
||||
"mail": "microsoft_user@example.com",
|
||||
"displayName": "Microsoft User",
|
||||
"id": "msft123",
|
||||
"givenName": "Microsoft",
|
||||
"surname": "User",
|
||||
}
|
||||
|
||||
future = asyncio.Future()
|
||||
future.set_result(mock_response)
|
||||
with patch.dict(
|
||||
os.environ,
|
||||
{"MICROSOFT_CLIENT_SECRET": "mock_secret", "MICROSOFT_TENANT": "mock_tenant"},
|
||||
):
|
||||
with patch(
|
||||
"fastapi_sso.sso.microsoft.MicrosoftSSO.verify_and_process",
|
||||
return_value=future,
|
||||
):
|
||||
# Act
|
||||
result = asyncio.run(
|
||||
MicrosoftSSOHandler.get_microsoft_callback_response(
|
||||
request=mock_request,
|
||||
microsoft_client_id="mock_client_id",
|
||||
redirect_url="http://mock_redirect_url",
|
||||
return_raw_sso_response=True,
|
||||
)
|
||||
)
|
||||
|
||||
# Assert
|
||||
print("result from verify_and_process", result)
|
||||
assert isinstance(result, dict)
|
||||
assert result["mail"] == "microsoft_user@example.com"
|
||||
assert result["displayName"] == "Microsoft User"
|
||||
assert result["id"] == "msft123"
|
||||
assert result["givenName"] == "Microsoft"
|
||||
assert result["surname"] == "User"
|
||||
|
||||
|
||||
def test_get_google_callback_response():
|
||||
# Arrange
|
||||
mock_request = MagicMock(spec=Request)
|
||||
mock_response = {
|
||||
"email": "google_user@example.com",
|
||||
"name": "Google User",
|
||||
"sub": "google123",
|
||||
"given_name": "Google",
|
||||
"family_name": "User",
|
||||
}
|
||||
|
||||
future = asyncio.Future()
|
||||
future.set_result(mock_response)
|
||||
|
||||
with patch.dict(os.environ, {"GOOGLE_CLIENT_SECRET": "mock_secret"}):
|
||||
with patch(
|
||||
"fastapi_sso.sso.google.GoogleSSO.verify_and_process", return_value=future
|
||||
):
|
||||
# Act
|
||||
result = asyncio.run(
|
||||
GoogleSSOHandler.get_google_callback_response(
|
||||
request=mock_request,
|
||||
google_client_id="mock_client_id",
|
||||
redirect_url="http://mock_redirect_url",
|
||||
)
|
||||
)
|
||||
|
||||
# Assert
|
||||
assert isinstance(result, dict)
|
||||
assert result.get("email") == "google_user@example.com"
|
||||
assert result.get("name") == "Google User"
|
||||
assert result.get("sub") == "google123"
|
||||
assert result.get("given_name") == "Google"
|
||||
assert result.get("family_name") == "User"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_user_groups_from_graph_api():
|
||||
# Arrange
|
||||
mock_response = {
|
||||
"@odata.context": "https://graph.microsoft.com/v1.0/$metadata#directoryObjects",
|
||||
"value": [
|
||||
{
|
||||
"@odata.type": "#microsoft.graph.group",
|
||||
"id": "group1",
|
||||
"displayName": "Group 1",
|
||||
},
|
||||
{
|
||||
"@odata.type": "#microsoft.graph.group",
|
||||
"id": "group2",
|
||||
"displayName": "Group 2",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
async def mock_get(*args, **kwargs):
|
||||
mock = MagicMock()
|
||||
mock.json.return_value = mock_response
|
||||
return mock
|
||||
|
||||
with patch(
|
||||
"litellm.proxy.management_endpoints.ui_sso.get_async_httpx_client"
|
||||
) as mock_client:
|
||||
mock_client.return_value = MagicMock()
|
||||
mock_client.return_value.get = mock_get
|
||||
|
||||
# Act
|
||||
result = await MicrosoftSSOHandler.get_user_groups_from_graph_api(
|
||||
access_token="mock_token"
|
||||
)
|
||||
|
||||
# Assert
|
||||
assert isinstance(result, list)
|
||||
assert len(result) == 2
|
||||
assert "group1" in result
|
||||
assert "group2" in result
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_user_groups_pagination():
|
||||
# Arrange
|
||||
first_response = {
|
||||
"@odata.context": "https://graph.microsoft.com/v1.0/$metadata#directoryObjects",
|
||||
"@odata.nextLink": "https://graph.microsoft.com/v1.0/me/memberOf?$skiptoken=page2",
|
||||
"value": [
|
||||
{
|
||||
"@odata.type": "#microsoft.graph.group",
|
||||
"id": "group1",
|
||||
"displayName": "Group 1",
|
||||
},
|
||||
],
|
||||
}
|
||||
second_response = {
|
||||
"@odata.context": "https://graph.microsoft.com/v1.0/$metadata#directoryObjects",
|
||||
"value": [
|
||||
{
|
||||
"@odata.type": "#microsoft.graph.group",
|
||||
"id": "group2",
|
||||
"displayName": "Group 2",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
responses = [first_response, second_response]
|
||||
current_response = {"index": 0}
|
||||
|
||||
async def mock_get(*args, **kwargs):
|
||||
mock = MagicMock()
|
||||
mock.json.return_value = responses[current_response["index"]]
|
||||
current_response["index"] += 1
|
||||
return mock
|
||||
|
||||
with patch(
|
||||
"litellm.proxy.management_endpoints.ui_sso.get_async_httpx_client"
|
||||
) as mock_client:
|
||||
mock_client.return_value = MagicMock()
|
||||
mock_client.return_value.get = mock_get
|
||||
|
||||
# Act
|
||||
result = await MicrosoftSSOHandler.get_user_groups_from_graph_api(
|
||||
access_token="mock_token"
|
||||
)
|
||||
|
||||
# Assert
|
||||
assert isinstance(result, list)
|
||||
assert len(result) == 2
|
||||
assert "group1" in result
|
||||
assert "group2" in result
|
||||
assert current_response["index"] == 2 # Verify both pages were fetched
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_user_groups_empty_response():
|
||||
# Arrange
|
||||
mock_response = {
|
||||
"@odata.context": "https://graph.microsoft.com/v1.0/$metadata#directoryObjects",
|
||||
"value": [],
|
||||
}
|
||||
|
||||
async def mock_get(*args, **kwargs):
|
||||
mock = MagicMock()
|
||||
mock.json.return_value = mock_response
|
||||
return mock
|
||||
|
||||
with patch(
|
||||
"litellm.proxy.management_endpoints.ui_sso.get_async_httpx_client"
|
||||
) as mock_client:
|
||||
mock_client.return_value = MagicMock()
|
||||
mock_client.return_value.get = mock_get
|
||||
|
||||
# Act
|
||||
result = await MicrosoftSSOHandler.get_user_groups_from_graph_api(
|
||||
access_token="mock_token"
|
||||
)
|
||||
|
||||
# Assert
|
||||
assert isinstance(result, list)
|
||||
assert len(result) == 0
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_user_groups_error_handling():
|
||||
# Arrange
|
||||
async def mock_get(*args, **kwargs):
|
||||
raise Exception("API Error")
|
||||
|
||||
with patch(
|
||||
"litellm.proxy.management_endpoints.ui_sso.get_async_httpx_client"
|
||||
) as mock_client:
|
||||
mock_client.return_value = MagicMock()
|
||||
mock_client.return_value.get = mock_get
|
||||
|
||||
# Act
|
||||
result = await MicrosoftSSOHandler.get_user_groups_from_graph_api(
|
||||
access_token="mock_token"
|
||||
)
|
||||
|
||||
# Assert
|
||||
assert isinstance(result, list)
|
||||
assert len(result) == 0
|
||||
|
||||
|
||||
def test_get_group_ids_from_graph_api_response():
|
||||
# Arrange
|
||||
mock_response = MicrosoftGraphAPIUserGroupResponse(
|
||||
odata_context="https://graph.microsoft.com/v1.0/$metadata#directoryObjects",
|
||||
odata_nextLink=None,
|
||||
value=[
|
||||
MicrosoftGraphAPIUserGroupDirectoryObject(
|
||||
odata_type="#microsoft.graph.group",
|
||||
id="group1",
|
||||
displayName="Group 1",
|
||||
description=None,
|
||||
deletedDateTime=None,
|
||||
roleTemplateId=None,
|
||||
),
|
||||
MicrosoftGraphAPIUserGroupDirectoryObject(
|
||||
odata_type="#microsoft.graph.group",
|
||||
id="group2",
|
||||
displayName="Group 2",
|
||||
description=None,
|
||||
deletedDateTime=None,
|
||||
roleTemplateId=None,
|
||||
),
|
||||
MicrosoftGraphAPIUserGroupDirectoryObject(
|
||||
odata_type="#microsoft.graph.group",
|
||||
id=None, # Test handling of None id
|
||||
displayName="Invalid Group",
|
||||
description=None,
|
||||
deletedDateTime=None,
|
||||
roleTemplateId=None,
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
# Act
|
||||
result = MicrosoftSSOHandler._get_group_ids_from_graph_api_response(mock_response)
|
||||
|
||||
# Assert
|
||||
assert isinstance(result, list)
|
||||
assert len(result) == 2
|
||||
assert "group1" in result
|
||||
assert "group2" in result
|
||||
|
|
|
@@ -0,0 +1,116 @@
import json
import os
import sys
from io import BytesIO
from unittest.mock import AsyncMock, MagicMock, patch

import httpx
import pytest
from fastapi import Request, UploadFile
from fastapi.testclient import TestClient
from starlette.datastructures import Headers
from starlette.datastructures import UploadFile as StarletteUploadFile

sys.path.insert(
    0, os.path.abspath("../../..")
)  # Adds the parent directory to the system path

from litellm.proxy.pass_through_endpoints.pass_through_endpoints import (
    HttpPassThroughEndpointHelpers,
)


# Test is_multipart
def test_is_multipart():
    # Test with multipart content type
    request = MagicMock(spec=Request)
    request.headers = Headers({"content-type": "multipart/form-data; boundary=123"})
    assert HttpPassThroughEndpointHelpers.is_multipart(request) is True

    # Test with non-multipart content type
    request.headers = Headers({"content-type": "application/json"})
    assert HttpPassThroughEndpointHelpers.is_multipart(request) is False

    # Test with no content type
    request.headers = Headers({})
    assert HttpPassThroughEndpointHelpers.is_multipart(request) is False


# Test _build_request_files_from_upload_file
@pytest.mark.asyncio
async def test_build_request_files_from_upload_file():
    # Test with FastAPI UploadFile
    file_content = b"test content"
    file = BytesIO(file_content)
    # Create SpooledTemporaryFile with content type headers
    headers = {"content-type": "text/plain"}
    upload_file = UploadFile(file=file, filename="test.txt", headers=headers)
    upload_file.read = AsyncMock(return_value=file_content)

    result = await HttpPassThroughEndpointHelpers._build_request_files_from_upload_file(
        upload_file
    )
    assert result == ("test.txt", file_content, "text/plain")

    # Test with Starlette UploadFile
    file2 = BytesIO(file_content)
    starlette_file = StarletteUploadFile(
        file=file2,
        filename="test2.txt",
        headers=Headers({"content-type": "text/plain"}),
    )
    starlette_file.read = AsyncMock(return_value=file_content)

    result = await HttpPassThroughEndpointHelpers._build_request_files_from_upload_file(
        starlette_file
    )
    assert result == ("test2.txt", file_content, "text/plain")


# Test make_multipart_http_request
@pytest.mark.asyncio
async def test_make_multipart_http_request():
    # Mock request with file and form field
    request = MagicMock(spec=Request)
    request.method = "POST"

    # Mock form data
    file_content = b"test file content"
    file = BytesIO(file_content)
    # Create SpooledTemporaryFile with content type headers
    headers = {"content-type": "text/plain"}
    upload_file = UploadFile(file=file, filename="test.txt", headers=headers)
    upload_file.read = AsyncMock(return_value=file_content)

    form_data = {"file": upload_file, "text_field": "test value"}
    request.form = AsyncMock(return_value=form_data)

    # Mock httpx client
    mock_response = MagicMock()
    mock_response.status_code = 200
    mock_response.headers = {}

    async_client = MagicMock()
    async_client.request = AsyncMock(return_value=mock_response)

    # Test the function
    response = await HttpPassThroughEndpointHelpers.make_multipart_http_request(
        request=request,
        async_client=async_client,
        url=httpx.URL("http://test.com"),
        headers={},
        requested_query_params=None,
    )

    # Verify the response
    assert response == mock_response

    # Verify the client call
    async_client.request.assert_called_once()
    call_args = async_client.request.call_args[1]

    assert call_args["method"] == "POST"
    assert str(call_args["url"]) == "http://test.com"
    assert isinstance(call_args["files"], dict)
    assert isinstance(call_args["data"], dict)
    assert call_args["data"]["text_field"] == "test value"
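The first test above pins down the behaviour of `is_multipart`: it should return True only when the request's content type is multipart form data. A minimal sketch of such a check follows; this is an assumption for illustration, not the helper's actual code.

```python
from fastapi import Request


def is_multipart_sketch(request: Request) -> bool:
    # Hypothetical sketch: treat the request as multipart when the
    # content-type header starts with "multipart/form-data".
    content_type = request.headers.get("content-type", "")
    return content_type.startswith("multipart/form-data")
```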
@@ -162,3 +162,30 @@ async def test_aaaproxy_startup_master_key(mock_prisma, monkeypatch, tmp_path):
    from litellm.proxy.proxy_server import master_key

    assert master_key == test_resolved_key


def test_team_info_masking():
    """
    Test that sensitive team information is properly masked

    Ref: https://huntr.com/bounties/661b388a-44d8-4ad5-862b-4dc5b80be30a
    """
    from litellm.proxy.proxy_server import ProxyConfig

    proxy_config = ProxyConfig()
    # Test team object with sensitive data
    team1_info = {
        "success_callback": "['langfuse', 's3']",
        "langfuse_secret": "secret-test-key",
        "langfuse_public_key": "public-test-key",
    }

    with pytest.raises(Exception) as exc_info:
        proxy_config._get_team_config(
            team_id="test_dev",
            all_teams_config=[team1_info],
        )

    print("Got exception: {}".format(exc_info.value))
    assert "secret-test-key" not in str(exc_info.value)
    assert "public-test-key" not in str(exc_info.value)
tests/litellm/proxy/types_utils/test_utils.py (new file, 72 lines)
@@ -0,0 +1,72 @@
import json
import os
import sys

import pytest
from fastapi.testclient import TestClient

from litellm.proxy.types_utils.utils import security_checks

sys.path.insert(
    0, os.path.abspath("../../..")
)  # Adds the parent directory to the system path


def test_security_checks_blocks_dangerous_modules():
    """
    Resolves: https://huntr.com/bounties/1d98bebb-6cf4-46c9-87c3-d3b1972973b5

    This test checks if the security_checks function correctly blocks the import of dangerous modules.
    """
    dangerous_module = "/usr/lib/python3/os.system"
    with pytest.raises(ImportError) as exc_info:
        security_checks(dangerous_module)

    assert "not allowed for security reasons" in str(exc_info.value)
    assert dangerous_module in str(exc_info.value)


def test_security_checks_various_dangerous_modules():
    dangerous_modules = [
        "subprocess.run",
        "socket.socket",
        "pickle.loads",
        "marshal.loads",
        "ctypes.CDLL",
        "builtins.eval",
        "__builtin__.exec",
        "shutil.rmtree",
        "multiprocessing.Process",
        "threading.Thread",
    ]

    for module in dangerous_modules:
        with pytest.raises(ImportError) as exc_info:
            security_checks(module)
        assert "not allowed for security reasons" in str(exc_info.value)
        assert module in str(exc_info.value)


def test_security_checks_case_insensitive():
    # Test that the check is case-insensitive
    variations = ["OS.system", "os.System", "Os.SyStEm", "SUBPROCESS.run"]

    for module in variations:
        with pytest.raises(ImportError) as exc_info:
            security_checks(module)
        assert "not allowed for security reasons" in str(exc_info.value)


def test_security_checks_nested_paths():
    # Test nested paths that contain dangerous modules
    nested_paths = [
        "some/path/to/os/system",
        "myproject/utils/subprocess_wrapper",
        "lib/helpers/socket_utils",
        "../../../system/os.py",
    ]

    for path in nested_paths:
        with pytest.raises(ImportError) as exc_info:
            security_checks(path)
        assert "not allowed for security reasons" in str(exc_info.value)
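Judging purely from what these tests assert, `security_checks` rejects any import path that mentions a dangerous module, case-insensitively. A deliberately coarse sketch of that idea is below; the blocklist contents and matching rule are assumptions for illustration, not the actual implementation in `litellm/proxy/types_utils/utils.py`.

```python
# Deliberately coarse sketch: block any path that mentions one of these names,
# case-insensitively. The real blocklist and matching logic may differ.
BLOCKED_NAMES = [
    "os",
    "subprocess",
    "socket",
    "pickle",
    "marshal",
    "ctypes",
    "builtins",
    "__builtin__",
    "shutil",
    "multiprocessing",
    "threading",
]


def security_checks_sketch(module_path: str) -> None:
    lowered = module_path.lower()
    for name in BLOCKED_NAMES:
        if name in lowered:
            raise ImportError(
                f"Importing '{module_path}' is not allowed for security reasons"
            )
```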
@@ -323,7 +323,8 @@ class TestHuggingFace(BaseLLMChatTest):
            model="huggingface/fireworks-ai/meta-llama/Meta-Llama-3-8B-Instruct",
            messages=[{"role": "user", "content": "Hello"}],
            optional_params={},
-            api_key="test_api_key"
+            api_key="test_api_key",
+            litellm_params={}
        )

        assert headers["Authorization"] == "Bearer test_api_key"
@@ -141,6 +141,7 @@ def test_build_vertex_schema():
    [
        ([{"googleSearch": {}}], "googleSearch"),
        ([{"googleSearchRetrieval": {}}], "googleSearchRetrieval"),
        ([{"enterpriseWebSearch": {}}], "enterpriseWebSearch"),
        ([{"code_execution": {}}], "code_execution"),
    ],
)
tests/local_testing/example.jsonl (new file, 2 lines)
@@ -0,0 +1,2 @@
{"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gemini-1.5-flash-001", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "Hello world!"}], "max_tokens": 10}}
{"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gemini-1.5-flash-001", "messages": [{"role": "system", "content": "You are an unhelpful assistant."}, {"role": "user", "content": "Hello world!"}], "max_tokens": 10}}
@ -21,7 +21,7 @@ from litellm.integrations.gcs_bucket.gcs_bucket import (
|
|||
StandardLoggingPayload,
|
||||
)
|
||||
from litellm.types.utils import StandardCallbackDynamicParams
|
||||
|
||||
from unittest.mock import patch
|
||||
verbose_logger.setLevel(logging.DEBUG)
|
||||
|
||||
|
||||
|
@ -687,3 +687,63 @@ async def test_basic_gcs_logger_with_folder_in_bucket_name():
|
|||
# clean up
|
||||
if old_bucket_name is not None:
|
||||
os.environ["GCS_BUCKET_NAME"] = old_bucket_name
|
||||
|
||||
@pytest.mark.skip(reason="This test is flaky on ci/cd")
|
||||
def test_create_file_e2e():
|
||||
"""
|
||||
Asserts 'create_file' is called with the correct arguments
|
||||
"""
|
||||
load_vertex_ai_credentials()
|
||||
test_file_content = b"test audio content"
|
||||
test_file = ("test.wav", test_file_content, "audio/wav")
|
||||
|
||||
from litellm import create_file
|
||||
response = create_file(
|
||||
file=test_file,
|
||||
purpose="user_data",
|
||||
custom_llm_provider="vertex_ai",
|
||||
)
|
||||
print("response", response)
|
||||
assert response is not None
|
||||
|
||||
@pytest.mark.skip(reason="This test is flaky on ci/cd")
|
||||
def test_create_file_e2e_jsonl():
|
||||
"""
|
||||
Asserts 'create_file' is called with the correct arguments
|
||||
"""
|
||||
load_vertex_ai_credentials()
|
||||
from litellm.llms.custom_httpx.http_handler import HTTPHandler
|
||||
|
||||
client = HTTPHandler()
|
||||
|
||||
example_jsonl = [{"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gemini-1.5-flash-001", "messages": [{"role": "system", "content": "You are a helpful assistant."},{"role": "user", "content": "Hello world!"}],"max_tokens": 10}},{"custom_id": "request-2", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gemini-1.5-flash-001", "messages": [{"role": "system", "content": "You are an unhelpful assistant."},{"role": "user", "content": "Hello world!"}],"max_tokens": 10}}]
|
||||
|
||||
# Create and write to the file
|
||||
file_path = "example.jsonl"
|
||||
with open(file_path, "w") as f:
|
||||
for item in example_jsonl:
|
||||
f.write(json.dumps(item) + "\n")
|
||||
|
||||
# Verify file content
|
||||
with open(file_path, "r") as f:
|
||||
content = f.read()
|
||||
print("File content:", content)
|
||||
assert len(content) > 0, "File is empty"
|
||||
|
||||
from litellm import create_file
|
||||
with patch.object(client, "post") as mock_create_file:
|
||||
try:
|
||||
response = create_file(
|
||||
file=open(file_path, "rb"),
|
||||
purpose="user_data",
|
||||
custom_llm_provider="vertex_ai",
|
||||
client=client,
|
||||
)
|
||||
except Exception as e:
|
||||
print("error", e)
|
||||
|
||||
mock_create_file.assert_called_once()
|
||||
|
||||
print(f"kwargs: {mock_create_file.call_args.kwargs}")
|
||||
|
||||
assert mock_create_file.call_args.kwargs["data"] is not None and len(mock_create_file.call_args.kwargs["data"]) > 0
|
|
@@ -431,6 +431,7 @@ def test_aaamodel_prices_and_context_window_json_is_valid():
        "input_cost_per_character_above_128k_tokens": {"type": "number"},
        "input_cost_per_image": {"type": "number"},
        "input_cost_per_image_above_128k_tokens": {"type": "number"},
        "input_cost_per_token_above_200k_tokens": {"type": "number"},
        "input_cost_per_pixel": {"type": "number"},
        "input_cost_per_query": {"type": "number"},
        "input_cost_per_request": {"type": "number"},
@@ -483,6 +484,7 @@ def test_aaamodel_prices_and_context_window_json_is_valid():
        "output_cost_per_second": {"type": "number"},
        "output_cost_per_token": {"type": "number"},
        "output_cost_per_token_above_128k_tokens": {"type": "number"},
        "output_cost_per_token_above_200k_tokens": {"type": "number"},
        "output_cost_per_token_batches": {"type": "number"},
        "output_db_cost_per_token": {"type": "number"},
        "output_dbu_cost_per_token": {"type": "number"},
@@ -541,6 +543,13 @@ def test_aaamodel_prices_and_context_window_json_is_valid():
                "enum": ["text", "audio", "image", "video"],
            },
        },
        "supported_output_modalities": {
            "type": "array",
            "items": {
                "type": "string",
                "enum": ["text", "image"],
            },
        },
        "supports_native_streaming": {"type": "boolean"},
    },
    "additionalProperties": False,
@@ -2,14 +2,31 @@ import pytest
import openai
import aiohttp
import asyncio
import tempfile
from typing_extensions import override
from openai import AssistantEventHandler


client = openai.OpenAI(base_url="http://0.0.0.0:4000/openai", api_key="sk-1234")


def test_pass_through_file_operations():
    # Create a temporary file
    with tempfile.NamedTemporaryFile(mode='w+', suffix='.txt', delete=False) as temp_file:
        temp_file.write("This is a test file for the OpenAI Assistants API.")
        temp_file.flush()

        # create a file
        file = client.files.create(
            file=open(temp_file.name, "rb"),
            purpose="assistants",
        )
        print("file created", file)

        # delete the file
        delete_file = client.files.delete(file.id)
        print("file deleted", delete_file)


def test_openai_assistants_e2e_operations():

    assistant = client.beta.assistants.create(
        name="Math Tutor",
        instructions="You are a personal math tutor. Write and run code to answer math questions.",
88
ui/litellm-dashboard/package-lock.json
generated
88
ui/litellm-dashboard/package-lock.json
generated
|
@ -21,7 +21,7 @@
|
|||
"jsonwebtoken": "^9.0.2",
|
||||
"jwt-decode": "^4.0.0",
|
||||
"moment": "^2.30.1",
|
||||
"next": "^14.2.25",
|
||||
"next": "^14.2.26",
|
||||
"openai": "^4.28.0",
|
||||
"papaparse": "^5.5.2",
|
||||
"react": "^18",
|
||||
|
@ -418,9 +418,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@next/env": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.25.tgz",
|
||||
"integrity": "sha512-JnzQ2cExDeG7FxJwqAksZ3aqVJrHjFwZQAEJ9gQZSoEhIow7SNoKZzju/AwQ+PLIR4NY8V0rhcVozx/2izDO0w==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.26.tgz",
|
||||
"integrity": "sha512-vO//GJ/YBco+H7xdQhzJxF7ub3SUwft76jwaeOyVVQFHCi5DCnkP16WHB+JBylo4vOKPoZBlR94Z8xBxNBdNJA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/@next/eslint-plugin-next": {
|
||||
|
@ -433,9 +433,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@next/swc-darwin-arm64": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.25.tgz",
|
||||
"integrity": "sha512-09clWInF1YRd6le00vt750s3m7SEYNehz9C4PUcSu3bAdCTpjIV4aTYQZ25Ehrr83VR1rZeqtKUPWSI7GfuKZQ==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-14.2.26.tgz",
|
||||
"integrity": "sha512-zDJY8gsKEseGAxG+C2hTMT0w9Nk9N1Sk1qV7vXYz9MEiyRoF5ogQX2+vplyUMIfygnjn9/A04I6yrUTRTuRiyQ==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
|
@ -449,9 +449,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@next/swc-darwin-x64": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.25.tgz",
|
||||
"integrity": "sha512-V+iYM/QR+aYeJl3/FWWU/7Ix4b07ovsQ5IbkwgUK29pTHmq+5UxeDr7/dphvtXEq5pLB/PucfcBNh9KZ8vWbug==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-14.2.26.tgz",
|
||||
"integrity": "sha512-U0adH5ryLfmTDkahLwG9sUQG2L0a9rYux8crQeC92rPhi3jGQEY47nByQHrVrt3prZigadwj/2HZ1LUUimuSbg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
|
@ -465,9 +465,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@next/swc-linux-arm64-gnu": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.25.tgz",
|
||||
"integrity": "sha512-LFnV2899PJZAIEHQ4IMmZIgL0FBieh5keMnriMY1cK7ompR+JUd24xeTtKkcaw8QmxmEdhoE5Mu9dPSuDBgtTg==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-14.2.26.tgz",
|
||||
"integrity": "sha512-SINMl1I7UhfHGM7SoRiw0AbwnLEMUnJ/3XXVmhyptzriHbWvPPbbm0OEVG24uUKhuS1t0nvN/DBvm5kz6ZIqpg==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
|
@ -481,9 +481,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@next/swc-linux-arm64-musl": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.25.tgz",
|
||||
"integrity": "sha512-QC5y5PPTmtqFExcKWKYgUNkHeHE/z3lUsu83di488nyP0ZzQ3Yse2G6TCxz6nNsQwgAx1BehAJTZez+UQxzLfw==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-14.2.26.tgz",
|
||||
"integrity": "sha512-s6JaezoyJK2DxrwHWxLWtJKlqKqTdi/zaYigDXUJ/gmx/72CrzdVZfMvUc6VqnZ7YEvRijvYo+0o4Z9DencduA==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
|
@ -497,9 +497,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@next/swc-linux-x64-gnu": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.25.tgz",
|
||||
"integrity": "sha512-y6/ML4b9eQ2D/56wqatTJN5/JR8/xdObU2Fb1RBidnrr450HLCKr6IJZbPqbv7NXmje61UyxjF5kvSajvjye5w==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-14.2.26.tgz",
|
||||
"integrity": "sha512-FEXeUQi8/pLr/XI0hKbe0tgbLmHFRhgXOUiPScz2hk0hSmbGiU8aUqVslj/6C6KA38RzXnWoJXo4FMo6aBxjzg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
|
@ -513,9 +513,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@next/swc-linux-x64-musl": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.25.tgz",
|
||||
"integrity": "sha512-sPX0TSXHGUOZFvv96GoBXpB3w4emMqKeMgemrSxI7A6l55VBJp/RKYLwZIB9JxSqYPApqiREaIIap+wWq0RU8w==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-14.2.26.tgz",
|
||||
"integrity": "sha512-BUsomaO4d2DuXhXhgQCVt2jjX4B4/Thts8nDoIruEJkhE5ifeQFtvW5c9JkdOtYvE5p2G0hcwQ0UbRaQmQwaVg==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
|
@ -529,9 +529,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@next/swc-win32-arm64-msvc": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.25.tgz",
|
||||
"integrity": "sha512-ReO9S5hkA1DU2cFCsGoOEp7WJkhFzNbU/3VUF6XxNGUCQChyug6hZdYL/istQgfT/GWE6PNIg9cm784OI4ddxQ==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-14.2.26.tgz",
|
||||
"integrity": "sha512-5auwsMVzT7wbB2CZXQxDctpWbdEnEW/e66DyXO1DcgHxIyhP06awu+rHKshZE+lPLIGiwtjo7bsyeuubewwxMw==",
|
||||
"cpu": [
|
||||
"arm64"
|
||||
],
|
||||
|
@ -545,9 +545,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@next/swc-win32-ia32-msvc": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.25.tgz",
|
||||
"integrity": "sha512-DZ/gc0o9neuCDyD5IumyTGHVun2dCox5TfPQI/BJTYwpSNYM3CZDI4i6TOdjeq1JMo+Ug4kPSMuZdwsycwFbAw==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-win32-ia32-msvc/-/swc-win32-ia32-msvc-14.2.26.tgz",
|
||||
"integrity": "sha512-GQWg/Vbz9zUGi9X80lOeGsz1rMH/MtFO/XqigDznhhhTfDlDoynCM6982mPCbSlxJ/aveZcKtTlwfAjwhyxDpg==",
|
||||
"cpu": [
|
||||
"ia32"
|
||||
],
|
||||
|
@ -561,9 +561,9 @@
|
|||
}
|
||||
},
|
||||
"node_modules/@next/swc-win32-x64-msvc": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.25.tgz",
|
||||
"integrity": "sha512-KSznmS6eFjQ9RJ1nEc66kJvtGIL1iZMYmGEXsZPh2YtnLtqrgdVvKXJY2ScjjoFnG6nGLyPFR0UiEvDwVah4Tw==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-14.2.26.tgz",
|
||||
"integrity": "sha512-2rdB3T1/Gp7bv1eQTTm9d1Y1sv9UuJ2LAwOE0Pe2prHKe32UNscj7YS13fRB37d0GAiGNR+Y7ZcW8YjDI8Ns0w==",
|
||||
"cpu": [
|
||||
"x64"
|
||||
],
|
||||
|
@ -5011,12 +5011,12 @@
|
|||
"dev": true
|
||||
},
|
||||
"node_modules/next": {
|
||||
"version": "14.2.25",
|
||||
"resolved": "https://registry.npmjs.org/next/-/next-14.2.25.tgz",
|
||||
"integrity": "sha512-N5M7xMc4wSb4IkPvEV5X2BRRXUmhVHNyaXwEM86+voXthSZz8ZiRyQW4p9mwAoAPIm6OzuVZtn7idgEJeAJN3Q==",
|
||||
"version": "14.2.26",
|
||||
"resolved": "https://registry.npmjs.org/next/-/next-14.2.26.tgz",
|
||||
"integrity": "sha512-b81XSLihMwCfwiUVRRja3LphLo4uBBMZEzBBWMaISbKTwOmq3wPknIETy/8000tr7Gq4WmbuFYPS7jOYIf+ZJw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@next/env": "14.2.25",
|
||||
"@next/env": "14.2.26",
|
||||
"@swc/helpers": "0.5.5",
|
||||
"busboy": "1.6.0",
|
||||
"caniuse-lite": "^1.0.30001579",
|
||||
|
@ -5031,15 +5031,15 @@
|
|||
"node": ">=18.17.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"@next/swc-darwin-arm64": "14.2.25",
|
||||
"@next/swc-darwin-x64": "14.2.25",
|
||||
"@next/swc-linux-arm64-gnu": "14.2.25",
|
||||
"@next/swc-linux-arm64-musl": "14.2.25",
|
||||
"@next/swc-linux-x64-gnu": "14.2.25",
|
||||
"@next/swc-linux-x64-musl": "14.2.25",
|
||||
"@next/swc-win32-arm64-msvc": "14.2.25",
|
||||
"@next/swc-win32-ia32-msvc": "14.2.25",
|
||||
"@next/swc-win32-x64-msvc": "14.2.25"
|
||||
"@next/swc-darwin-arm64": "14.2.26",
|
||||
"@next/swc-darwin-x64": "14.2.26",
|
||||
"@next/swc-linux-arm64-gnu": "14.2.26",
|
||||
"@next/swc-linux-arm64-musl": "14.2.26",
|
||||
"@next/swc-linux-x64-gnu": "14.2.26",
|
||||
"@next/swc-linux-x64-musl": "14.2.26",
|
||||
"@next/swc-win32-arm64-msvc": "14.2.26",
|
||||
"@next/swc-win32-ia32-msvc": "14.2.26",
|
||||
"@next/swc-win32-x64-msvc": "14.2.26"
|
||||
},
|
||||
"peerDependencies": {
|
||||
"@opentelemetry/api": "^1.1.0",
|
||||
|
|
|
@@ -22,7 +22,7 @@
    "jsonwebtoken": "^9.0.2",
    "jwt-decode": "^4.0.0",
    "moment": "^2.30.1",
-    "next": "^14.2.25",
+    "next": "^14.2.26",
    "openai": "^4.28.0",
    "papaparse": "^5.5.2",
    "react": "^18",
@@ -314,6 +314,8 @@ export default function CreateKeyPage() {
          <BudgetPanel accessToken={accessToken} />
        ) : page == "guardrails" ? (
          <GuardrailsPanel accessToken={accessToken} />
        ): page == "transform-request" ? (
          <TransformRequestPanel accessToken={accessToken} />
        ): page == "general-settings" ? (
          <GeneralSettings
            userID={userID}
@@ -13,9 +13,12 @@ import { Organization, userListCall } from "./networking";
import { createTeamSearchFunction } from "./key_team_helpers/team_search_fn";
import { createOrgSearchFunction } from "./key_team_helpers/organization_search_fn";
import { useFilterLogic } from "./key_team_helpers/filter_logic";
import { Setter } from "@/types";
import { updateExistingKeys } from "@/utils/dataUtils";

interface AllKeysTableProps {
  keys: KeyResponse[];
  setKeys: Setter<KeyResponse[]>;
  isLoading?: boolean;
  pagination: {
    currentPage: number;
@@ -87,6 +90,7 @@ const TeamFilter = ({
 */
export function AllKeysTable({
  keys,
  setKeys,
  isLoading = false,
  pagination,
  onPageChange,
@@ -364,6 +368,23 @@ export function AllKeysTable({
          keyId={selectedKeyId}
          onClose={() => setSelectedKeyId(null)}
          keyData={keys.find(k => k.token === selectedKeyId)}
          onKeyDataUpdate={(updatedKeyData) => {
            setKeys(keys => keys.map(key => {
              if (key.token === updatedKeyData.token) {
                // The shape of key is different from that of
                // updatedKeyData (received from keyUpdateCall in networking.tsx).
                // Hence, we can't replace key with updatedKeys since it might lead
                // to unintended bugs/behaviors.
                // So instead, we only update fields that are present in both.
                return updateExistingKeys(key, updatedKeyData)
              }

              return key
            }))
          }}
          onDelete={() => {
            setKeys(keys => keys.filter(key => key.token !== selectedKeyId))
          }}
          accessToken={accessToken}
          userID={userID}
          userRole={userRole}
Some files were not shown because too many files have changed in this diff.