Merge pull request #2164 from BerriAI/litellm_support_extra_headers
[FEAT] Support extra headers - OpenAI / Azure
Commit 9594ceae55
7 changed files with 43 additions and 13 deletions

@@ -28,11 +28,11 @@ This is a list of openai params we translate across providers.
 
 This list is constantly being updated.
 
-| Provider | temperature | max_tokens | top_p | stream | stop | n | presence_penalty | frequency_penalty | functions | function_call | logit_bias | user | response_format | seed | tools | tool_choice | logprobs | top_logprobs |
-|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
+| Provider | temperature | max_tokens | top_p | stream | stop | n | presence_penalty | frequency_penalty | functions | function_call | logit_bias | user | response_format | seed | tools | tool_choice | logprobs | top_logprobs | extra_headers |
+|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|--|
 |Anthropic| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | |
-|OpenAI| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ |
-|Azure OpenAI| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ |✅ | ✅ | | |
+|OpenAI| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ | ✅ |
+|Azure OpenAI| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |✅ | ✅ | ✅ | ✅ |✅ | ✅ | | | ✅ |
 |Replicate | ✅ | ✅ | ✅ | ✅ | ✅ | | | | | |
 |Anyscale | ✅ | ✅ | ✅ | ✅ |
 |Cohere| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | |
@@ -42,7 +42,7 @@ This list is constantly being updated.
 |VertexAI| ✅ | ✅ | | ✅ | | | | | | |
 |Bedrock| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | |
 |Sagemaker| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | |
-|TogetherAI| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | |
+|TogetherAI| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | | ✅ |
 |AlephAlpha| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | |
 |Palm| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | | | |
 |NLP Cloud| ✅ | ✅ | ✅ | ✅ | ✅ | | | | | |
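
A quick way to confirm what a given provider accepts is litellm's `get_supported_openai_params` helper; a minimal sketch, assuming your litellm version exports it (the returned list varies by version and provider):

```python
# Minimal sketch, assuming litellm exports get_supported_openai_params.
from litellm import get_supported_openai_params

supported = get_supported_openai_params(
    model="gpt-3.5-turbo", custom_llm_provider="openai"
)
# With this PR merged, "extra_headers" should appear for openai/azure.
print("extra_headers" in supported)
```
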
@@ -156,6 +156,19 @@ response_message = response.choices[0].message
 tool_calls = response.choices[0].message.tool_calls
 ```
 
+### Setting `extra_headers` for completion calls
+```python
+import os
+from litellm import completion
+
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+
+response = completion(
+    model="gpt-3.5-turbo",
+    messages=[{"content": "Hello, how are you?", "role": "user"}],
+    extra_headers={"AI-Resource Group": "ishaan-resource"}
+)
+```
+
 ### Setting Organization-ID for completion calls
 This can be set in one of the following ways:
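
Per the table above, the same kwarg works for Azure OpenAI. A hedged variant of the docs example, assuming litellm's standard Azure environment variables and a hypothetical deployment name:

```python
import os
from litellm import completion

# Placeholders; use your real Azure OpenAI credentials and endpoint.
os.environ["AZURE_API_KEY"] = "your-azure-api-key"
os.environ["AZURE_API_BASE"] = "https://your-resource.openai.azure.com"
os.environ["AZURE_API_VERSION"] = "2023-07-01-preview"

response = completion(
    model="azure/your-deployment-name",  # hypothetical deployment name
    messages=[{"content": "Hello, how are you?", "role": "user"}],
    extra_headers={"AI-Resource Group": "ishaan-resource"},
)
```
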
@@ -227,6 +227,7 @@ model_list:
   - model_name: openai-gpt-3.5
     litellm_params:
       model: openai/gpt-3.5-turbo
+      extra_headers: {"AI-Resource Group": "ishaan-resource"}
       api_key: sk-123
       organization: org-ikDc4ex8NB
       temperature: 0.2
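
With `extra_headers` set in `litellm_params`, every request the proxy routes to this deployment should carry the header; callers don't need to set it themselves. A sketch of a client call against the proxy, assuming it is running locally (adjust host/port and key to your setup):

```python
import openai

# The proxy speaks the OpenAI API; base_url/api_key here are placeholders.
client = openai.OpenAI(api_key="sk-anything", base_url="http://0.0.0.0:8000")

response = client.chat.completions.create(
    model="openai-gpt-3.5",  # the model_name from the config above
    messages=[{"role": "user", "content": "Hello, how are you?"}],
)
print(response.choices[0].message.content)
```
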
@@ -234,10 +235,6 @@ model_list:
     litellm_params:
       model: ollama/mistral
       api_base: your_ollama_api_base
-      headers: {
-        "HTTP-Referer": "litellm.ai",
-        "X-Title": "LiteLLM Server"
-      }
 ```
 
 **Step 2**: Start server with config
@@ -787,10 +787,6 @@ model_list:
     litellm_params:
       model: ollama/mistral
       api_base: your_ollama_api_base
-      headers: {
-        "HTTP-Referer": "litellm.ai",
-        "X-Title": "LiteLLM Server"
-      }
 ```
 
 **Step 2**: Start server with config
@@ -398,6 +398,7 @@ def completion(
     logprobs: Optional[bool] = None,
     top_logprobs: Optional[int] = None,
     deployment_id=None,
+    extra_headers: Optional[dict] = None,
     # soon to be deprecated params by OpenAI
     functions: Optional[List] = None,
     function_call: Optional[str] = None,
@@ -435,6 +436,7 @@ def completion(
         api_version (str, optional): API version (default is None).
         api_key (str, optional): API key (default is None).
         model_list (list, optional): List of api base, version, keys
+        extra_headers (dict, optional): Additional headers to include in the request.
 
         LITELLM Specific Params
         mock_response (str, optional): If provided, return a mock completion response for testing or debugging purposes (default is None).
@@ -514,6 +516,7 @@ def completion(
         "max_retries",
         "logprobs",
         "top_logprobs",
+        "extra_headers",
     ]
     litellm_params = [
         "metadata",
@@ -691,6 +694,7 @@ def completion(
         max_retries=max_retries,
         logprobs=logprobs,
         top_logprobs=top_logprobs,
+        extra_headers=extra_headers,
         **non_default_params,
     )
@@ -118,3 +118,18 @@ def test_azure_gpt_optional_params_gpt_vision_with_extra_body():
 
 
 # test_azure_gpt_optional_params_gpt_vision_with_extra_body()
+
+
+def test_openai_extra_headers():
+    optional_params = litellm.utils.get_optional_params(
+        user="John",
+        custom_llm_provider="openai",
+        max_tokens=10,
+        temperature=0.2,
+        extra_headers={"AI-Resource Group": "ishaan-resource"},
+    )
+
+    print(optional_params)
+    assert optional_params["max_tokens"] == 10
+    assert optional_params["temperature"] == 0.2
+    assert optional_params["extra_headers"] == {"AI-Resource Group": "ishaan-resource"}
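
Since the `get_optional_params` changes below gate the new key on the shared `["openai", "azure"]` path, an analogous Azure check would presumably pass too; a hypothetical companion test, not part of this PR:

```python
def test_azure_extra_headers():
    # Hypothetical mirror of test_openai_extra_headers for the azure provider.
    optional_params = litellm.utils.get_optional_params(
        custom_llm_provider="azure",
        max_tokens=10,
        extra_headers={"AI-Resource Group": "ishaan-resource"},
    )
    assert optional_params["extra_headers"] == {"AI-Resource Group": "ishaan-resource"}
```
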
@@ -3920,6 +3920,7 @@ def get_optional_params(
     max_retries=None,
     logprobs=None,
     top_logprobs=None,
+    extra_headers=None,
     **kwargs,
 ):
     # retrieve all parameters passed to the function
@@ -3959,6 +3960,7 @@ def get_optional_params(
         "max_retries": None,
         "logprobs": None,
         "top_logprobs": None,
+        "extra_headers": None,
     }
     # filter out those parameters that were passed with non-default values
     non_default_params = {
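
Registering `"extra_headers": None` here matters because of the filtering step right below it: only parameters whose passed value differs from the default survive into `non_default_params`. Conceptually (a simplified sketch of the idea, not the exact litellm code):

```python
# Simplified illustration of the "filter out non-default values" step.
passed_params = {
    "max_tokens": 10,
    "logprobs": None,  # left at its default, so it is dropped
    "extra_headers": {"AI-Resource Group": "ishaan-resource"},
}
default_params = {"max_tokens": None, "logprobs": None, "extra_headers": None}

non_default_params = {
    k: v for k, v in passed_params.items() if v != default_params.get(k)
}
print(non_default_params)
# {'max_tokens': 10, 'extra_headers': {'AI-Resource Group': 'ishaan-resource'}}
```
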
@@ -4766,6 +4768,7 @@ def get_optional_params(
             "max_retries",
             "logprobs",
             "top_logprobs",
+            "extra_headers",
         ]
         _check_valid_arg(supported_params=supported_params)
         if functions is not None:
@@ -4806,6 +4809,8 @@ def get_optional_params(
             optional_params["logprobs"] = logprobs
         if top_logprobs is not None:
             optional_params["top_logprobs"] = top_logprobs
+        if extra_headers is not None:
+            optional_params["extra_headers"] = extra_headers
     if custom_llm_provider in ["openai", "azure"] + litellm.openai_compatible_providers:
         # for openai, azure we should pass the extra/passed params within `extra_body` https://github.com/openai/openai-python/blob/ac33853ba10d13ac149b1fa3ca6dba7d613065c9/src/openai/resources/models.py#L46
         extra_body = passed_params.pop("extra_body", {})
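
Once `extra_headers` survives into `optional_params`, it presumably rides along to the underlying openai-python client, which accepts a per-request `extra_headers` argument on its `create` methods. The equivalent raw SDK call, for comparison (placeholder key):

```python
from openai import OpenAI

client = OpenAI(api_key="your-api-key")  # placeholder key

# The openai-python SDK merges extra_headers into the outbound HTTP request.
response = client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    extra_headers={"AI-Resource Group": "ishaan-resource"},
)
```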