forked from phoenix/litellm-mirror
fix use azure_ai/mistral
This commit is contained in:
parent 5ce80d82d3
commit 6b9c04618e
2 changed files with 12 additions and 43 deletions
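In effect, this commit fixes `completion()` calls that reference a Mistral deployment through the `azure_ai/` provider prefix. A minimal sketch of the call shape involved, assuming an OpenAI-compatible Azure AI endpoint; the model alias, endpoint, and key below are placeholders, not values from this diff:

```python
# Sketch of an azure_ai/mistral call; endpoint, key, and model alias are
# illustrative placeholders, not values taken from this commit.
from litellm import completion

response = completion(
    model="azure_ai/mistral-large-latest",  # provider-prefixed Mistral model
    api_base="https://<your-deployment>.eastus2.inference.ai.azure.com/v1/",
    api_key="<your-azure-ai-api-key>",
    messages=[{"role": "user", "content": "hello from litellm"}],
)
print(response)
```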
@@ -13,47 +13,7 @@ response = litellm.completion(
 )
 ```
 
-## Using Mistral models deployed on Azure AI Studio
-
-### Sample Usage - setting env vars
-
-Set `MISTRAL_AZURE_API_KEY` and `MISTRAL_AZURE_API_BASE` in your env
-
-```shell
-MISTRAL_AZURE_API_KEY = "zE************"
-MISTRAL_AZURE_API_BASE = "https://Mistral-large-nmefg-serverless.eastus2.inference.ai.azure.com/v1"
-```
-
-```python
-from litellm import completion
-import os
-
-response = completion(
-    model="mistral/Mistral-large-dfgfj",
-    messages=[
-        {"role": "user", "content": "hello from litellm"}
-    ],
-)
-print(response)
-```
-
-### Sample Usage - passing `api_base` and `api_key` to `litellm.completion`
-
-```python
-from litellm import completion
-import os
-
-response = completion(
-    model="mistral/Mistral-large-dfgfj",
-    api_base="https://Mistral-large-dfgfj-serverless.eastus2.inference.ai.azure.com",
-    api_key="JGbKodRcTp****",
-    messages=[
-        {"role": "user", "content": "hello from litellm"}
-    ],
-)
-print(response)
-```
-
-### [LiteLLM Proxy] Using Mistral Models
+### Sample Usage - LiteLLM Proxy
 
 Set this on your litellm proxy config.yaml
 ```yaml
@@ -61,8 +21,17 @@ model_list:
   - model_name: mistral
     litellm_params:
       model: mistral/Mistral-large-dfgfj
-      api_base: https://Mistral-large-dfgfj-serverless.eastus2.inference.ai.azure.com
+      api_base: https://Mistral-large-dfgfj-serverless.eastus2.inference.ai.azure.com/v1/
       api_key: JGbKodRcTp****
 ```
 
+## Supported Models
+
+| Model Name           | Function Call                                              |
+|----------------------|------------------------------------------------------------|
+| command-r-plus       | `completion(model="azure/command-r-plus", messages)`       |
+| command-r            | `completion(model="azure/command-r", messages)`            |
+| mistral-large-latest | `completion(model="azure/mistral-large-latest", messages)` |
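As a usage note: once a `config.yaml` like the one above is loaded (typically via `litellm --config config.yaml`), the proxy speaks the OpenAI API, so any OpenAI client can call the `mistral` alias. A minimal sketch, assuming a locally running proxy; the port and key handling are assumptions, not part of this diff:

```python
# Sketch: calling the proxy configured above through the OpenAI client.
# Assumes a locally running proxy; adjust base_url to where it actually listens.
import openai

client = openai.OpenAI(
    base_url="http://0.0.0.0:4000",  # proxy address (assumed default port)
    api_key="sk-anything",           # placeholder; the proxy may enforce its own keys
)

response = client.chat.completions.create(
    model="mistral",  # matches model_name in the config.yaml above
    messages=[{"role": "user", "content": "hello from litellm proxy"}],
)
print(response.choices[0].message.content)
```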
@@ -5579,7 +5579,7 @@ def get_llm_provider(
             model_name = model.split("/", 1)[1]
             if (
                 model_name in litellm.cohere_chat_models
-                or model_name in litellm.mistral_chat_models
+                or f"mistral/{model_name}" in litellm.mistral_chat_models
             ):
                 custom_llm_provider = "openai"
                 model = model_name
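The one-line change above reads as a keying fix: entries in `litellm.mistral_chat_models` appear to carry a `mistral/` prefix, so the bare deployment name stripped from `azure_ai/<name>` never matched. A toy sketch of the lookup, where the list contents are an assumption for illustration:

```python
# Toy illustration of the membership fix; this list is an assumption about
# how litellm.mistral_chat_models is keyed, not a verbatim copy of it.
mistral_chat_models = ["mistral/mistral-large-latest", "mistral/mistral-small"]

model = "azure_ai/mistral-large-latest"
model_name = model.split("/", 1)[1]   # -> "mistral-large-latest"

print(model_name in mistral_chat_models)               # False: bare name misses
print(f"mistral/{model_name}" in mistral_chat_models)  # True: prefixed lookup hits
```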