From 6b9c04618eb8f1ce42eda61749b47c592041493a Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Fri, 5 Apr 2024 10:07:43 -0700
Subject: [PATCH] fix use azure_ai/mistral

---
 docs/my-website/docs/providers/azure_ai.md | 53 +++++-----------------
 litellm/utils.py                           |  2 +-
 2 files changed, 12 insertions(+), 43 deletions(-)

diff --git a/docs/my-website/docs/providers/azure_ai.md b/docs/my-website/docs/providers/azure_ai.md
index fbdc8ee1c..0166d9848 100644
--- a/docs/my-website/docs/providers/azure_ai.md
+++ b/docs/my-website/docs/providers/azure_ai.md
@@ -13,47 +13,7 @@ response = litellm.completion(
 )
 ```
 
-## Using Mistral models deployed on Azure AI Studio
-
-### Sample Usage - setting env vars
-
-Set `MISTRAL_AZURE_API_KEY` and `MISTRAL_AZURE_API_BASE` in your env
-
-```shell
-MISTRAL_AZURE_API_KEY = "zE************""
-MISTRAL_AZURE_API_BASE = "https://Mistral-large-nmefg-serverless.eastus2.inference.ai.azure.com/v1"
-```
-
-```python
-from litellm import completion
-import os
-
-response = completion(
-    model="mistral/Mistral-large-dfgfj",
-    messages=[
-        {"role": "user", "content": "hello from litellm"}
-    ],
-)
-print(response)
-```
-
-### Sample Usage - passing `api_base` and `api_key` to `litellm.completion`
-```python
-from litellm import completion
-import os
-
-response = completion(
-    model="mistral/Mistral-large-dfgfj",
-    api_base="https://Mistral-large-dfgfj-serverless.eastus2.inference.ai.azure.com",
-    api_key = "JGbKodRcTp****"
-    messages=[
-        {"role": "user", "content": "hello from litellm"}
-    ],
-)
-print(response)
-```
-
-### [LiteLLM Proxy] Using Mistral Models
+### Sample Usage - LiteLLM Proxy
 
 Set this on your litellm proxy config.yaml
 ```yaml
@@ -61,8 +21,17 @@ model_list:
   - model_name: mistral
     litellm_params:
       model: mistral/Mistral-large-dfgfj
-      api_base: https://Mistral-large-dfgfj-serverless.eastus2.inference.ai.azure.com
+      api_base: https://Mistral-large-dfgfj-serverless.eastus2.inference.ai.azure.com/v1/
       api_key: JGbKodRcTp****
 ```
 
+## Supported Models
+
+| Model Name           | Function Call                                              |
+|----------------------|------------------------------------------------------------|
+| command-r-plus       | `completion(model="azure/command-r-plus", messages)`       |
+| command-r            | `completion(model="azure/command-r", messages)`            |
+| mistral-large-latest | `completion(model="azure/mistral-large-latest", messages)` |
+
+
 
diff --git a/litellm/utils.py b/litellm/utils.py
index 9da560947..6c0521265 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -5579,7 +5579,7 @@ def get_llm_provider(
             model_name = model.split("/", 1)[1]
             if (
                 model_name in litellm.cohere_chat_models
-                or model_name in litellm.mistral_chat_models
+                or f"mistral/{model_name}" in litellm.mistral_chat_models
             ):
                 custom_llm_provider = "openai"
                 model = model_name
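
For context on the `config.yaml` change above: once the proxy is started against that config (for example with `litellm --config config.yaml`), requests go to the OpenAI-compatible endpoint it exposes. A minimal sketch, assuming the proxy listens locally on port 4000 and using a placeholder key:

```python
# Hedged sketch: call the LiteLLM proxy defined by the config.yaml above.
# The base_url, port, and api_key are placeholder assumptions, not values from the patch.
import openai

client = openai.OpenAI(base_url="http://0.0.0.0:4000", api_key="sk-1234")

response = client.chat.completions.create(
    model="mistral",  # must match `model_name` in the proxy config
    messages=[{"role": "user", "content": "hello from litellm"}],
)
print(response.choices[0].message.content)
```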
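
The new Supported Models table abbreviates the calls; spelled out, a direct call for one of those rows would look roughly like the sketch below, where the `api_base` and `api_key` values are placeholders for an Azure AI serverless deployment:

```python
# Hedged sketch of one row from the Supported Models table; endpoint and key are placeholders.
import litellm

response = litellm.completion(
    model="azure/command-r-plus",
    api_base="https://<your-deployment>.inference.ai.azure.com/v1/",
    api_key="<your-azure-ai-api-key>",
    messages=[{"role": "user", "content": "hello from litellm"}],
)
print(response)
```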
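
On the `litellm/utils.py` change: `litellm.mistral_chat_models` appears to store fully qualified names such as `mistral/mistral-large-latest`, so the bare `model_name` never matched and `azure_ai/mistral-*` models missed the OpenAI-compatible routing path. A minimal, self-contained illustration of the membership check (the list literal is a stand-in, not the real contents of `litellm.mistral_chat_models`):

```python
# Stand-in list approximating litellm.mistral_chat_models, whose entries carry a "mistral/" prefix.
mistral_chat_models = ["mistral/mistral-large-latest", "mistral/mistral-medium"]

model = "azure_ai/mistral-large-latest"
model_name = model.split("/", 1)[1]  # -> "mistral-large-latest"

print(model_name in mistral_chat_models)               # False: the pre-patch check never matches
print(f"mistral/{model_name}" in mistral_chat_models)  # True: the patched check matches
```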