From cc960da4b6c7f04c888a042b36104072cca453e9 Mon Sep 17 00:00:00 2001
From: Krrish Dholakia
Date: Mon, 7 Oct 2024 22:37:49 -0700
Subject: [PATCH] docs(azure.md): add o1 model support to config

---
 docs/my-website/docs/providers/azure.md | 75 ++++++++++++++++++++++++-
 docs/my-website/docs/proxy/configs.md   |  2 +
 2 files changed, 76 insertions(+), 1 deletion(-)

diff --git a/docs/my-website/docs/providers/azure.md b/docs/my-website/docs/providers/azure.md
index c52938e2c..5728f4c06 100644
--- a/docs/my-website/docs/providers/azure.md
+++ b/docs/my-website/docs/providers/azure.md
@@ -281,6 +281,78 @@ response = completion(
 )
 ```
+## Azure O1 Models
+
+| Model Name | Function Call |
+|---------------------|----------------------------------------------------|
+| o1-mini | `response = completion(model="azure/<your deployment name>", messages=messages)` |
+| o1-preview | `response = completion(model="azure/<your deployment name>", messages=messages)` |
+
+Set `litellm.enable_preview_features = True` to use Azure O1 Models with streaming support.
+
+<Tabs>
+<TabItem value="sdk" label="SDK">
+```python
+import litellm
+
+litellm.enable_preview_features = True # 👈 KEY CHANGE
+
+response = litellm.completion(
+    model="azure/<your deployment name>",
+    messages=[{"role": "user", "content": "What is the weather like in Boston?"}],
+    stream=True
+)
+
+for chunk in response:
+    print(chunk)
+```
+
+</TabItem>
+<TabItem value="proxy" label="PROXY">
+
+1. Setup config.yaml
+```yaml
+model_list:
+  - model_name: o1-mini
+    litellm_params:
+      model: azure/o1-mini
+      api_base: "os.environ/AZURE_API_BASE"
+      api_key: "os.environ/AZURE_API_KEY"
+      api_version: "os.environ/AZURE_API_VERSION"
+
+litellm_settings:
+  enable_preview_features: true # 👈 KEY CHANGE
+```
+
+2. Start proxy
+
+```bash
+litellm --config /path/to/config.yaml
+```
+
+3. Test it
+
+```python
+import openai
+client = openai.OpenAI(
+    api_key="anything",
+    base_url="http://0.0.0.0:4000"
+)
+
+response = client.chat.completions.create(model="o1-mini", messages=[
+    {
+        "role": "user",
+        "content": "this is a test request, write a short poem"
+    }
+],
+stream=True)
+
+for chunk in response:
+    print(chunk)
+```
+
+</TabItem>
+</Tabs>
 
 ## Azure Instruct Models
 
 Use `model="azure_text/"`
@@ -613,4 +685,5 @@ print("\nLLM Response1:\n", response)
 response_message = response.choices[0].message
 tool_calls = response.choices[0].message.tool_calls
 print("\nTool Choice:\n", tool_calls)
-```
\ No newline at end of file
+```
+
diff --git a/docs/my-website/docs/proxy/configs.md b/docs/my-website/docs/proxy/configs.md
index f4e43aa80..af29f13ce 100644
--- a/docs/my-website/docs/proxy/configs.md
+++ b/docs/my-website/docs/proxy/configs.md
@@ -697,6 +697,8 @@ general_settings:
 | callbacks | array of strings | List of callbacks - runs on success and failure [Doc Proxy logging callbacks](logging), [Doc Metrics](prometheus) |
 | service_callbacks | array of strings | System health monitoring - Logs redis, postgres failures on specified services (e.g. datadog, prometheus) [Doc Metrics](prometheus) |
 | turn_off_message_logging | boolean | If true, prevents messages and responses from being logged to callbacks, but request metadata will still be logged [Proxy Logging](logging) |
+| modify_params | boolean | If true, allows modifying the parameters of the request before it is sent to the LLM provider |
+| enable_preview_features | boolean | If true, enables preview features - e.g. Azure O1 Models with streaming support (see the example config after this table) |
 | redact_user_api_key_info | boolean | If true, redacts information about the user api key from logs [Proxy Logging](logging#redacting-userapikeyinfo) |
 | langfuse_default_tags | array of strings | Default tags for Langfuse Logging. Use this if you want to control which LiteLLM-specific fields are logged as tags by the LiteLLM proxy. By default LiteLLM Proxy logs no LiteLLM-specific fields as tags. [Further docs](./logging#litellm-specific-tags-on-langfuse---cache_hit-cache_key) |
 | set_verbose | boolean | If true, sets litellm.set_verbose=True to view verbose debug logs. DO NOT LEAVE THIS ON IN PRODUCTION |
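
For reference, a minimal sketch of how the two new flags might sit together in the proxy's `config.yaml`. The azure.md example in this patch places `enable_preview_features` under `litellm_settings`; placing `modify_params` alongside it is an assumption for illustration:

```yaml
litellm_settings:
  # assumed placement: allow LiteLLM to adjust request params before sending to the provider
  modify_params: true
  # opt in to preview features, e.g. streaming support for Azure O1 models
  enable_preview_features: true
```

With `enable_preview_features: true` set, the proxy test above (`o1-mini` with `stream=True`) is expected to work as shown.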