forked from phoenix/litellm-mirror
docs - show how to use with Azure OpenAI
This commit is contained in:
parent
3a06e2e425
commit
de13d06ce6
2 changed files with 34 additions and 1 deletions
|
@ -48,6 +48,39 @@ response = client.chat.completions.create(
|
|||
}
|
||||
)
|
||||
|
||||
print(response)
|
||||
```
|
||||
</TabItem>
|
||||
<TabItem value="azureopenai" label="AzureOpenAI Python">
|
||||
|
||||
Set `extra_body={"metadata": { }}` to the `metadata` you want to pass
|
||||
|
||||
```python
|
||||
import openai
|
||||
client = openai.AzureOpenAI(
|
||||
api_key="anything",
|
||||
base_url="http://0.0.0.0:4000"
|
||||
)
|
||||
|
||||
# request sent to model set on litellm proxy, `litellm --model`
|
||||
response = client.chat.completions.create(
|
||||
model="gpt-3.5-turbo",
|
||||
messages = [
|
||||
{
|
||||
"role": "user",
|
||||
"content": "this is a test request, write a short poem"
|
||||
}
|
||||
],
|
||||
extra_body={ # pass in any provider-specific param, if not supported by openai, https://docs.litellm.ai/docs/completion/input#provider-specific-params
|
||||
"metadata": { # 👈 use for logging additional params (e.g. to langfuse)
|
||||
"generation_name": "ishaan-generation-openai-client",
|
||||
"generation_id": "openai-client-gen-id22",
|
||||
"trace_id": "openai-client-trace-id22",
|
||||
"trace_user_id": "openai-client-user-id2"
|
||||
}
|
||||
}
|
||||
)
|
||||
|
||||
print(response)
|
||||
```
|
||||
</TabItem>
|
||||
|
|
|
@ -37,6 +37,7 @@ const sidebars = {
|
|||
href: "https://litellm-api.up.railway.app/",
|
||||
},
|
||||
"proxy/enterprise",
|
||||
"proxy/user_keys",
|
||||
"proxy/demo",
|
||||
"proxy/configs",
|
||||
"proxy/reliability",
|
||||
|
@ -46,7 +47,6 @@ const sidebars = {
|
|||
"proxy/team_budgets",
|
||||
"proxy/customers",
|
||||
"proxy/billing",
|
||||
"proxy/user_keys",
|
||||
"proxy/virtual_keys",
|
||||
"proxy/guardrails",
|
||||
"proxy/token_auth",
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue