forked from phoenix/litellm-mirror
docs(quick_start.md): add example of using proxy with litellm sdk
This commit is contained in:
parent
200f58c1e7
commit
2afdf34a9f
1 changed file with 21 additions and 0 deletions
@@ -283,6 +283,27 @@ response = client.chat.completions.create(model="gpt-3.5-turbo", messages = [
 print(response)
 
 ```
 </TabItem>
+<TabItem value="litellm" label="LiteLLM SDK">
+
+```python
+from litellm import completion
+
+response = completion(
+    model="openai/gpt-3.5-turbo",
+    messages = [
+        {
+            "role": "user",
+            "content": "this is a test request, write a short poem"
+        }
+    ],
+    api_key="anything",
+    base_url="http://0.0.0.0:4000"
+)
+
+print(response)
+
+```
+</TabItem>
 <TabItem value="langchain" label="Langchain">
 
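The added snippet points the LiteLLM SDK at the proxy by passing the proxy URL as `base_url`; the `openai/` prefix on the model name tells litellm to treat the endpoint as OpenAI-compatible, so the proxy receives the request and supplies the real provider credentials (hence the placeholder `api_key="anything"`, which works when the proxy has no auth configured). As a minimal sketch beyond what the commit adds, the same call should also work with litellm's async `acompletion`, assuming a proxy is running locally on port 4000 (e.g. started with `litellm --model gpt-3.5-turbo`):

```python
import asyncio

from litellm import acompletion


async def main():
    # Same request as the docs example, routed through the local proxy,
    # but using litellm's async API instead of the blocking completion().
    response = await acompletion(
        model="openai/gpt-3.5-turbo",
        messages=[
            {
                "role": "user",
                "content": "this is a test request, write a short poem",
            }
        ],
        api_key="anything",  # placeholder; the proxy holds the real key
        base_url="http://0.0.0.0:4000",  # assumed local proxy address
    )
    print(response)


asyncio.run(main())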