docs update

parent c45e2ed48c
commit 68d994f980
2 changed files with 21 additions and 2 deletions
@@ -8,7 +8,7 @@ LiteLLM supports all models on VLLM.
 ```
 pip install litellm vllm
 ```
-```
+```python
 import litellm
 
 response = completion(
@@ -20,6 +20,25 @@ response = completion(
 print(response)
 ```
 
+#### Calling hosted VLLM Server
+
+To use litellm to call a hosted vllm server, add the following to your completion call:
+
+* `custom_llm_provider == "openai"`
+* `api_base = "your-hosted-vllm-server/v1"`
+
+```python
+from litellm import completion
+
+response = completion(
+    model="vllm/facebook/opt-125m",
+    messages=messages,
+    temperature=0.2,
+    max_tokens=80)
+
+print(response)
+```
+
 ### Batch Completion
 
 ```
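Note that the bullets in the added docs name `custom_llm_provider` and `api_base`, yet the example call never passes either one. Below is a minimal sketch of what the hosted-server call would look like with both arguments supplied, keeping the docs' own placeholder URL; the `messages` payload is illustrative and not part of this commit:

```python
from litellm import completion

# Illustrative placeholder prompt; any OpenAI-style message list works.
messages = [{"role": "user", "content": "Hello, how are you?"}]

# Assumed wiring for the two bullets above: route through litellm's
# OpenAI-compatible path, pointed at the hosted vllm server's /v1 endpoint.
# The api_base value is the docs' placeholder, not a real URL.
response = completion(
    model="facebook/opt-125m",
    messages=messages,
    custom_llm_provider="openai",
    api_base="your-hosted-vllm-server/v1",
    temperature=0.2,
    max_tokens=80,
)

print(response)
```

With `custom_llm_provider="openai"`, litellm treats the endpoint as an OpenAI-compatible server, so the model name is forwarded to the hosted vllm server as given rather than resolved against litellm's own provider list.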
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.562"
+version = "0.1.563"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"