forked from phoenix/litellm-mirror
(docs) replicate input params
This commit is contained in:
parent
fe82e172b9
commit
b102489f49
1 changed files with 46 additions and 6 deletions
|
@ -18,13 +18,11 @@ import os
|
|||
## set ENV variables
|
||||
os.environ["REPLICATE_API_KEY"] = "replicate key"
|
||||
|
||||
messages = [{ "content": "Hello, how are you?","role": "user"}]
|
||||
|
||||
# replicate llama-2 call
|
||||
response = completion(
|
||||
model="replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf",
|
||||
messages=messages
|
||||
)
|
||||
```
|
||||
|
||||
### Replicate Models
|
||||
|
@ -40,5 +38,47 @@ Model Name | Function Call
|
|||
a16z-infra/llama-2-13b-chat| `completion('replicate/a16z-infra/llama-2-13b-chat:2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52', messages)`| `os.environ['REPLICATE_API_KEY']` |
|
||||
replicate/vicuna-13b | `completion('replicate/vicuna-13b:6282abe6a492de4145d7bb601023762212f9ddbbe78278bd6771c8b3b2f2a13b', messages)` | `os.environ['REPLICATE_API_KEY']` |
|
||||
daanelson/flan-t5-large | `completion('replicate/daanelson/flan-t5-large:ce962b3f6792a57074a601d3979db5839697add2e4e02696b3ced4c022d4767f', messages)` | `os.environ['REPLICATE_API_KEY']` |
|
||||
custom-llm | Ensure the `model` param has `replicate/` as a prefix `completion('replicate/custom-llm-version-id', messages)` | `os.environ['REPLICATE_API_KEY']` |
|
||||
|
||||
|
||||
### Passing additional params - max_tokens, temperature
|
||||
See all litellm.completion supported params [here](https://docs.litellm.ai/docs/completion/input)
|
||||
|
||||
```python
|
||||
# !pip install litellm
|
||||
from litellm import completion
|
||||
import os
|
||||
## set ENV variables
|
||||
os.environ["REPLICATE_API_KEY"] = "replicate key"
|
||||
|
||||
# replicate llama-2 call
|
||||
response = completion(
|
||||
model="replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf",
|
||||
messages=[{"content": "Hello, how are you?", "role": "user"}],
|
||||
max_tokens=20,
|
||||
temperature=0.5
|
||||
|
||||
)
|
||||
```
|
||||
|
||||
### Passing Replicate-specific params
|
||||
Send params [not supported by `litellm.completion()`](https://docs.litellm.ai/docs/completion/input) but supported by Replicate by passing them to `litellm.completion`
|
||||
|
||||
For example, `seed` and `min_tokens` are Replicate-specific params.
|
||||
|
||||
```python
|
||||
# !pip install litellm
|
||||
from litellm import completion
|
||||
import os
|
||||
## set ENV variables
|
||||
os.environ["REPLICATE_API_KEY"] = "replicate key"
|
||||
|
||||
# replicate llama-2 call
|
||||
response = completion(
|
||||
model="replicate/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf",
|
||||
messages=[{"content": "Hello, how are you?", "role": "user"}],
|
||||
seed=-1,
|
||||
min_tokens=2,
|
||||
top_k=20,
|
||||
)
|
||||
```
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue