add nlp cloud details to docs
commit 90b26a437c (parent 7d00c63cf6)
2 changed files with 56 additions and 0 deletions
docs/my-website/docs/providers/nlp_cloud.md (new file, 55 lines)
@@ -0,0 +1,55 @@
# NLP Cloud

LiteLLM supports all LLMs on NLP Cloud.

## quick start

```python
import os
from litellm import completion

# set env
os.environ["NLP_CLOUD_API_KEY"] = "your-key"

messages = [{"role": "user", "content": "Hey! how's it going?"}]

response = completion(model="dolphin", messages=messages)
print(response)
```
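
The object `completion` returns follows the OpenAI chat-completion format, so the generated text can be read straight off the response. A minimal sketch, assuming the same dict-style access used above:

```python
# the response mirrors the OpenAI chat-completion shape,
# so the generated text sits under choices[0] -> message -> content
print(response["choices"][0]["message"]["content"])
```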

## streaming

Just set `stream=True` when calling `completion`.

```python
import os
from litellm import completion

# set env
os.environ["NLP_CLOUD_API_KEY"] = "your-key"

messages = [{"role": "user", "content": "Hey! how's it going?"}]

response = completion(model="dolphin", messages=messages, stream=True)
for chunk in response:
    print(chunk["choices"][0]["delta"]["content"])  # same as openai format
```
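
To collect the full completion instead of printing chunk by chunk, a variant of the loop above can accumulate the pieces. This is a sketch, assuming the same dict-style chunk access as the snippet above and guarding against chunks whose delta content is empty:

```python
# accumulate the streamed completion; skip chunks whose
# delta content is empty or None (a defensive sketch)
full_text = ""
for chunk in response:
    piece = chunk["choices"][0]["delta"]["content"]
    if piece:
        full_text += piece
print(full_text)
```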

## non-dolphin models

By default, LiteLLM maps `dolphin` and `chatdolphin` to NLP Cloud.

To call any other model hosted on NLP Cloud (e.g. GPT-J, Llama-2, etc.), set NLP Cloud as the custom LLM provider by prefixing the model name with `nlp_cloud/`.

```python
import os
from litellm import completion

# set env
os.environ["NLP_CLOUD_API_KEY"] = "your-key"

messages = [{"role": "user", "content": "Hey! how's it going?"}]

# e.g. to call Llama2 on NLP Cloud
response = completion(model="nlp_cloud/finetuned-llama-2-70b", messages=messages, stream=True)
for chunk in response:
    print(chunk["choices"][0]["delta"]["content"])  # same as openai format
```
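
The `nlp_cloud/` prefix works for non-streaming calls too. A sketch calling GPT-J (the model name here is illustrative; substitute any model your NLP Cloud account can access):

```python
# the "nlp_cloud/" prefix routes the request to NLP Cloud;
# "gpt-j" is an illustrative model name
response = completion(model="nlp_cloud/gpt-j", messages=messages)
print(response["choices"][0]["message"]["content"])
```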

docs/my-website/sidebars.js
@@ -61,6 +61,7 @@ const sidebars = {
   "providers/huggingface",
   "providers/vllm",
   "providers/ai21",
+  "providers/nlp_cloud",
   "providers/replicate",
   "providers/cohere",
   "providers/togetherai",