mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-25 02:34:29 +00:00
(doc) Add nvidia as provider (#8023)
* add nvidia as provider in docs * fixes for closing tag * review changes
This commit is contained in:
parent
022917b7b5
commit
986c463983
3 changed files with 106 additions and 0 deletions
|
@ -323,6 +323,40 @@ response = embedding(
|
|||
| embed-english-light-v2.0 | `embedding(model="embed-english-light-v2.0", input=["good morning from litellm", "this is another item"])` |
|
||||
| embed-multilingual-v2.0 | `embedding(model="embed-multilingual-v2.0", input=["good morning from litellm", "this is another item"])` |
|
||||
|
||||
## NVIDIA NIM Embedding Models
|
||||
|
||||
### API keys
|
||||
These can be set as env variables or passed as **params to litellm.embedding()**
|
||||
```python
|
||||
import os
|
||||
os.environ["NVIDIA_NIM_API_KEY"] = "" # api key
|
||||
os.environ["NVIDIA_NIM_API_BASE"] = "" # nim endpoint url
|
||||
```
|
||||
|
||||
### Usage
|
||||
```python
|
||||
from litellm import embedding
|
||||
import os
|
||||
os.environ['NVIDIA_NIM_API_KEY'] = ""
|
||||
response = embedding(
|
||||
model='nvidia_nim/<model_name>',
|
||||
input=["good morning from litellm"]
|
||||
)
|
||||
```
|
||||
All models listed [here](https://build.nvidia.com/explore/retrieval) are supported:
|
||||
|
||||
| Model Name | Function Call |
|
||||
| :--- | :--- |
|
||||
| NV-Embed-QA | `embedding(model="nvidia_nim/NV-Embed-QA", input=["good morning from litellm"])` |
|
||||
| nvidia/nv-embed-v1 | `embedding(model="nvidia_nim/nvidia/nv-embed-v1", input=["good morning from litellm"])` |
|
||||
| nvidia/nv-embedqa-mistral-7b-v2 | `embedding(model="nvidia_nim/nvidia/nv-embedqa-mistral-7b-v2", input=["good morning from litellm"])` |
|
||||
| nvidia/nv-embedqa-e5-v5 | `embedding(model="nvidia_nim/nvidia/nv-embedqa-e5-v5", input=["good morning from litellm"])` |
|
||||
| nvidia/embed-qa-4 | `embedding(model="nvidia_nim/nvidia/embed-qa-4", input=["good morning from litellm"])` |
|
||||
| nvidia/llama-3.2-nv-embedqa-1b-v1 | `embedding(model="nvidia_nim/nvidia/llama-3.2-nv-embedqa-1b-v1", input=["good morning from litellm"])` |
|
||||
| nvidia/llama-3.2-nv-embedqa-1b-v2 | `embedding(model="nvidia_nim/nvidia/llama-3.2-nv-embedqa-1b-v2", input=["good morning from litellm"])` |
|
||||
| snowflake/arctic-embed-l | `embedding(model="nvidia_nim/snowflake/arctic-embed-l", input=["good morning from litellm"])` |
|
||||
| baai/bge-m3 | `embedding(model="nvidia_nim/baai/bge-m3", input=["good morning from litellm"])` |
|
||||
|
||||
## HuggingFace Embedding Models
|
||||
LiteLLM supports all Feature-Extraction + Sentence Similarity Embedding models: https://huggingface.co/models?pipeline_tag=feature-extraction
|
||||
|
||||
|
|
|
@ -108,6 +108,24 @@ response = completion(
|
|||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="nvidia" label="NVIDIA">
|
||||
|
||||
```python
|
||||
from litellm import completion
|
||||
import os
|
||||
|
||||
## set ENV variables
|
||||
os.environ["NVIDIA_NIM_API_KEY"] = "nvidia_api_key"
|
||||
os.environ["NVIDIA_NIM_API_BASE"] = "nvidia_nim_endpoint_url"
|
||||
|
||||
response = completion(
|
||||
model="nvidia_nim/<model_name>",
|
||||
    messages=[{"content": "Hello, how are you?", "role": "user"}]
|
||||
)
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="hugging" label="HuggingFace">
|
||||
|
||||
```python
|
||||
|
@ -274,6 +292,24 @@ response = completion(
|
|||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="nvidia" label="NVIDIA">
|
||||
|
||||
```python
|
||||
from litellm import completion
|
||||
import os
|
||||
|
||||
## set ENV variables
|
||||
os.environ["NVIDIA_NIM_API_KEY"] = "nvidia_api_key"
|
||||
os.environ["NVIDIA_NIM_API_BASE"] = "nvidia_nim_endpoint_url"
|
||||
|
||||
response = completion(
|
||||
model="nvidia_nim/<model_name>",
|
||||
    messages=[{"content": "Hello, how are you?", "role": "user"}],
|
||||
stream=True,
|
||||
)
|
||||
```
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="hugging" label="HuggingFace">
|
||||
|
||||
```python
|
||||
|
|
|
@ -108,6 +108,24 @@ response = completion(
|
|||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="nvidia" label="NVIDIA">
|
||||
|
||||
```python
|
||||
from litellm import completion
|
||||
import os
|
||||
|
||||
## set ENV variables
|
||||
os.environ["NVIDIA_NIM_API_KEY"] = "nvidia_api_key"
|
||||
os.environ["NVIDIA_NIM_API_BASE"] = "nvidia_nim_endpoint_url"
|
||||
|
||||
response = completion(
|
||||
model="nvidia_nim/<model_name>",
|
||||
    messages=[{"content": "Hello, how are you?", "role": "user"}]
|
||||
)
|
||||
```
|
||||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="hugging" label="HuggingFace">
|
||||
|
||||
```python
|
||||
|
@ -238,6 +256,24 @@ response = completion(
|
|||
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="nvidia" label="NVIDIA">
|
||||
|
||||
```python
|
||||
from litellm import completion
|
||||
import os
|
||||
|
||||
## set ENV variables
|
||||
os.environ["NVIDIA_NIM_API_KEY"] = "nvidia_api_key"
|
||||
os.environ["NVIDIA_NIM_API_BASE"] = "nvidia_nim_endpoint_url"
|
||||
|
||||
response = completion(
|
||||
model="nvidia_nim/<model_name>",
|
||||
    messages=[{"content": "Hello, how are you?", "role": "user"}],
|
||||
stream=True,
|
||||
)
|
||||
```
|
||||
</TabItem>
|
||||
|
||||
<TabItem value="hugging" label="HuggingFace">
|
||||
|
||||
```python
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue