Merge pull request #1535 from kihaya/docs/fix_sample_code_import_for_providers

docs: Fix import statement for provider's sample code

Commit d60af6fff5 · 4 changed files with 7 additions and 7 deletions
Gemini-Pro provider docs:

````diff
@@ -6,7 +6,7 @@
 # Gemini-Pro
 ## Sample Usage
 ```python
-import litellm
+from litellm import completion
 import os
 
 os.environ['GEMINI_API_KEY'] = ""
````
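Put together, the corrected Gemini sample reads as below. This is a minimal sketch: the `gemini/` model prefix follows LiteLLM's provider-routing convention and the prompt is assumed, since neither appears in this hunk.

```python
# Minimal sketch of the corrected sample, not the full doc snippet.
# Assumption: the gemini/ prefix routes the call to the Gemini provider.
from litellm import completion
import os

os.environ['GEMINI_API_KEY'] = ""  # paste a valid Gemini API key here

response = completion(
    model="gemini/gemini-pro",
    messages=[{"role": "user", "content": "Hello from LiteLLM"}],  # assumed prompt
)
print(response)
```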
PaLM provider docs:

````diff
@@ -5,7 +5,7 @@
 
 ## Sample Usage
 ```python
-import litellm
+from litellm import completion
 import os
 
 os.environ['PALM_API_KEY'] = ""
````
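A minimal runnable sketch of the corrected PaLM sample, assuming the `palm/chat-bison` model name (the model line sits outside this hunk):

```python
# Minimal sketch; palm/chat-bison is an assumed model name based on
# LiteLLM's palm/ provider prefix convention.
from litellm import completion
import os

os.environ['PALM_API_KEY'] = ""  # paste a valid PaLM API key here

response = completion(
    model="palm/chat-bison",
    messages=[{"role": "user", "content": "Hello from LiteLLM"}],
)
print(response)
```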
````diff
@@ -17,7 +17,7 @@ response = completion(
 
 ## Sample Usage - Streaming
 ```python
-import litellm
+from litellm import completion
 import os
 
 os.environ['PALM_API_KEY'] = ""
````
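And the streaming variant, again a sketch under the same assumed model name; `stream=True` makes `completion` return an iterator of chunks rather than a single response object:

```python
# Minimal streaming sketch; stream=True yields chunks instead of one object.
from litellm import completion
import os

os.environ['PALM_API_KEY'] = ""

response = completion(
    model="palm/chat-bison",  # assumed model name
    messages=[{"role": "user", "content": "Hello from LiteLLM"}],
    stream=True,
)
for chunk in response:
    print(chunk)
```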
VertexAI provider docs:

````diff
@@ -17,7 +17,7 @@ import litellm
 litellm.vertex_project = "hardy-device-38811" # Your Project ID
 litellm.vertex_location = "us-central1" # proj location
 
-response = completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
+response = litellm.completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
 ```
 
 ## Set Vertex Project & Vertex Location
````
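This hunk shows why the fix matters: the snippet only runs `import litellm`, so a bare `completion(...)` would raise `NameError`. A sketch of the corrected call:

```python
# Sketch of the corrected VertexAI sample; litellm.completion is used
# because only `import litellm` is in scope here.
import litellm

litellm.vertex_project = "hardy-device-38811"  # your GCP project ID
litellm.vertex_location = "us-central1"        # project location

response = litellm.completion(
    model="gemini-pro",
    messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}],
)
print(response)
```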
VLLM provider docs:

````diff
@@ -11,7 +11,7 @@ pip install litellm vllm
 ```python
 import litellm
 
-response = completion(
+response = litellm.completion(
     model="vllm/facebook/opt-125m", # add a vllm prefix so litellm knows the custom_llm_provider==vllm
     messages=messages,
     temperature=0.2,
````
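The same fix applied to the local vLLM sample; the sketch below fills in a `messages` list, which the original doc defines outside this hunk:

```python
# Minimal sketch of the corrected vLLM sample; `messages` is assumed,
# since its definition sits outside the hunk.
import litellm

messages = [{"role": "user", "content": "Hello from LiteLLM"}]

response = litellm.completion(
    model="vllm/facebook/opt-125m",  # vllm/ prefix selects the vLLM provider
    messages=messages,
    temperature=0.2,
)
print(response)
```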
````diff
@@ -29,7 +29,7 @@ In order to use litellm to call a hosted vllm server add the following to your c
 ```python
 import litellm
 
-response = completion(
+response = litellm.completion(
     model="openai/facebook/opt-125m", # pass the vllm model name
     messages=messages,
     api_base="https://hosted-vllm-api.co",
````
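And for the hosted-vLLM case, where the `openai/` prefix tells LiteLLM to speak the OpenAI-compatible protocol to the given `api_base`; again `messages` is assumed:

```python
# Minimal sketch of the corrected hosted-vLLM sample.
import litellm

messages = [{"role": "user", "content": "Hello from LiteLLM"}]  # assumed

response = litellm.completion(
    model="openai/facebook/opt-125m",  # vLLM model behind an OpenAI-compatible server
    messages=messages,
    api_base="https://hosted-vllm-api.co",
)
print(response)
```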