Merge pull request #1535 from kihaya/docs/fix_sample_code_import_for_providers

docs: Fix import statement for provider's sample code
Ishaan Jaff 2024-01-20 08:42:42 -08:00 committed by GitHub
commit d60af6fff5
4 changed files with 7 additions and 7 deletions


@@ -6,7 +6,7 @@
 # Gemini-Pro
 ## Sample Usage
 ```python
-import litellm
+from litellm import completion
 import os
 
 os.environ['GEMINI_API_KEY'] = ""
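For reference, a minimal sketch of the Gemini sample as it reads after this patch. Only the import line is in the hunk; the `gemini/gemini-pro` model string and the messages payload below are assumptions based on LiteLLM's usual provider-prefix convention, not part of this diff:

```python
from litellm import completion
import os

os.environ['GEMINI_API_KEY'] = ""

# Assumed call shape -- the diff only touches the import line.
response = completion(
    model="gemini/gemini-pro",  # gemini/ prefix routes to the Google AI Studio provider
    messages=[{"role": "user", "content": "Hello from LiteLLM"}],
)
print(response)
```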


@@ -5,7 +5,7 @@
 ## Sample Usage
 ```python
-import litellm
+from litellm import completion
 import os
 
 os.environ['PALM_API_KEY'] = ""
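The PaLM doc gets the same import fix. A sketch of the patched sample, with the `palm/chat-bison` model string assumed (it is not shown in this hunk):

```python
from litellm import completion
import os

os.environ['PALM_API_KEY'] = ""

# Assumed call shape -- only the import appears in the hunk.
response = completion(
    model="palm/chat-bison",  # palm/ prefix selects the PaLM provider
    messages=[{"role": "user", "content": "Hello from LiteLLM"}],
)
```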
@@ -17,7 +17,7 @@ response = completion(
 ## Sample Usage - Streaming
 ```python
-import litellm
+from litellm import completion
 import os
 
 os.environ['PALM_API_KEY'] = ""
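The streaming sample differs only in passing `stream=True` and iterating the returned chunks; everything past the import is again an assumed sketch:

```python
from litellm import completion
import os

os.environ['PALM_API_KEY'] = ""

# stream=True yields an iterator of partial-response chunks.
response = completion(
    model="palm/chat-bison",  # assumed model string, as above
    messages=[{"role": "user", "content": "Hello from LiteLLM"}],
    stream=True,
)
for chunk in response:
    print(chunk)
```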


@@ -17,7 +17,7 @@ import litellm
 litellm.vertex_project = "hardy-device-38811" # Your Project ID
 litellm.vertex_location = "us-central1" # proj location
 
-response = completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
+response = litellm.completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
 ```
 
 ## Set Vertex Project & Vertex Location
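This Vertex AI hunk contains the whole patched call, so the corrected sample can be reassembled directly from the diff; only the trailing print is added here:

```python
import litellm

litellm.vertex_project = "hardy-device-38811"  # your project ID
litellm.vertex_location = "us-central1"        # project location

# After the patch, completion is called through the litellm namespace.
response = litellm.completion(
    model="gemini-pro",
    messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}],
)
print(response)
```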


@@ -11,7 +11,7 @@ pip install litellm vllm
 ```python
 import litellm
 
-response = completion(
+response = litellm.completion(
     model="vllm/facebook/opt-125m", # add a vllm prefix so litellm knows the custom_llm_provider==vllm
     messages=messages,
     temperature=0.2,
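Reassembled from this hunk, a sketch of the local vLLM sample; the `messages` list is not shown in the diff and is assumed here:

```python
import litellm

# Assumed payload -- not shown in the hunk.
messages = [{"role": "user", "content": "Hello, how are you?"}]

response = litellm.completion(
    model="vllm/facebook/opt-125m",  # vllm/ prefix sets custom_llm_provider == vllm
    messages=messages,
    temperature=0.2,
)
```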
@@ -29,7 +29,7 @@ In order to use litellm to call a hosted vllm server add the following to your code
 ```python
 import litellm
 
-response = completion(
+response = litellm.completion(
     model="openai/facebook/opt-125m", # pass the vllm model name
     messages=messages,
     api_base="https://hosted-vllm-api.co",
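And the hosted-server variant, under the same assumption about `messages`; the `openai/` prefix tells LiteLLM to speak the OpenAI-compatible API exposed by the vLLM server at `api_base`:

```python
import litellm

# Assumed payload -- not shown in the hunk.
messages = [{"role": "user", "content": "Hello, how are you?"}]

response = litellm.completion(
    model="openai/facebook/opt-125m",  # pass the vllm model name behind the openai/ prefix
    messages=messages,
    api_base="https://hosted-vllm-api.co",
)
```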