diff --git a/docs/my-website/docs/providers/gemini.md b/docs/my-website/docs/providers/gemini.md
index b50a85087..9d5eb298b 100644
--- a/docs/my-website/docs/providers/gemini.md
+++ b/docs/my-website/docs/providers/gemini.md
@@ -6,7 +6,7 @@
 # Gemini-Pro
 ## Sample Usage
 ```python
-import litellm
+from litellm import completion
 import os

 os.environ['GEMINI_API_KEY'] = ""
@@ -24,7 +24,7 @@ LiteLLM Supports the following image types passed in `url`
 ## Sample Usage
 ```python
 import os
-import litellm
+import litellm
 from dotenv import load_dotenv

 # Load the environment variables from .env file
diff --git a/docs/my-website/docs/providers/palm.md b/docs/my-website/docs/providers/palm.md
index 39fcc207c..b41465b5f 100644
--- a/docs/my-website/docs/providers/palm.md
+++ b/docs/my-website/docs/providers/palm.md
@@ -5,7 +5,7 @@
 ## Sample Usage
 ```python
-import litellm
+from litellm import completion
 import os

 os.environ['PALM_API_KEY'] = ""
@@ -17,7 +17,7 @@ response = completion(
 ## Sample Usage - Streaming
 ```python
-import litellm
+from litellm import completion
 import os

 os.environ['PALM_API_KEY'] = ""
diff --git a/docs/my-website/docs/providers/vertex.md b/docs/my-website/docs/providers/vertex.md
index f71aa0ada..3349faa52 100644
--- a/docs/my-website/docs/providers/vertex.md
+++ b/docs/my-website/docs/providers/vertex.md
@@ -17,7 +17,7 @@ import litellm
 litellm.vertex_project = "hardy-device-38811" # Your Project ID
 litellm.vertex_location = "us-central1" # proj location

-response = completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
+response = litellm.completion(model="gemini-pro", messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}])
 ```

 ## Set Vertex Project & Vertex Location
diff --git a/docs/my-website/docs/providers/vllm.md b/docs/my-website/docs/providers/vllm.md
index df9e07ef7..b8285da71 100644
--- a/docs/my-website/docs/providers/vllm.md
+++ b/docs/my-website/docs/providers/vllm.md
@@ -11,7 +11,7 @@ pip install litellm vllm
 ```python
 import litellm

-response = completion(
+response = litellm.completion(
 model="vllm/facebook/opt-125m", # add a vllm prefix so litellm knows the custom_llm_provider==vllm
 messages=messages,
 temperature=0.2,
@@ -29,7 +29,7 @@ In order to use litellm to call a hosted vllm server add the following to your c
 ```python
 import litellm

-response = completion(
+response = litellm.completion(
 model="openai/facebook/opt-125m", # pass the vllm model name
 messages=messages,
 api_base="https://hosted-vllm-api.co",
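
Taken together, these edits make the doc snippets self-consistent: `completion` is either imported explicitly (`from litellm import completion`) or called through the module namespace (`litellm.completion`). A minimal runnable sketch of the corrected Vertex AI call, reusing the project ID, location, and prompt shown in the diff (the trailing `print` is illustrative, not part of the patched docs):

```python
import litellm

litellm.vertex_project = "hardy-device-38811"  # Your Project ID
litellm.vertex_location = "us-central1"        # proj location

# Qualified call, matching the `litellm.completion(...)` form the docs now use
response = litellm.completion(
    model="gemini-pro",
    messages=[{"role": "user", "content": "write code for saying hi from LiteLLM"}],
)
print(response)
```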