mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-27 03:34:10 +00:00)
expose vertex ai and hf api base as env var
This commit is contained in:
parent dbbe519e19
commit 1d86448b6a

5 changed files with 31 additions and 6 deletions
@@ -72,12 +72,13 @@ response = completion(
print(response)
```

-### [OPTIONAL] API KEYS
-If the endpoint you're calling requires an api key to be passed, set it in your os environment. [Code for how it's sent](https://github.com/BerriAI/litellm/blob/0100ab2382a0e720c7978fbf662cc6e6920e7e03/litellm/llms/huggingface_restapi.py#L25)
+### [OPTIONAL] API KEYS + API BASE
+If required, you can set the api key + api base in your os environment. [Code for how it's sent](https://github.com/BerriAI/litellm/blob/0100ab2382a0e720c7978fbf662cc6e6920e7e03/litellm/llms/huggingface_restapi.py#L25)

```python
import os
os.environ["HUGGINGFACE_API_KEY"] = ""
+os.environ["HUGGINGFACE_API_BASE"] = ""
```

### Models with Prompt Formatting

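To make the new `HUGGINGFACE_API_BASE` option concrete, here is a minimal usage sketch (outside this diff). It assumes litellm's top-level `completion()` call and the `custom_llm_provider="huggingface"` argument for provider routing; the endpoint URL and model name are placeholders.

```python
import os
from litellm import completion

# Placeholder values: point litellm at a dedicated inference endpoint
# instead of the default https://api-inference.huggingface.co/models/{model}.
os.environ["HUGGINGFACE_API_BASE"] = "https://my-endpoint.example.com"
os.environ["HUGGINGFACE_API_KEY"] = "hf_xxx"  # only if the endpoint requires a key

response = completion(
    model="bigcode/starcoder",  # illustrative model name
    messages=[{"role": "user", "content": "Hello, how are you?"}],
    custom_llm_provider="huggingface",  # assumption: routes to the Hugging Face handler
)
print(response)
```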
@@ -13,9 +13,29 @@
## Set Vertex Project & Vertex Location
All calls using Vertex AI require the following parameters:
* Your Project ID
-`litellm.vertex_project = "hardy-device-38811" Your Project ID`
+```python
+import os, litellm
+
+# set via env var
+os.environ["VERTEXAI_PROJECT"] = "hardy-device-38811"  # Your Project ID
+
+### OR ###
+
+# set directly on module
+litellm.vertex_project = "hardy-device-38811"  # Your Project ID
+```
* Your Project Location
-`litellm.vertex_location = "us-central1" `
+```python
+import os, litellm
+
+# set via env var
+os.environ["VERTEXAI_LOCATION"] = "us-central1"  # Your Location
+
+### OR ###
+
+# set directly on module
+litellm.vertex_location = "us-central1"  # Your Location
+```

## Sample Usage
```python
@@ -49,6 +49,8 @@ def completion(
        completion_url = api_base
    elif "HF_API_BASE" in os.environ:
        completion_url = os.getenv("HF_API_BASE", "")
+    elif "HUGGINGFACE_API_BASE" in os.environ:
+        completion_url = os.getenv("HUGGINGFACE_API_BASE", "")
    else:
        completion_url = f"https://api-inference.huggingface.co/models/{model}"
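The resolution order added here is: an explicit `api_base` argument wins, then `HF_API_BASE`, then the new `HUGGINGFACE_API_BASE`, and finally the public Inference API URL. Below is a standalone sketch of the same precedence; it is a simplified restatement, not the library's actual helper.

```python
import os
from typing import Optional

def resolve_hf_url(model: str, api_base: Optional[str] = None) -> str:
    """Simplified restatement of the URL precedence in huggingface_restapi.py."""
    if api_base:
        return api_base
    if "HF_API_BASE" in os.environ:
        return os.getenv("HF_API_BASE", "")
    if "HUGGINGFACE_API_BASE" in os.environ:
        return os.getenv("HUGGINGFACE_API_BASE", "")
    return f"https://api-inference.huggingface.co/models/{model}"

# The env var is only consulted when no api_base argument is given.
os.environ["HUGGINGFACE_API_BASE"] = "https://my-endpoint.example.com"
assert resolve_hf_url("gpt2") == "https://my-endpoint.example.com"
assert resolve_hf_url("gpt2", api_base="https://override.example.com") == "https://override.example.com"
```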
@@ -749,8 +749,10 @@ def completion(
            raise Exception("vertexai import failed please run `pip install google-cloud-aiplatform`")
        from vertexai.preview.language_models import ChatModel, CodeChatModel, InputOutputTextPair

+        vertex_project = (litellm.vertex_project or get_secret("VERTEXAI_PROJECT"))
+        vertex_location = (litellm.vertex_location or get_secret("VERTEXAI_LOCATION"))
        vertexai.init(
-            project=litellm.vertex_project, location=litellm.vertex_location
+            project=vertex_project, location=vertex_location
        )
        # vertexai does not use an API key, it looks for credentials.json in the environment
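With this change the project and location can come from the environment rather than module attributes; the module attributes still take precedence, since they are checked first in the `or` expression. A rough usage sketch follows, assuming `chat-bison` is a Vertex AI chat model string litellm recognizes and that Google application default credentials are already configured.

```python
import os
from litellm import completion

# Picked up via get_secret() when litellm.vertex_project / vertex_location are unset.
os.environ["VERTEXAI_PROJECT"] = "hardy-device-38811"  # your GCP project ID
os.environ["VERTEXAI_LOCATION"] = "us-central1"        # your region

# No API key: vertexai relies on Google application default credentials
# (e.g. GOOGLE_APPLICATION_CREDENTIALS pointing at a service-account JSON).
response = completion(
    model="chat-bison",  # assumption: a Vertex AI chat model
    messages=[{"role": "user", "content": "write code for saying hi"}],
)
print(response)
```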
@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm"
-version = "0.1.734"
+version = "0.1.735"
description = "Library to easily interface with LLM API providers"
authors = ["BerriAI"]
license = "MIT License"