mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-24 18:24:20 +00:00
Merge dd79517f2d
into b82af5b826
This commit is contained in:
commit
44288d5bae
3 changed files with 46 additions and 2 deletions
20
README.md
20
README.md
|
@ -111,6 +111,26 @@ print(response)
|
|||
|
||||
Call any model supported by a provider, with `model=<provider_name>/<model_name>`. There might be provider-specific details here, so refer to [provider docs for more information](https://docs.litellm.ai/docs/providers)
|
||||
|
||||
## Environment Variable Configuration
|
||||
|
||||
You can configure LiteLLM using environment variables:
|
||||
|
||||
| Variable | Description |
|
||||
| ------------------ | ------------------------------------------------ |
|
||||
| `LITELLM_API_KEY` | Used as the default value for `litellm.api_key` |
|
||||
| `LITELLM_API_BASE` | Used as the default value for `litellm.api_base` |
|
||||
|
||||
If not explicitly set in code, these values will be automatically picked up:
|
||||
|
||||
```python
|
||||
import litellm
|
||||
|
||||
print(litellm.api_key) # Will output the value from LITELLM_API_KEY if not manually set
|
||||
print(litellm.api_base) # Will output the value from LITELLM_API_BASE if not manually set
|
||||
```
|
||||
|
||||
This is helpful when running LiteLLM in CI/CD pipelines, containerized environments, or local `.env` setups.
|
||||
|
||||
## Async ([Docs](https://docs.litellm.ai/docs/completion/stream#async-completion))
|
||||
|
||||
```python
|
||||
|
|
|
@ -167,7 +167,7 @@ drop_params = bool(os.getenv("LITELLM_DROP_PARAMS", False))
|
|||
modify_params = bool(os.getenv("LITELLM_MODIFY_PARAMS", False))
retry = True
### AUTH ###
# Default to the LITELLM_API_KEY environment variable so deployments can
# supply credentials without code changes (CI/CD pipelines, containers,
# local .env setups). Falls back to None when the variable is unset.
api_key: Optional[str] = os.getenv("LITELLM_API_KEY", None)
openai_key: Optional[str] = None
groq_key: Optional[str] = None
databricks_key: Optional[str] = None
|
||||
|
@ -345,7 +345,7 @@ def identify(event_details):
|
|||
|
||||
|
||||
####### ADDITIONAL PARAMS ################### configurable params if you use proxy models like Helicone, map spend to org id, etc.
# Default to the LITELLM_API_BASE environment variable so the upstream
# endpoint can be configured per-deployment; None when the variable is unset.
api_base: Optional[str] = os.getenv("LITELLM_API_BASE", None)
headers = None
api_version = None
organization = None
||||
|
|
24
tests/litellm/test_init.py
Normal file
24
tests/litellm/test_init.py
Normal file
|
@ -0,0 +1,24 @@
|
|||
# tests/litellm/test_init.py
|
||||
|
||||
import importlib
|
||||
import sys
|
||||
|
||||
def test_api_key_from_env(monkeypatch):
    """litellm.api_key should default to LITELLM_API_KEY at import time.

    Uses pytest's ``monkeypatch`` fixture so the environment change is
    automatically undone after the test.
    """
    monkeypatch.setenv("LITELLM_API_KEY", "mocked_key")

    # Drop any cached module so the import below re-executes the module
    # body (and its os.getenv call) under the patched environment.
    # A fresh import makes a subsequent reload() redundant, so none is done.
    sys.modules.pop("litellm", None)
    import litellm

    assert litellm.api_key == "mocked_key"
|
||||
|
||||
def test_api_base_from_env(monkeypatch):
    """litellm.api_base should default to LITELLM_API_BASE at import time.

    Uses pytest's ``monkeypatch`` fixture so the environment change is
    automatically undone after the test.
    """
    monkeypatch.setenv("LITELLM_API_BASE", "https://mocked-base.com")

    # Drop any cached module so the import below re-executes the module
    # body (and its os.getenv call) under the patched environment.
    # A fresh import makes a subsequent reload() redundant, so none is done.
    sys.modules.pop("litellm", None)
    import litellm

    assert litellm.api_base == "https://mocked-base.com"
|
Loading…
Add table
Add a link
Reference in a new issue