Merge branch 'main' into litellm_team_id_support

Krish Dholakia 2024-02-01 21:40:22 -08:00 committed by GitHub
commit f01dce02d4
5 changed files with 37 additions and 6 deletions


@@ -174,6 +174,31 @@ response = completion(
     messages=[{ "content": "Hello, how are you?","role": "user"}]
 )
 ```
+### Set `ssl_verify=False`
+
+This is done by setting your own `httpx.Client`:
+
+- For `litellm.completion`, set `litellm.client_session = httpx.Client(verify=False)`
+- For `litellm.acompletion`, set `litellm.aclient_session = httpx.AsyncClient(verify=False)`
+
+```python
+import litellm, httpx
+
+# for completion
+litellm.client_session = httpx.Client(verify=False)
+response = litellm.completion(
+    model="gpt-3.5-turbo",
+    messages=messages,
+)
+
+# for acompletion
+litellm.aclient_session = httpx.AsyncClient(verify=False)
+response = litellm.acompletion(
+    model="gpt-3.5-turbo",
+    messages=messages,
+)
+```
+
 ### Using Helicone Proxy with LiteLLM
 ```python
 import os
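Note that the docs snippet added above sets `litellm.aclient_session` but calls `litellm.acompletion` without awaiting it. A minimal sketch (not part of the commit) of how the async variant would actually be run, assuming an `OPENAI_API_KEY` is set in the environment and using an illustrative message payload:

```python
import asyncio

import httpx
import litellm

# Disable SSL verification for the async client, as in the docs snippet above.
litellm.aclient_session = httpx.AsyncClient(verify=False)

async def main():
    # acompletion is a coroutine, so it must be awaited inside an async function.
    response = await litellm.acompletion(
        model="gpt-3.5-turbo",
        messages=[{"content": "Hello, how are you?", "role": "user"}],
    )
    print(response)

asyncio.run(main())
```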


@@ -2,7 +2,7 @@
 import threading, requests, os
 from typing import Callable, List, Optional, Dict, Union, Any
 from litellm.caching import Cache
-from litellm._logging import set_verbose, _turn_on_debug
+from litellm._logging import set_verbose, _turn_on_debug, verbose_logger
 from litellm.proxy._types import KeyManagementSystem
 import httpx
 import dotenv


@@ -225,9 +225,6 @@ def test_health(client_no_auth):
     try:
         response = client_no_auth.get("/health")
         assert response.status_code == 200
-        result = response.json()
-        print("\n response from health:", result)
-        assert result["unhealthy_count"] == 0
     except Exception as e:
         pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}")


@@ -125,6 +125,15 @@
         "litellm_provider": "openai",
         "mode": "chat"
     },
+    "gpt-3.5-turbo-0125": {
+        "max_tokens": 16385,
+        "max_input_tokens": 16385,
+        "max_output_tokens": 4096,
+        "input_cost_per_token": 0.0000005,
+        "output_cost_per_token": 0.0000015,
+        "litellm_provider": "openai",
+        "mode": "chat"
+    },
     "gpt-3.5-turbo-16k": {
         "max_tokens": 16385,
         "max_input_tokens": 16385,


@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "1.20.10"
+version = "1.20.11"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT"
@@ -63,7 +63,7 @@ requires = ["poetry-core", "wheel"]
 build-backend = "poetry.core.masonry.api"
 
 [tool.commitizen]
-version = "1.20.10"
+version = "1.20.11"
 version_files = [
     "pyproject.toml:^version"
 ]