diff --git a/docs/my-website/docs/providers/openai.md b/docs/my-website/docs/providers/openai.md index 26f4a7d690..dd026661d8 100644 --- a/docs/my-website/docs/providers/openai.md +++ b/docs/my-website/docs/providers/openai.md @@ -174,6 +174,31 @@ response = completion( messages=[{ "content": "Hello, how are you?","role": "user"}] ) ``` + +### Set `ssl_verify=False` + +This is done by setting your own `httpx.Client` + +- For `litellm.completion` set `litellm.client_session=httpx.Client(verify=False)` +- For `litellm.acompletion` set `litellm.aclient_session=AsyncClient.Client(verify=False)` +```python +import litellm, httpx + +# for completion +litellm.client_session = httpx.Client(verify=False) +response = litellm.completion( + model="gpt-3.5-turbo", + messages=messages, +) + +# for acompletion +litellm.aclient_session = httpx.AsyncClient(verify=False) +response = litellm.acompletion( + model="gpt-3.5-turbo", + messages=messages, +) +``` + ### Using Helicone Proxy with LiteLLM ```python import os diff --git a/litellm/__init__.py b/litellm/__init__.py index 4e16319c64..0021daef0b 100644 --- a/litellm/__init__.py +++ b/litellm/__init__.py @@ -2,7 +2,7 @@ import threading, requests, os from typing import Callable, List, Optional, Dict, Union, Any from litellm.caching import Cache -from litellm._logging import set_verbose, _turn_on_debug +from litellm._logging import set_verbose, _turn_on_debug, verbose_logger from litellm.proxy._types import KeyManagementSystem import httpx import dotenv diff --git a/litellm/tests/test_proxy_server.py b/litellm/tests/test_proxy_server.py index 4e0f706eb0..70fef0e064 100644 --- a/litellm/tests/test_proxy_server.py +++ b/litellm/tests/test_proxy_server.py @@ -225,9 +225,6 @@ def test_health(client_no_auth): try: response = client_no_auth.get("/health") assert response.status_code == 200 - result = response.json() - print("\n response from health:", result) - assert result["unhealthy_count"] == 0 except Exception as e: pytest.fail(f"LiteLLM Proxy test failed. Exception - {str(e)}") diff --git a/model_prices_and_context_window.json b/model_prices_and_context_window.json index 112d1daa6e..3f92c61d2c 100644 --- a/model_prices_and_context_window.json +++ b/model_prices_and_context_window.json @@ -125,6 +125,15 @@ "litellm_provider": "openai", "mode": "chat" }, + "gpt-3.5-turbo-0125": { + "max_tokens": 16385, + "max_input_tokens": 16385, + "max_output_tokens": 4096, + "input_cost_per_token": 0.0000005, + "output_cost_per_token": 0.0000015, + "litellm_provider": "openai", + "mode": "chat" + }, "gpt-3.5-turbo-16k": { "max_tokens": 16385, "max_input_tokens": 16385, diff --git a/pyproject.toml b/pyproject.toml index b28f713a4d..ffad6bbe7e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "litellm" -version = "1.20.10" +version = "1.20.11" description = "Library to easily interface with LLM API providers" authors = ["BerriAI"] license = "MIT" @@ -63,7 +63,7 @@ requires = ["poetry-core", "wheel"] build-backend = "poetry.core.masonry.api" [tool.commitizen] -version = "1.20.10" +version = "1.20.11" version_files = [ "pyproject.toml:^version" ]