diff --git a/docs/my-website/docs/proxy_server.md b/docs/my-website/docs/proxy_server.md
new file mode 100644
index 000000000..2d5c801b4
--- /dev/null
+++ b/docs/my-website/docs/proxy_server.md
@@ -0,0 +1,34 @@
+# OpenAI Proxy Server
+
+Use this to spin up a proxy API that translates OpenAI API calls to any non-OpenAI model (e.g. Hugging Face, TogetherAI, Ollama, etc.).
+
+This works for async and streaming calls as well.
+
+## Usage
+```shell
+pip install litellm
+```
+
+```shell
+litellm --model <model_name>
+```
+
+This hosts a local proxy API at **http://localhost:8000**.
+
+[**Jump to Code**](https://github.com/BerriAI/litellm/blob/fef4146396d5d87006259e00095a62e3900d6bb4/litellm/proxy.py#L36)
+## Test it
+
+```shell
+curl --location 'http://0.0.0.0:8000/chat/completions' \
+--header 'Content-Type: application/json' \
+--data '{
+    "model": "gpt-3.5-turbo",
+    "messages": [
+      {
+        "role": "user",
+        "content": "what do you know?"
+      }
+    ]
+}'
+```
+
diff --git a/docs/my-website/sidebars.js b/docs/my-website/sidebars.js
index 0c8750a47..15a6918bf 100644
--- a/docs/my-website/sidebars.js
+++ b/docs/my-website/sidebars.js
@@ -83,7 +83,7 @@ const sidebars = {
         "exception_mapping",
         'debugging/local_debugging',
         "budget_manager",
-        "proxy_api",
+        "proxy_server",
         {
           type: 'category',
           label: 'Tutorials',
diff --git a/litellm/__pycache__/__init__.cpython-311.pyc b/litellm/__pycache__/__init__.cpython-311.pyc
index 240bff6fb..5a6b2ad79 100644
Binary files a/litellm/__pycache__/__init__.cpython-311.pyc and b/litellm/__pycache__/__init__.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index e5918250d..2b97701e7 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
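The new doc tests the proxy only with curl, but its whole point is serving OpenAI-compatible calls, so a Python client sketch may help. This is a minimal sketch, assuming the pre-1.0 `openai` SDK, the default `http://localhost:8000` address from the doc, and a placeholder API key (the proxy, not the client, is expected to hold the real provider credentials):

```python
# Minimal sketch: call the local proxy through the pre-1.0 openai SDK.
# Assumes a proxy started with `litellm --model <model_name>` is running on localhost:8000.
import openai

openai.api_base = "http://localhost:8000"  # point the SDK at the proxy instead of api.openai.com
openai.api_key = "anything"                # placeholder; the proxy supplies the real provider key

response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "what do you know?"}],
)
print(response["choices"][0]["message"]["content"])
```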
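The doc also states that streaming works through the proxy. Under the same assumptions as above, a sketch of that is just passing `stream=True` and iterating the returned chunks:

```python
# Minimal sketch: stream tokens from the proxy, same assumptions as above.
import openai

openai.api_base = "http://localhost:8000"
openai.api_key = "anything"  # placeholder key

chunks = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "what do you know?"}],
    stream=True,
)
for chunk in chunks:
    # Each streamed chunk carries an incremental `delta`; `content` may be absent.
    print(chunk["choices"][0]["delta"].get("content", ""), end="", flush=True)
```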