diff --git a/docs/my-website/docs/proxy/user_keys.md b/docs/my-website/docs/proxy/user_keys.md
index ef718f3ad..9136452f5 100644
--- a/docs/my-website/docs/proxy/user_keys.md
+++ b/docs/my-website/docs/proxy/user_keys.md
@@ -1,11 +1,147 @@
 import Tabs from '@theme/Tabs';
 import TabItem from '@theme/TabItem';
 
-# Pass User LLM API Keys
+# Use with Langchain, OpenAI SDK, Curl
+
+How to send requests to the proxy, pass metadata, and allow users to pass in their OpenAI API key
+
+## `/chat/completions`
+
+### Request Format
+
+<Tabs>
+
+<TabItem value="openai" label="OpenAI Python v1.0.0+">
+
+Set `extra_body={"metadata": { }}` to the `metadata` you want to pass
+
+```python
+import openai
+client = openai.OpenAI(
+    api_key="anything",
+    base_url="http://0.0.0.0:8000"
+)
+
+# request sent to model set on litellm proxy, `litellm --model`
+response = client.chat.completions.create(
+    model="gpt-3.5-turbo",
+    messages=[
+        {
+            "role": "user",
+            "content": "this is a test request, write a short poem"
+        }
+    ],
+    extra_body={
+        "metadata": {
+            "generation_name": "ishaan-generation-openai-client",
+            "generation_id": "openai-client-gen-id22",
+            "trace_id": "openai-client-trace-id22",
+            "trace_user_id": "openai-client-user-id2"
+        }
+    }
+)
+
+print(response)
+```
+
+</TabItem>
+
+<TabItem value="curl" label="Curl Request">
+
+Pass `metadata` as part of the request body
+
+```shell
+curl --location 'http://0.0.0.0:8000/chat/completions' \
+    --header 'Content-Type: application/json' \
+    --data '{
+    "model": "gpt-3.5-turbo",
+    "messages": [
+        {
+            "role": "user",
+            "content": "what llm are you"
+        }
+    ],
+    "metadata": {
+        "generation_name": "ishaan-test-generation",
+        "generation_id": "gen-id22",
+        "trace_id": "trace-id22",
+        "trace_user_id": "user-id2"
+    }
+}'
+```
+
+</TabItem>
+
+<TabItem value="langchain" label="Langchain">
+
+```python
+from langchain.chat_models import ChatOpenAI
+from langchain.prompts.chat import (
+    ChatPromptTemplate,
+    HumanMessagePromptTemplate,
+    SystemMessagePromptTemplate,
+)
+from langchain.schema import HumanMessage, SystemMessage
+
+chat = ChatOpenAI(
+    openai_api_base="http://0.0.0.0:8000",
+    model="gpt-3.5-turbo",
+    temperature=0.1,
+    extra_body={
+        "metadata": {
+            "generation_name": "ishaan-generation-langchain-client",
+            "generation_id": "langchain-client-gen-id22",
+            "trace_id": "langchain-client-trace-id22",
+            "trace_user_id": "langchain-client-user-id2"
+        }
+    }
+)
+
+messages = [
+    SystemMessage(
+        content="You are a helpful assistant that I'm using to make a test request to."
+    ),
+    HumanMessage(
+        content="test from litellm. tell me why it's amazing in 1 sentence"
+    ),
+]
+response = chat(messages)
+
+print(response)
+```
+
+</TabItem>
+</Tabs>
+
+### Response Format
+
+```json
+{
+  "id": "chatcmpl-8c5qbGTILZa1S4CK3b31yj5N40hFN",
+  "choices": [
+    {
+      "finish_reason": "stop",
+      "index": 0,
+      "message": {
+        "content": "As an AI language model, I do not have a physical form or personal preferences. However, I am programmed to assist with various topics and provide information on a wide range of subjects. Is there something specific you would like assistance with?",
+        "role": "assistant"
+      }
+    }
+  ],
+  "created": 1704089632,
+  "model": "gpt-35-turbo",
+  "object": "chat.completion",
+  "system_fingerprint": null,
+  "usage": {
+    "completion_tokens": 47,
+    "prompt_tokens": 12,
+    "total_tokens": 59
+  },
+  "_response_ms": 1753.426
+}
+```
+
+## Pass User LLM API Keys
 Allows your users to pass in their OpenAI API key (any LiteLLM supported provider) to make requests
-
 Here's how to do it:
 ```python