diff --git a/docs/my-website/docs/providers/ai21.md b/docs/my-website/docs/providers/ai21.md
index 294db3fcbc..f331fcff9a 100644
--- a/docs/my-website/docs/providers/ai21.md
+++ b/docs/my-website/docs/providers/ai21.md
@@ -103,6 +103,93 @@ Here's how to call a ai21 model with the LiteLLM Proxy Server
 
+## Supported OpenAI Parameters
+
+| [param](../completion/input) | type | AI21 equivalent |
+|-------|-------------|------------------|
+| `tools` | **Optional[list]** | `tools` |
+| `response_format` | **Optional[dict]** | `response_format` |
+| `max_tokens` | **Optional[int]** | `max_tokens` |
+| `temperature` | **Optional[float]** | `temperature` |
+| `top_p` | **Optional[float]** | `top_p` |
+| `stop` | **Optional[Union[str, list]]** | `stop` |
+| `n` | **Optional[int]** | `n` |
+| `stream` | **Optional[bool]** | `stream` |
+| `seed` | **Optional[int]** | `seed` |
+| `tool_choice` | **Optional[str]** | `tool_choice` |
+| `user` | **Optional[str]** | `user` |
+
+## Supported AI21 Parameters
+
+| param | type | [AI21 equivalent](https://docs.ai21.com/reference/jamba-15-api-ref#request-parameters) |
+|-----------|------|-------------|
+| `documents` | **Optional[List[Dict]]** | `documents` |
+
+## Passing AI21 Specific Parameters - `documents`
+
+LiteLLM allows you to pass all AI21-specific parameters to the `litellm.completion` function. Here is an example of passing the `documents` parameter:
+
+<Tabs>
+<TabItem value="sdk" label="SDK">
+
+```python
+import litellm
+
+# Requires AI21_API_KEY to be set in the environment.
+# `acompletion` is a coroutine, so call it from an async context.
+response = await litellm.acompletion(
+    model="jamba-1.5-large",
+    messages=[{"role": "user", "content": "what does the document say"}],
+    documents=[
+        {
+            "content": "hello world",
+            "metadata": {
+                "source": "google",
+                "author": "ishaan"
+            }
+        }
+    ]
+)
+```
+
+</TabItem>
+<TabItem value="proxy" label="PROXY">
+
+```python
+import openai
+
+client = openai.OpenAI(
+    api_key="sk-1234",              # your LiteLLM proxy key, if you're using virtual keys
+    base_url="http://0.0.0.0:4000"  # LiteLLM proxy base URL
+)
+
+response = client.chat.completions.create(
+    model="my-model",
+    messages=[
+        {
+            "role": "user",
+            "content": "what llm are you"
+        }
+    ],
+    extra_body={
+        "documents": [
+            {
+                "content": "hello world",
+                "metadata": {
+                    "source": "google",
+                    "author": "ishaan"
+                }
+            }
+        ]
+    }
+)
+
+print(response)
+```
+
+</TabItem>
+</Tabs>
 
 :::tip
 
@@ -118,4 +205,5 @@
 | jamba-1.5-large | `completion('jamba-1.5-large', messages)` | `os.environ['AI21_API_KEY']` |
 | j2-light | `completion('j2-light', messages)` | `os.environ['AI21_API_KEY']` |
 | j2-mid | `completion('j2-mid', messages)` | `os.environ['AI21_API_KEY']` |
-| j2-ultra | `completion('j2-ultra', messages)` | `os.environ['AI21_API_KEY']` |
\ No newline at end of file
+| j2-ultra | `completion('j2-ultra', messages)` | `os.environ['AI21_API_KEY']` |
+
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index affe40d916..6f133a9ff2 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -4485,6 +4485,12 @@ async def test_completion_ai21_chat():
         user="ishaan",
         tool_choice="auto",
         seed=123,
-        messages=[{"role": "user", "content": "hi my name is ishaan"}],
+        messages=[{"role": "user", "content": "what does the document say"}],
+        documents=[
+            {
+                "content": "hello world",
+                "metadata": {"source": "google", "author": "ishaan"},
+            }
+        ],
     )
     pass
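
To try the new `documents` flow end to end, here is a minimal self-contained script. This is a sketch rather than part of the diff above: it assumes `AI21_API_KEY` is exported in your environment, and it wraps the `acompletion` coroutine in `asyncio.run` so the docs snippet can run as a plain script.

```python
import asyncio

import litellm


async def main() -> None:
    # Assumes AI21_API_KEY is set, e.g. `export AI21_API_KEY=...`.
    response = await litellm.acompletion(
        model="jamba-1.5-large",
        messages=[{"role": "user", "content": "what does the document say"}],
        # AI21-specific parameter; LiteLLM passes it through unchanged.
        documents=[
            {
                "content": "hello world",
                "metadata": {"source": "google", "author": "ishaan"},
            }
        ],
    )
    # LiteLLM responses mirror the OpenAI response shape.
    print(response.choices[0].message.content)


if __name__ == "__main__":
    asyncio.run(main())
```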
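
The supported-parameters table also lists `stream`, so a streaming variant is sketched below. Whether AI21 accepts `documents` alongside every OpenAI parameter is an assumption here; check the AI21 request-parameter docs linked in the table if a combination is rejected.

```python
import litellm

# Sketch: streaming a response grounded on a `documents` entry.
# Assumes AI21_API_KEY is set in the environment.
response = litellm.completion(
    model="jamba-1.5-large",
    messages=[{"role": "user", "content": "summarize the document"}],
    max_tokens=256,   # OpenAI param, mapped 1:1 per the table above
    temperature=0.2,  # OpenAI param, mapped 1:1 per the table above
    stream=True,
    documents=[{"content": "hello world", "metadata": {"source": "google"}}],
)
for chunk in response:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="")
print()
```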