diff --git a/docs/my-website/docs/providers/codestral.md b/docs/my-website/docs/providers/codestral.md
index a401d4f05..bfe015c4e 100644
--- a/docs/my-website/docs/providers/codestral.md
+++ b/docs/my-website/docs/providers/codestral.md
@@ -124,4 +124,132 @@ All models listed here https://docs.mistral.ai/platform/endpoints are supported.
 | Model Name | Function Call |
 |----------------|--------------------------------------------------------------|
 | Codestral Latest | `completion(model="text-completion-codestral/codestral-latest", messages)` |
-| Codestral 2405 | `completion(model="text-completion-codestral/codestral-2405", messages)`|
\ No newline at end of file
+| Codestral 2405 | `completion(model="text-completion-codestral/codestral-2405", messages)`|
+
+## Chat Completions
+
+:::info
+
+Official Mistral API Docs: https://docs.mistral.ai/api/#operation/createChatCompletion
+:::
+
+### Sample Usage
+
+```python
+import os
+import litellm
+
+os.environ['CODESTRAL_API_KEY'] = ""
+
+response = await litellm.acompletion(
+    model="codestral/codestral-latest",
+    messages=[
+        {
+            "role": "user",
+            "content": "Hey, how's it going?",
+        }
+    ],
+    temperature=0.0,    # optional
+    top_p=1,            # optional
+    max_tokens=10,      # optional
+    safe_prompt=False,  # optional
+    seed=12,            # optional
+)
+```
+
+#### Expected Response
+
+```json
+{
+  "id": "chatcmpl-123",
+  "object": "chat.completion",
+  "created": 1677652288,
+  "model": "codestral/codestral-latest",
+  "system_fingerprint": null,
+  "choices": [{
+    "index": 0,
+    "message": {
+      "role": "assistant",
+      "content": "\n\nHello there, how may I assist you today?"
+    },
+    "logprobs": null,
+    "finish_reason": "stop"
+  }],
+  "usage": {
+    "prompt_tokens": 9,
+    "completion_tokens": 12,
+    "total_tokens": 21
+  }
+}
+```
+
+### Sample Usage - Streaming
+
+```python
+import os
+import litellm
+
+os.environ['CODESTRAL_API_KEY'] = ""
+
+response = await litellm.acompletion(
+    model="codestral/codestral-latest",
+    messages=[
+        {
+            "role": "user",
+            "content": "Hey, how's it going?",
+        }
+    ],
+    stream=True,        # optional
+    temperature=0.0,    # optional
+    top_p=1,            # optional
+    max_tokens=10,      # optional
+    safe_prompt=False,  # optional
+    seed=12,            # optional
+)
+async for chunk in response:
+    print(chunk)
+```
+
+#### Expected Response
+
+```json
+{
+  "id": "chatcmpl-123",
+  "object": "chat.completion.chunk",
+  "created": 1694268190,
+  "model": "codestral/codestral-latest",
+  "system_fingerprint": null,
+  "choices": [
+    {
+      "index": 0,
+      "delta": {"role": "assistant", "content": "gm"},
+      "logprobs": null,
+      "finish_reason": null
+    }
+  ]
+}
+```
+
+### Supported Models
+All models listed here https://docs.mistral.ai/platform/endpoints are supported. We actively maintain the list of models, pricing, and token window sizes [here](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json).
+
+| Model Name | Function Call |
+|----------------|--------------------------------------------------------------|
+| Codestral Latest | `completion(model="codestral/codestral-latest", messages)` |
+| Codestral 2405 | `completion(model="codestral/codestral-2405", messages)`|
\ No newline at end of file
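Note on the doc samples above: they use top-level `await`, which only runs in an already-async context such as a Jupyter notebook. As a complement (not part of this patch), here is a minimal standalone sketch of the streaming call wrapped in `asyncio.run`; the prompt text and `max_tokens` value are illustrative, and it assumes `CODESTRAL_API_KEY` is already exported in the environment.

```python
import asyncio
import os

import litellm


async def main() -> str:
    # Assumes CODESTRAL_API_KEY is already set; fail fast otherwise.
    if not os.environ.get("CODESTRAL_API_KEY"):
        raise RuntimeError("Set CODESTRAL_API_KEY before running this example")

    response = await litellm.acompletion(
        model="codestral/codestral-latest",
        messages=[{"role": "user", "content": "Write a hello world in Python"}],
        stream=True,
        max_tokens=50,
    )

    # Streamed chunks follow the OpenAI chunk format: text arrives in choices[0].delta.content.
    reply = ""
    async for chunk in response:
        piece = chunk.choices[0].delta.content or ""
        reply += piece
        print(piece, end="", flush=True)

    print()
    return reply


if __name__ == "__main__":
    asyncio.run(main())
```

The same wrapper works for the non-streaming sample: drop `stream=True` and read `response.choices[0].message.content` instead.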
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 250a4a7b6..0e1b4a9f4 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -823,13 +823,17 @@ async def test_completion_codestral_chat_api():
     litellm.set_verbose = True
     response = await litellm.acompletion(
         model="codestral/codestral-latest",
-        max_tokens=5,
         messages=[
             {
                 "role": "user",
                 "content": "Hey, how's it going?",
             }
         ],
+        temperature=0.0,
+        top_p=1,
+        max_tokens=10,
+        safe_prompt=False,
+        seed=12,
     )
     # Add any assertions here to check the response
     print(response)
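The updated test only prints the response, as the `# Add any assertions here to check the response` comment notes. Below is a hypothetical companion test (not included in this diff) sketching such assertions plus a streaming variant; it assumes the pytest-asyncio setup already used by `test_completion_codestral_chat_api` and a valid `CODESTRAL_API_KEY` in the test environment.

```python
import pytest

import litellm


@pytest.mark.asyncio
async def test_completion_codestral_chat_api_streaming():
    # Hypothetical test mirroring the docs' streaming example; not part of this PR.
    litellm.set_verbose = True
    response = await litellm.acompletion(
        model="codestral/codestral-latest",
        messages=[{"role": "user", "content": "Hey, how's it going?"}],
        stream=True,
        temperature=0.0,
        max_tokens=10,
        seed=12,
    )

    collected = ""
    chunk_count = 0
    async for chunk in response:
        chunk_count += 1
        # Each chunk is OpenAI-format: the text delta lives in choices[0].delta.content.
        collected += chunk.choices[0].delta.content or ""

    assert chunk_count > 0, "expected at least one streamed chunk"
    assert isinstance(collected, str)
    print(collected)
```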