docs - "codestral/codestral-latest"
commit 13bd7e8127 (parent 866f26ceda)
2 changed files with 134 additions and 2 deletions
@@ -124,4 +124,132 @@ All models listed here https://docs.mistral.ai/platform/endpoints are supported.

| Model Name | Function Call |
|----------------|--------------------------------------------------------------|
| Codestral Latest | `completion(model="text-completion-codestral/codestral-latest", messages)` |
| Codestral 2405 | `completion(model="text-completion-codestral/codestral-2405", messages)` |
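
A minimal sketch of calling one of these models, mirroring the function signature in the table above (the API key placeholder and the prompt text are illustrative, not from the original docs):

```python
import os
import litellm

os.environ["CODESTRAL_API_KEY"] = "your-api-key"  # placeholder; set a real key

# Same call shape as the table above; a sketch, not a definitive example.
response = litellm.completion(
    model="text-completion-codestral/codestral-latest",
    messages=[{"role": "user", "content": "def fibonacci(n):"}],
)
print(response)
```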

## Chat Completions

:::info

Official Mistral API Docs: https://docs.mistral.ai/api/#operation/createChatCompletion

:::

<Tabs>
<TabItem value="no-streaming" label="No Streaming">

### Sample Usage

```python
import os
import litellm

os.environ['CODESTRAL_API_KEY'] = "your-api-key"  # set your Codestral API key

response = await litellm.acompletion(
    model="codestral/codestral-latest",
    messages=[
        {
            "role": "user",
            "content": "Hey, how's it going?",
        }
    ],
    temperature=0.0,    # optional
    top_p=1,            # optional
    max_tokens=10,      # optional
    safe_prompt=False,  # optional
    seed=12,            # optional
)
```
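
The snippet above uses `await`, so it assumes an already-running event loop (for example, a notebook). As a standalone script, the call can be wrapped in `asyncio.run`; a minimal sketch:

```python
import asyncio
import os

import litellm

os.environ["CODESTRAL_API_KEY"] = "your-api-key"  # placeholder; set a real key

async def main():
    # Same call as the sample above, wrapped for script usage.
    response = await litellm.acompletion(
        model="codestral/codestral-latest",
        messages=[{"role": "user", "content": "Hey, how's it going?"}],
    )
    print(response)

asyncio.run(main())
```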

#### Expected Response

```json
{
  "id": "chatcmpl-123",
  "object": "chat.completion",
  "created": 1677652288,
  "model": "codestral/codestral-latest",
  "system_fingerprint": null,
  "choices": [{
    "index": 0,
    "message": {
      "role": "assistant",
      "content": "\n\nHello there, how may I assist you today?"
    },
    "logprobs": null,
    "finish_reason": "stop"
  }],
  "usage": {
    "prompt_tokens": 9,
    "completion_tokens": 12,
    "total_tokens": 21
  }
}
```
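
litellm returns an OpenAI-compatible response object, so the fields shown above can also be read attribute-style off the `response` from the sample; for example:

```python
# Reading fields from the response object (values match the sample above).
print(response.choices[0].message.content)  # "\n\nHello there, how may I assist you today?"
print(response.usage.total_tokens)          # 21
```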

</TabItem>
<TabItem value="stream" label="Streaming">

### Sample Usage - Streaming

```python
import os
import litellm

os.environ['CODESTRAL_API_KEY'] = "your-api-key"  # set your Codestral API key

response = await litellm.acompletion(
    model="codestral/codestral-latest",
    messages=[
        {
            "role": "user",
            "content": "Hey, how's it going?",
        }
    ],
    stream=True,        # optional
    temperature=0.0,    # optional
    top_p=1,            # optional
    max_tokens=10,      # optional
    safe_prompt=False,  # optional
    seed=12,            # optional
)
async for chunk in response:
    print(chunk)
```
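
Instead of printing raw chunks, the streamed delta fragments can be accumulated into the full reply. A minimal sketch that replaces the `async for` loop above, assuming the chunk shape shown in the expected response below:

```python
# Accumulate streamed text; `response` is the streaming object from the sample above.
full_reply = ""
async for chunk in response:
    delta = chunk.choices[0].delta.content
    if delta is not None:
        full_reply += delta
print(full_reply)
```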

#### Expected Response

```json
{
  "id": "chatcmpl-123",
  "object": "chat.completion.chunk",
  "created": 1694268190,
  "model": "codestral/codestral-latest",
  "system_fingerprint": null,
  "choices": [
    {
      "index": 0,
      "delta": {"role": "assistant", "content": "gm"},
      "logprobs": null,
      "finish_reason": null
    }
  ]
}
```

</TabItem>
</Tabs>

### Supported Models

All models listed here https://docs.mistral.ai/platform/endpoints are supported. We actively maintain the list of models, pricing, token window, etc. [here](https://github.com/BerriAI/litellm/blob/main/model_prices_and_context_window.json).

| Model Name | Function Call |
|----------------|--------------------------------------------------------------|
| Codestral Latest | `completion(model="codestral/codestral-latest", messages)` |
| Codestral 2405 | `completion(model="codestral/codestral-2405", messages)` |
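
For reference, these chat models can also be called synchronously with `litellm.completion`; a minimal sketch (prompt text illustrative, `CODESTRAL_API_KEY` assumed set as in the samples above):

```python
import litellm

# Synchronous variant of the async samples above.
response = litellm.completion(
    model="codestral/codestral-latest",
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
)
print(response.choices[0].message.content)
```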
@@ -823,13 +823,17 @@ async def test_completion_codestral_chat_api():

```python
    litellm.set_verbose = True
    response = await litellm.acompletion(
        model="codestral/codestral-latest",
        messages=[
            {
                "role": "user",
                "content": "Hey, how's it going?",
            }
        ],
        temperature=0.0,
        top_p=1,
        max_tokens=10,
        safe_prompt=False,
        seed=12,
    )
    # Add any assertions here to check the response
    print(response)
```
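
For illustration only (not part of this commit), assertions one might add at the `# Add any assertions here` comment, assuming the OpenAI-style response shape documented above:

```python
    # Hypothetical assertions; not in the original test.
    assert response.choices[0].message.content is not None
    assert response.usage.total_tokens > 0
```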