(feat) add gpt-3.5-turbo-instruct-0914

ishaan-jaff 2024-03-01 20:02:12 -08:00
parent 6dc85ed8e3
commit d1465ed57e
4 changed files with 58 additions and 0 deletions


@@ -168,6 +168,13 @@ response = completion(
)
```
## Azure Instruct Models
| Model Name | Function Call |
|---------------------|----------------------------------------------------|
| gpt-3.5-turbo-instruct | `response = completion(model="azure/<your deployment name>", messages=messages)` |
| gpt-3.5-turbo-instruct-0914 | `response = completion(model="azure/<your deployment name>", messages=messages)` |
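Below is a minimal, self-contained sketch of the Azure call shown in the table above. The deployment name, endpoint, and API version are placeholders; swap in your own Azure OpenAI instruct deployment.

```python
import os
from litellm import completion

# Assumed environment setup for Azure OpenAI; all values are placeholders.
os.environ["AZURE_API_KEY"] = "my-azure-api-key"
os.environ["AZURE_API_BASE"] = "https://my-endpoint.openai.azure.com"
os.environ["AZURE_API_VERSION"] = "2023-07-01-preview"

messages = [{"role": "user", "content": "Say this is a test"}]

# "azure/<your deployment name>" routes the call to the Azure deployment
# backing gpt-3.5-turbo-instruct or gpt-3.5-turbo-instruct-0914.
response = completion(
    model="azure/my-instruct-deployment",  # placeholder deployment name
    messages=messages,
)
print(response)
```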
## Advanced
### Azure API Load-Balancing


@@ -93,6 +93,7 @@ response = completion(
| Model Name | Function Call |
|---------------------|----------------------------------------------------|
| gpt-3.5-turbo-instruct | `response = completion(model="gpt-3.5-turbo-instruct", messages=messages)` |
| gpt-3.5-turbo-instruct-0914 | `response = completion(model="gpt-3.5-turbo-instruct-0914", messages=messages)` |
| text-davinci-003 | `response = completion(model="text-davinci-003", messages=messages)` |
| ada-001 | `response = completion(model="ada-001", messages=messages)` |
| curie-001 | `response = completion(model="curie-001", messages=messages)` |
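The same call works for the OpenAI-hosted snapshot added in this commit. A minimal sketch, assuming `OPENAI_API_KEY` is set to a valid key; the prompt is illustrative only.

```python
import os
from litellm import completion

# Assumes a valid OpenAI key; "sk-..." is a placeholder.
os.environ["OPENAI_API_KEY"] = "sk-..."

messages = [{"role": "user", "content": "Say this is a test"}]

# Pinned snapshot of the instruct model added in this commit.
response = completion(model="gpt-3.5-turbo-instruct-0914", messages=messages)
print(response)
```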


@@ -424,6 +424,23 @@
"mode": "chat",
"supports_function_calling": true
},
"azure/gpt-3.5-turbo-instruct-0914": {
"max_tokens": 4097,
"input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"azure/gpt-35-turbo-instruct": {
"max_tokens": 4097,
"input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"azure/mistral-large-latest": {
"max_tokens": 32000,
"input_cost_per_token": 0.000008,
@@ -537,6 +554,14 @@
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"gpt-3.5-turbo-instruct-0914": {
"max_tokens": 4097,
"input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"claude-instant-1": {
"max_tokens": 100000,
"max_output_tokens": 8191,


@@ -424,6 +424,23 @@
"mode": "chat",
"supports_function_calling": true
},
"azure/gpt-3.5-turbo-instruct-0914": {
"max_tokens": 4097,
"input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"azure/gpt-35-turbo-instruct": {
"max_tokens": 4097,
"input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"azure/mistral-large-latest": {
"max_tokens": 32000,
"input_cost_per_token": 0.000008,
@@ -537,6 +554,14 @@
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"gpt-3.5-turbo-instruct-0914": {
"max_tokens": 4097,
"input_cost_per_token": 0.0000015,
"output_cost_per_token": 0.000002,
"litellm_provider": "text-completion-openai",
"mode": "completion"
},
"claude-instant-1": {
"max_tokens": 100000,
"max_output_tokens": 8191,