update docs on together ai
This commit is contained in: parent cc4be8dd73, commit b4a9699138
2 changed files with 132 additions and 0 deletions
@@ -20,3 +20,112 @@ Example TogetherAI Usage - Note: liteLLM supports all models deployed on TogetherAI
| togethercomputer/code-and-talk-v1 | `completion('togethercomputer/code-and-talk-v1', messages)` | `os.environ['TOGETHERAI_API_KEY']` |
| togethercomputer/creative-v1 | `completion('togethercomputer/creative-v1', messages)` | `os.environ['TOGETHERAI_API_KEY']` |
| togethercomputer/yourmodel | `completion('togethercomputer/yourmodel', messages)` | `os.environ['TOGETHERAI_API_KEY']` |
### Prompt Templates
Using a chat model on Together AI that has its own prompt format?
#### Using Llama2 Instruct models
If you're using Together AI's Llama2 variants (`model=togethercomputer/llama-2..-instruct`), LiteLLM can automatically translate between the OpenAI prompt format and the TogetherAI Llama2 format (`[INST]..[/INST]`).
```python
import os
from litellm import completion

# set env variable
os.environ["TOGETHERAI_API_KEY"] = ""

messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]

completion(model="together_ai/togethercomputer/Llama-2-7B-32K-Instruct", messages=messages)
```
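For illustration only, here is roughly what that translation produces for the single user message above. This is a sketch of the `[INST]..[/INST]` target format named here, not LiteLLM's verbatim output:

```python
# Illustrative sketch (assumed format): how an OpenAI-style user message
# maps onto Llama2's [INST]..[/INST] instruction format.
messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]

llama2_prompt = f"[INST] {messages[0]['content']} [/INST]"
print(llama2_prompt)  # -> [INST] Write me a poem about the blue sky [/INST]
```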
#### Using another model
You can create a custom prompt template on LiteLLM (and we [welcome PRs](https://github.com/BerriAI/litellm) to add them to the main repo 🤗)
Let's make one for `OpenAssistant/llama2-70b-oasst-sft-v10`!
The accepted template format is: [Reference](https://huggingface.co/OpenAssistant/llama2-70b-oasst-sft-v10)
```
"""
<|im_start|>system
{system_message}<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
"""
```
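To make the role mapping concrete, here is a small sketch of how an OpenAI-style message list would be laid out in this format. The `render_chatml` helper is hypothetical, written purely for illustration; it is not part of LiteLLM:

```python
# Hypothetical helper, for illustration only: renders OpenAI-style messages
# into the template format shown above.
def render_chatml(messages):
    prompt = ""
    for m in messages:
        prompt += f"<|im_start|>{m['role']}\n{m['content']}<|im_end|>\n"
    prompt += "<|im_start|>assistant\n"  # cue the model to respond
    return prompt

print(render_chatml([
    {"role": "system", "content": "You are a poet."},
    {"role": "user", "content": "Write me a poem about the blue sky"},
]))
```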
Let's register our custom prompt template: [Implementation Code](https://github.com/BerriAI/litellm/blob/64f3d3c56ef02ac5544983efc78293de31c1c201/litellm/llms/prompt_templates/factory.py#L77)
```python
import litellm

litellm.register_prompt_template(
    model="OpenAssistant/llama2-70b-oasst-sft-v10",
    # tell LiteLLM how to map the OpenAI message roles to this model's tags
    roles={"system": "<|im_start|>system", "assistant": "<|im_start|>assistant", "user": "<|im_start|>user"},
    pre_message_sep="\n",
    post_message_sep="\n",
)
```
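Judging by the template above, `pre_message_sep` and `post_message_sep` appear to control the separators placed around each message's content, which is what puts every role tag and message on its own line.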
Let's use it!
```python
import os
from litellm import completion

# set env variable
os.environ["TOGETHERAI_API_KEY"] = ""

messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]

completion(model="together_ai/OpenAssistant/llama2-70b-oasst-sft-v10", messages=messages)
```
**Complete Code**
```python
import os
import litellm
from litellm import completion

# set env variable
os.environ["TOGETHERAI_API_KEY"] = ""

litellm.register_prompt_template(
    model="OpenAssistant/llama2-70b-oasst-sft-v10",
    # tell LiteLLM how to map the OpenAI message roles to this model's tags
    roles={"system": "<|im_start|>system", "assistant": "<|im_start|>assistant", "user": "<|im_start|>user"},
    pre_message_sep="\n",
    post_message_sep="\n",
)

messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]

response = completion(model="together_ai/OpenAssistant/llama2-70b-oasst-sft-v10", messages=messages)

print(response)
```
**Output**
```json
{
  "choices": [
    {
      "finish_reason": "stop",
      "index": 0,
      "message": {
        "content": ".\n\nThe sky is a canvas of blue,\nWith clouds that drift and move,",
        "role": "assistant",
        "logprobs": null
      }
    }
  ],
  "created": 1693941410.482018,
  "model": "OpenAssistant/llama2-70b-oasst-sft-v10",
  "usage": {
    "prompt_tokens": 7,
    "completion_tokens": 16,
    "total_tokens": 23
  },
  "litellm_call_id": "f21315db-afd6-4c1e-b43a-0b5682de4b06"
}
```
@@ -872,6 +872,29 @@ def load_test_model(
"exception": e,
}
def validate_environment(self):
    api_key = None
    if "OPENAI_API_KEY" in os.environ:
        api_key = os.getenv("OPENAI_API_KEY")
    elif "ANTHROPIC_API_KEY" in os.environ:
        api_key = os.getenv("ANTHROPIC_API_KEY")
    elif "REPLICATE_API_KEY" in os.environ:
        api_key = os.getenv("REPLICATE_API_KEY")
    elif "AZURE_API_KEY" in os.environ:
        api_key = os.getenv("AZURE_API_KEY")
    elif "COHERE_API_KEY" in os.environ:
        api_key = os.getenv("COHERE_API_KEY")
    elif "TOGETHERAI_API_KEY" in os.environ:
        api_key = os.getenv("TOGETHERAI_API_KEY")
    elif "BASETEN_API_KEY" in os.environ:
        api_key = os.getenv("BASETEN_API_KEY")
    elif "AI21_API_KEY" in os.environ:
        api_key = os.getenv("AI21_API_KEY")
    elif "OPENROUTER_API_KEY" in os.environ:
        api_key = os.getenv("OPENROUTER_API_KEY")
    elif "ALEPHALPHA_API_KEY" in os.environ:
        api_key = os.getenv("ALEPHALPHA_API_KEY")
    return api_key
def set_callbacks(callback_list, function_id=None):
    global sentry_sdk_instance, capture_exception, add_breadcrumb, posthog, slack_app, alerts_channel, traceloopLogger, heliconeLogger, aispendLogger, berrispendLogger, supabaseClient, liteDebuggerClient, llmonitorLogger, promptLayerLogger, langFuseLogger