forked from phoenix/litellm-mirror
Clarifai-LiteLLM integration (#1)
* intg v1 clarifai-litellm
* Added more community models and test cases
* Updated Clarifai markdown docs
This commit is contained in:
parent fa8a9568aa
commit 318b4813f2
7 changed files with 734 additions and 0 deletions

151 cookbook/liteLLM_clarifai_Demo.ipynb vendored Normal file
@@ -0,0 +1,151 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# LiteLLM Clarifai \n",
    "This notebook walks you through using the liteLLM Clarifai integration to call LLMs hosted on Clarifai and receive responses in the OpenAI output format."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Pre-Requisites"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Install the necessary packages\n",
    "!pip install litellm\n",
    "!pip install clarifai"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To obtain a Clarifai Personal Access Token (PAT), follow the steps in this [link](https://docs.clarifai.com/clarifai-basics/authentication/personal-access-tokens/)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Set Clarifai Credentials\n",
    "import os\n",
    "os.environ[\"CLARIFAI_API_KEY\"] = \"YOUR_CLARIFAI_PAT\"  # Clarifai PAT"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Mistral-large"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import litellm\n",
    "\n",
    "litellm.set_verbose = False"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
"Mistral large response : ModelResponse(id='chatcmpl-6eed494d-7ae2-4870-b9c2-6a64d50a6151', choices=[Choices(finish_reason='stop', index=1, message=Message(content=\"In the grand tapestry of time, where tales unfold,\\nLies the chronicle of ages, a sight to behold.\\nA tale of empires rising, and kings of old,\\nOf civilizations lost, and stories untold.\\n\\nOnce upon a yesterday, in a time so vast,\\nHumans took their first steps, casting shadows in the past.\\nFrom the cradle of mankind, a journey they embarked,\\nThrough stone and bronze and iron, their skills they sharpened and marked.\\n\\nEgyptians built pyramids, reaching for the skies,\\nWhile Greeks sought wisdom, truth, in philosophies that lie.\\nRoman legions marched, their empire to expand,\\nAnd in the East, the Silk Road joined the world, hand in hand.\\n\\nThe Middle Ages came, with knights in shining armor,\\nFeudal lords and serfs, a time of both clamor and calm order.\\nThen Renaissance bloomed, like a flower in the sun,\\nA rebirth of art and science, a new age had begun.\\n\\nAcross the vast oceans, explorers sailed with courage bold,\\nDiscovering new lands, stories of adventure, untold.\\nIndustrial Revolution churned, progress in its wake,\\nMachines and factories, a whole new world to make.\\n\\nTwo World Wars raged, a testament to man's strife,\\nYet from the ashes rose hope, a renewed will for life.\\nInto the modern era, technology took flight,\\nConnecting every corner, bathed in digital light.\\n\\nHistory, a symphony, a melody of time,\\nA testament to human will, resilience so sublime.\\nIn every page, a lesson, in every tale, a guide,\\nFor understanding our past, shapes our future's tide.\", role='assistant'))], created=1713896412, model='https://api.clarifai.com/v2/users/mistralai/apps/completion/models/mistral-large/outputs', object='chat.completion', system_fingerprint=None, usage=Usage(prompt_tokens=13, completion_tokens=338, total_tokens=351))\n"
     ]
    }
   ],
   "source": [
    "from litellm import completion\n",
    "\n",
    "messages = [{\"role\": \"user\", \"content\": \"\"\"Write a poem about history?\"\"\"}]\n",
    "response = completion(\n",
    "    model=\"clarifai/mistralai.completion.mistral-large\",\n",
    "    messages=messages,\n",
    ")\n",
    "\n",
    "print(f\"Mistral large response : {response}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Claude-2.1 "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
"Claude-2.1 response : ModelResponse(id='chatcmpl-d126c919-4db4-4aa3-ac8f-7edea41e0b93', choices=[Choices(finish_reason='stop', index=1, message=Message(content=\" Here's a poem I wrote about history:\\n\\nThe Tides of Time\\n\\nThe tides of time ebb and flow,\\nCarrying stories of long ago.\\nFigures and events come into light,\\nShaping the future with all their might.\\n\\nKingdoms rise, empires fall, \\nLeaving traces that echo down every hall.\\nRevolutions bring change with a fiery glow,\\nToppling structures from long ago.\\n\\nExplorers traverse each ocean and land,\\nSeeking treasures they don't understand.\\nWhile artists and writers try to make their mark,\\nHoping their works shine bright in the dark.\\n\\nThe cycle repeats again and again,\\nAs humanity struggles to learn from its pain.\\nThough the players may change on history's stage,\\nThe themes stay the same from age to age.\\n\\nWar and peace, life and death,\\nLove and strife with every breath.\\nThe tides of time continue their dance,\\nAs we join in, by luck or by chance.\\n\\nSo we study the past to light the way forward, \\nHeeding warnings from stories told and heard.\\nThe future unfolds from this unending flow -\\nWhere the tides of time ultimately go.\", role='assistant'))], created=1713896579, model='https://api.clarifai.com/v2/users/anthropic/apps/completion/models/claude-2_1/outputs', object='chat.completion', system_fingerprint=None, usage=Usage(prompt_tokens=12, completion_tokens=232, total_tokens=244))\n"
     ]
    }
   ],
   "source": [
    "from litellm import completion\n",
    "\n",
    "messages = [{\"role\": \"user\", \"content\": \"\"\"Write a poem about history?\"\"\"}]\n",
    "response = completion(\n",
    "    model=\"clarifai/anthropic.completion.claude-2_1\",\n",
    "    messages=messages,\n",
    ")\n",
    "\n",
    "print(f\"Claude-2.1 response : {response}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}

177 docs/my-website/docs/providers/clarifai.md Normal file
@@ -0,0 +1,177 @@
# Clarifai
Anthropic, OpenAI, Mistral, Llama, and Gemini LLMs are supported on Clarifai.

## Pre-Requisites

`pip install clarifai`

`pip install litellm`

## Required Environment Variables
To obtain your Clarifai Personal Access Token (PAT), follow this [link](https://docs.clarifai.com/clarifai-basics/authentication/personal-access-tokens/). Optionally, the PAT can also be passed directly to the `completion` function, as shown in the sketch below.

```python
os.environ["CLARIFAI_API_KEY"] = "YOUR_CLARIFAI_PAT"  # Clarifai PAT
```
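
A minimal sketch of passing the PAT per-call instead of via the environment variable, based on this commit's key-resolution order in `litellm/main.py` (the `api_key` argument takes precedence over `CLARIFAI_API_KEY`):

```python
from litellm import completion

response = completion(
    model="clarifai/mistralai.completion.mistral-large",
    messages=[{"role": "user", "content": "Tell me a joke about physics?"}],
    api_key="YOUR_CLARIFAI_PAT",  # overrides the CLARIFAI_API_KEY env var for this call
)
```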

## Usage

```python
import os
from litellm import completion

os.environ["CLARIFAI_API_KEY"] = ""

response = completion(
    model="clarifai/mistralai.completion.mistral-large",
    messages=[{"role": "user", "content": "Tell me a joke about physics?"}],
)
```

**Output**
```json
{
  "id": "chatcmpl-572701ee-9ab2-411c-ac75-46c1ba18e781",
  "choices": [
    {
      "finish_reason": "stop",
      "index": 1,
      "message": {
        "content": "Sure, here's a physics joke for you:\n\nWhy can't you trust an atom?\n\nBecause they make up everything!",
        "role": "assistant"
      }
    }
  ],
  "created": 1714410197,
  "model": "https://api.clarifai.com/v2/users/mistralai/apps/completion/models/mistral-large/outputs",
  "object": "chat.completion",
  "system_fingerprint": null,
  "usage": {
    "prompt_tokens": 14,
    "completion_tokens": 24,
    "total_tokens": 38
  }
}
```
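
Optional parameters such as `max_tokens` and `temperature` are forwarded to the Clarifai model (see `ClarifaiConfig` and `completions_to_model` in this commit). A minimal sketch:

```python
import os
from litellm import completion

os.environ["CLARIFAI_API_KEY"] = "YOUR_CLARIFAI_PAT"

response = completion(
    model="clarifai/anthropic.completion.claude-2_1",
    messages=[{"role": "user", "content": "Write a short poem about the sky"}],
    max_tokens=100,   # forwarded to Clarifai's output_info params
    temperature=0.7,  # forwarded to Clarifai's output_info params
)
print(response.choices[0].message.content)
```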

## Clarifai models
liteLLM supports non-streaming requests to all models on the [Clarifai community](https://clarifai.com/explore/models?filterData=%5B%7B%22field%22%3A%22use_cases%22%2C%22value%22%3A%5B%22llm%22%5D%7D%5D&page=1&perPage=24) page.

Example usage for each model family is listed below. Note: liteLLM supports all models deployed on Clarifai.
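
Model identifiers follow Clarifai's `user_id.app_id.model_id` convention, prefixed with `clarifai/` so liteLLM can route the request. Internally this is mapped to the model's Clarifai API URL; a minimal sketch of that mapping (mirroring `convert_model_to_url` from `litellm/llms/clarifai.py` in this commit, with a hypothetical helper name):

```python
def model_to_clarifai_url(model: str, api_base: str = "https://api.clarifai.com/v2") -> str:
    # "mistralai.completion.mistral-large" -> ("mistralai", "completion", "mistral-large")
    user_id, app_id, model_id = model.split(".")
    return f"{api_base}/users/{user_id}/apps/{app_id}/models/{model_id}/outputs"

print(model_to_clarifai_url("mistralai.completion.mistral-large"))
# https://api.clarifai.com/v2/users/mistralai/apps/completion/models/mistral-large/outputs
```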

## Llama LLMs
| Model Name | Function Call |
|---------------------------|---------------------------------|
| clarifai/meta.Llama-2.llama2-7b-chat | `completion('clarifai/meta.Llama-2.llama2-7b-chat', messages)` |
| clarifai/meta.Llama-2.llama2-13b-chat | `completion('clarifai/meta.Llama-2.llama2-13b-chat', messages)` |
| clarifai/meta.Llama-2.llama2-70b-chat | `completion('clarifai/meta.Llama-2.llama2-70b-chat', messages)` |
| clarifai/meta.Llama-2.codeLlama-70b-Python | `completion('clarifai/meta.Llama-2.codeLlama-70b-Python', messages)` |
| clarifai/meta.Llama-2.codeLlama-70b-Instruct | `completion('clarifai/meta.Llama-2.codeLlama-70b-Instruct', messages)` |

## Mistral LLMs
| Model Name | Function Call |
|---------------------------------------------|------------------------------------------------------------------------|
| clarifai/mistralai.completion.mixtral-8x22B | `completion('clarifai/mistralai.completion.mixtral-8x22B', messages)` |
| clarifai/mistralai.completion.mistral-large | `completion('clarifai/mistralai.completion.mistral-large', messages)` |
| clarifai/mistralai.completion.mistral-medium | `completion('clarifai/mistralai.completion.mistral-medium', messages)` |
| clarifai/mistralai.completion.mistral-small | `completion('clarifai/mistralai.completion.mistral-small', messages)` |
| clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1 | `completion('clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1', messages)` |
| clarifai/mistralai.completion.mistral-7B-OpenOrca | `completion('clarifai/mistralai.completion.mistral-7B-OpenOrca', messages)` |
| clarifai/mistralai.completion.openHermes-2-mistral-7B | `completion('clarifai/mistralai.completion.openHermes-2-mistral-7B', messages)` |

## Jurassic LLMs
| Model Name | Function Call |
|-----------------------------------------------|---------------------------------------------------------------------|
| clarifai/ai21.complete.Jurassic2-Grande | `completion('clarifai/ai21.complete.Jurassic2-Grande', messages)` |
| clarifai/ai21.complete.Jurassic2-Grande-Instruct | `completion('clarifai/ai21.complete.Jurassic2-Grande-Instruct', messages)` |
| clarifai/ai21.complete.Jurassic2-Jumbo-Instruct | `completion('clarifai/ai21.complete.Jurassic2-Jumbo-Instruct', messages)` |
| clarifai/ai21.complete.Jurassic2-Jumbo | `completion('clarifai/ai21.complete.Jurassic2-Jumbo', messages)` |
| clarifai/ai21.complete.Jurassic2-Large | `completion('clarifai/ai21.complete.Jurassic2-Large', messages)` |

## Wizard LLMs
| Model Name | Function Call |
|-----------------------------------------------|---------------------------------------------------------------------|
| clarifai/wizardlm.generate.wizardCoder-Python-34B | `completion('clarifai/wizardlm.generate.wizardCoder-Python-34B', messages)` |
| clarifai/wizardlm.generate.wizardLM-70B | `completion('clarifai/wizardlm.generate.wizardLM-70B', messages)` |
| clarifai/wizardlm.generate.wizardLM-13B | `completion('clarifai/wizardlm.generate.wizardLM-13B', messages)` |
| clarifai/wizardlm.generate.wizardCoder-15B | `completion('clarifai/wizardlm.generate.wizardCoder-15B', messages)` |

## Anthropic models
| Model Name | Function Call |
|-----------------------------------------------|---------------------------------------------------------------------|
| clarifai/anthropic.completion.claude-v1 | `completion('clarifai/anthropic.completion.claude-v1', messages)` |
| clarifai/anthropic.completion.claude-instant-1_2 | `completion('clarifai/anthropic.completion.claude-instant-1_2', messages)` |
| clarifai/anthropic.completion.claude-instant | `completion('clarifai/anthropic.completion.claude-instant', messages)` |
| clarifai/anthropic.completion.claude-v2 | `completion('clarifai/anthropic.completion.claude-v2', messages)` |
| clarifai/anthropic.completion.claude-2_1 | `completion('clarifai/anthropic.completion.claude-2_1', messages)` |
| clarifai/anthropic.completion.claude-3-opus | `completion('clarifai/anthropic.completion.claude-3-opus', messages)` |
| clarifai/anthropic.completion.claude-3-sonnet | `completion('clarifai/anthropic.completion.claude-3-sonnet', messages)` |

## OpenAI GPT LLMs
| Model Name | Function Call |
|-----------------------------------------------|---------------------------------------------------------------------|
| clarifai/openai.chat-completion.GPT-4 | `completion('clarifai/openai.chat-completion.GPT-4', messages)` |
| clarifai/openai.chat-completion.GPT-3_5-turbo | `completion('clarifai/openai.chat-completion.GPT-3_5-turbo', messages)` |
| clarifai/openai.chat-completion.gpt-4-turbo | `completion('clarifai/openai.chat-completion.gpt-4-turbo', messages)` |
| clarifai/openai.completion.gpt-3_5-turbo-instruct | `completion('clarifai/openai.completion.gpt-3_5-turbo-instruct', messages)` |

## GCP LLMs
| Model Name | Function Call |
|-----------------------------------------------|---------------------------------------------------------------------|
| clarifai/gcp.generate.gemini-1_5-pro | `completion('clarifai/gcp.generate.gemini-1_5-pro', messages)` |
| clarifai/gcp.generate.imagen-2 | `completion('clarifai/gcp.generate.imagen-2', messages)` |
| clarifai/gcp.generate.code-gecko | `completion('clarifai/gcp.generate.code-gecko', messages)` |
| clarifai/gcp.generate.code-bison | `completion('clarifai/gcp.generate.code-bison', messages)` |
| clarifai/gcp.generate.text-bison | `completion('clarifai/gcp.generate.text-bison', messages)` |
| clarifai/gcp.generate.gemma-2b-it | `completion('clarifai/gcp.generate.gemma-2b-it', messages)` |
| clarifai/gcp.generate.gemma-7b-it | `completion('clarifai/gcp.generate.gemma-7b-it', messages)` |
| clarifai/gcp.generate.gemini-pro | `completion('clarifai/gcp.generate.gemini-pro', messages)` |
| clarifai/gcp.generate.gemma-1_1-7b-it | `completion('clarifai/gcp.generate.gemma-1_1-7b-it', messages)` |

## Cohere LLMs
| Model Name | Function Call |
|-----------------------------------------------|---------------------------------------------------------------------|
| clarifai/cohere.generate.cohere-generate-command | `completion('clarifai/cohere.generate.cohere-generate-command', messages)` |
| clarifai/cohere.generate.command-r-plus | `completion('clarifai/cohere.generate.command-r-plus', messages)` |

## Databricks LLMs
| Model Name | Function Call |
|---------------------------------------------------|---------------------------------------------------------------------|
| clarifai/databricks.drbx.dbrx-instruct | `completion('clarifai/databricks.drbx.dbrx-instruct', messages)` |
| clarifai/databricks.Dolly-v2.dolly-v2-12b | `completion('clarifai/databricks.Dolly-v2.dolly-v2-12b', messages)` |

## Microsoft LLMs
| Model Name | Function Call |
|---------------------------------------------------|---------------------------------------------------------------------|
| clarifai/microsoft.text-generation.phi-2 | `completion('clarifai/microsoft.text-generation.phi-2', messages)` |
| clarifai/microsoft.text-generation.phi-1_5 | `completion('clarifai/microsoft.text-generation.phi-1_5', messages)` |

## Salesforce models
| Model Name | Function Call |
|-----------------------------------------------------------|-------------------------------------------------------------------------------|
| clarifai/salesforce.blip.general-english-image-caption-blip-2 | `completion('clarifai/salesforce.blip.general-english-image-caption-blip-2', messages)` |
| clarifai/salesforce.xgen.xgen-7b-8k-instruct | `completion('clarifai/salesforce.xgen.xgen-7b-8k-instruct', messages)` |

## Other top-performing LLMs
| Model Name | Function Call |
|---------------------------------------------------|---------------------------------------------------------------------|
| clarifai/deci.decilm.deciLM-7B-instruct | `completion('clarifai/deci.decilm.deciLM-7B-instruct', messages)` |
| clarifai/upstage.solar.solar-10_7b-instruct | `completion('clarifai/upstage.solar.solar-10_7b-instruct', messages)` |
| clarifai/openchat.openchat.openchat-3_5-1210 | `completion('clarifai/openchat.openchat.openchat-3_5-1210', messages)` |
| clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B | `completion('clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B', messages)` |
| clarifai/fblgit.una-cybertron.una-cybertron-7b-v2 | `completion('clarifai/fblgit.una-cybertron.una-cybertron-7b-v2', messages)` |
| clarifai/tiiuae.falcon.falcon-40b-instruct | `completion('clarifai/tiiuae.falcon.falcon-40b-instruct', messages)` |
| clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat | `completion('clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat', messages)` |
| clarifai/bigcode.code.StarCoder | `completion('clarifai/bigcode.code.StarCoder', messages)` |
| clarifai/mosaicml.mpt.mpt-7b-instruct | `completion('clarifai/mosaicml.mpt.mpt-7b-instruct', messages)` |
@@ -49,6 +49,7 @@ azure_key: Optional[str] = None
anthropic_key: Optional[str] = None
replicate_key: Optional[str] = None
cohere_key: Optional[str] = None
clarifai_key: Optional[str] = None
maritalk_key: Optional[str] = None
ai21_key: Optional[str] = None
openrouter_key: Optional[str] = None

@@ -366,6 +367,73 @@ replicate_models: List = [
    "replit/replit-code-v1-3b:b84f4c074b807211cd75e3e8b1589b6399052125b4c27106e43d47189e8415ad",
]

clarifai_models: List = [
    'clarifai/meta.Llama-3.Llama-3-8B-Instruct',
    'clarifai/gcp.generate.gemma-1_1-7b-it',
    'clarifai/mistralai.completion.mixtral-8x22B',
    'clarifai/cohere.generate.command-r-plus',
    'clarifai/databricks.drbx.dbrx-instruct',
    'clarifai/mistralai.completion.mistral-large',
    'clarifai/mistralai.completion.mistral-medium',
    'clarifai/mistralai.completion.mistral-small',
    'clarifai/mistralai.completion.mixtral-8x7B-Instruct-v0_1',
    'clarifai/gcp.generate.gemma-2b-it',
    'clarifai/gcp.generate.gemma-7b-it',
    'clarifai/deci.decilm.deciLM-7B-instruct',
    'clarifai/mistralai.completion.mistral-7B-Instruct',
    'clarifai/gcp.generate.gemini-pro',
    'clarifai/anthropic.completion.claude-v1',
    'clarifai/anthropic.completion.claude-instant-1_2',
    'clarifai/anthropic.completion.claude-instant',
    'clarifai/anthropic.completion.claude-v2',
    'clarifai/anthropic.completion.claude-2_1',
    'clarifai/meta.Llama-2.codeLlama-70b-Python',
    'clarifai/meta.Llama-2.codeLlama-70b-Instruct',
    'clarifai/openai.completion.gpt-3_5-turbo-instruct',
    'clarifai/meta.Llama-2.llama2-7b-chat',
    'clarifai/meta.Llama-2.llama2-13b-chat',
    'clarifai/meta.Llama-2.llama2-70b-chat',
    'clarifai/openai.chat-completion.gpt-4-turbo',
    'clarifai/microsoft.text-generation.phi-2',
    'clarifai/meta.Llama-2.llama2-7b-chat-vllm',
    'clarifai/upstage.solar.solar-10_7b-instruct',
    'clarifai/openchat.openchat.openchat-3_5-1210',
    'clarifai/togethercomputer.stripedHyena.stripedHyena-Nous-7B',
    'clarifai/gcp.generate.text-bison',
    'clarifai/meta.Llama-2.llamaGuard-7b',
    'clarifai/fblgit.una-cybertron.una-cybertron-7b-v2',
    'clarifai/openai.chat-completion.GPT-4',
    'clarifai/openai.chat-completion.GPT-3_5-turbo',
    'clarifai/ai21.complete.Jurassic2-Grande',
    'clarifai/ai21.complete.Jurassic2-Grande-Instruct',
    'clarifai/ai21.complete.Jurassic2-Jumbo-Instruct',
    'clarifai/ai21.complete.Jurassic2-Jumbo',
    'clarifai/ai21.complete.Jurassic2-Large',
    'clarifai/cohere.generate.cohere-generate-command',
    'clarifai/wizardlm.generate.wizardCoder-Python-34B',
    'clarifai/wizardlm.generate.wizardLM-70B',
    'clarifai/tiiuae.falcon.falcon-40b-instruct',
    'clarifai/togethercomputer.RedPajama.RedPajama-INCITE-7B-Chat',
    'clarifai/gcp.generate.code-gecko',
    'clarifai/gcp.generate.code-bison',
    'clarifai/mistralai.completion.mistral-7B-OpenOrca',
    'clarifai/mistralai.completion.openHermes-2-mistral-7B',
    'clarifai/wizardlm.generate.wizardLM-13B',
    'clarifai/huggingface-research.zephyr.zephyr-7B-alpha',
    'clarifai/wizardlm.generate.wizardCoder-15B',
    'clarifai/microsoft.text-generation.phi-1_5',
    'clarifai/databricks.Dolly-v2.dolly-v2-12b',
    'clarifai/bigcode.code.StarCoder',
    'clarifai/salesforce.xgen.xgen-7b-8k-instruct',
    'clarifai/mosaicml.mpt.mpt-7b-instruct',
    'clarifai/anthropic.completion.claude-3-opus',
    'clarifai/anthropic.completion.claude-3-sonnet',
    'clarifai/gcp.generate.gemini-1_5-pro',
    'clarifai/gcp.generate.imagen-2',
    'clarifai/salesforce.blip.general-english-image-caption-blip-2',
]


huggingface_models: List = [
    "meta-llama/Llama-2-7b-hf",
    "meta-llama/Llama-2-7b-chat-hf",

@@ -470,6 +538,7 @@ provider_list: List = [
    "text-completion-openai",
    "cohere",
    "cohere_chat",
    "clarifai",
    "anthropic",
    "replicate",
    "huggingface",

@@ -608,6 +677,7 @@ from .llms.anthropic import AnthropicConfig
from .llms.anthropic_text import AnthropicTextConfig
from .llms.replicate import ReplicateConfig
from .llms.cohere import CohereConfig
from .llms.clarifai import ClarifaiConfig
from .llms.ai21 import AI21Config
from .llms.together_ai import TogetherAIConfig
from .llms.cloudflare import CloudflareConfig

216 litellm/llms/clarifai.py Normal file
@@ -0,0 +1,216 @@
import os, types, traceback
import json
import requests
import time
from typing import Callable, Optional
from litellm.utils import ModelResponse, Usage, Choices, Message
import litellm
import httpx
from .prompt_templates.factory import prompt_factory, custom_prompt


class ClarifaiError(Exception):
    def __init__(self, status_code, message, url):
        self.status_code = status_code
        self.message = message
        self.request = httpx.Request(method="POST", url=url)
        self.response = httpx.Response(status_code=status_code, request=self.request)
        super().__init__(self.message)


class ClarifaiConfig:
    """
    Reference: https://clarifai.com/meta/Llama-2/models/llama2-70b-chat
    TODO fill in the details
    """

    max_tokens: Optional[int] = None
    temperature: Optional[float] = None
    top_k: Optional[int] = None

    def __init__(
        self,
        max_tokens: Optional[int] = None,
        temperature: Optional[float] = None,
        top_k: Optional[int] = None,
    ) -> None:
        locals_ = locals()
        for key, value in locals_.items():
            if key != "self" and value is not None:
                setattr(self.__class__, key, value)

    @classmethod
    def get_config(cls):
        return {
            k: v
            for k, v in cls.__dict__.items()
            if not k.startswith("__")
            and not isinstance(
                v,
                (
                    types.FunctionType,
                    types.BuiltinFunctionType,
                    classmethod,
                    staticmethod,
                ),
            )
            and v is not None
        }


def validate_environment(api_key):
    # Clarifai expects the PAT as a Bearer token in the Authorization header
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
    }
    if api_key:
        headers["Authorization"] = f"Bearer {api_key}"
    return headers


def completions_to_model(payload):
    # if payload["n"] != 1:
    #     raise HTTPException(
    #         status_code=422,
    #         detail="Only one generation is supported. Please set candidate_count to 1.",
    #     )

    # Map OpenAI-style params onto Clarifai's request body
    params = {}
    if temperature := payload.get("temperature"):
        params["temperature"] = temperature
    if max_tokens := payload.get("max_tokens"):
        params["max_tokens"] = max_tokens
    return {
        "inputs": [{"data": {"text": {"raw": payload["prompt"]}}}],
        "model": {"output_info": {"params": params}},
    }


def convert_model_to_url(model: str, api_base: str):
    user_id, app_id, model_id = model.split(".")
    return f"{api_base}/users/{user_id}/apps/{app_id}/models/{model_id}/outputs"
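    # e.g. "mistralai.completion.mistral-large" with api_base "https://api.clarifai.com/v2"
    # -> "https://api.clarifai.com/v2/users/mistralai/apps/completion/models/mistral-large/outputs"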


def get_prompt_model_name(url: str):
    clarifai_model_name = url.split("/")[-2]
    if "claude" in clarifai_model_name:
        return "anthropic", clarifai_model_name.replace("_", ".")
    if ("llama" in clarifai_model_name) or ("mistral" in clarifai_model_name):
        return "", "meta-llama/llama-2-chat"
    else:
        return "", clarifai_model_name


def completion(
    model: str,
    messages: list,
    api_base: str,
    model_response: ModelResponse,
    print_verbose: Callable,
    encoding,
    api_key,
    logging_obj,
    custom_prompt_dict={},
    optional_params=None,
    litellm_params=None,
    logger_fn=None,
):
    headers = validate_environment(api_key)
    model = convert_model_to_url(model, api_base)
    prompt = " ".join(message["content"] for message in messages)  # TODO

    ## Load Config
    config = litellm.ClarifaiConfig.get_config()
    for k, v in config.items():
        if k not in optional_params:
            optional_params[k] = v

    custom_llm_provider, orig_model_name = get_prompt_model_name(model)
    if custom_llm_provider == "anthropic":
        prompt = prompt_factory(
            model=orig_model_name,
            messages=messages,
            api_key=api_key,
            custom_llm_provider="clarifai",
        )
    else:
        prompt = prompt_factory(
            model=orig_model_name,
            messages=messages,
            api_key=api_key,
            custom_llm_provider=custom_llm_provider,
        )

    data = {
        "prompt": prompt,
        **optional_params,
    }
    data = completions_to_model(data)

    ## LOGGING
    logging_obj.pre_call(
        input=prompt,
        api_key=api_key,
        additional_args={
            "complete_input_dict": data,
            "headers": headers,
            "api_base": api_base,
        },
    )

    ## COMPLETION CALL
    response = requests.post(
        model,
        headers=headers,
        data=json.dumps(data),
    )
    # Sample raw Clarifai response, for reference:
    """
{"status":{"code":10000,"description":"Ok","req_id":"d914cf7e097487997910650cde954a37"},"outputs":[{"id":"c2baa668174b4547bd4d2e9f8996198d","status":{"code":10000,"description":"Ok"},"created_at":"2024-02-07T10:57:52.917990493Z","model":{"id":"GPT-4","name":"GPT-4","created_at":"2023-06-08T17:40:07.964967Z","modified_at":"2023-12-04T11:39:54.587604Z","app_id":"chat-completion","model_version":{"id":"5d7a50b44aec4a01a9c492c5a5fcf387","created_at":"2023-11-09T19:57:56.961259Z","status":{"code":21100,"description":"Model is trained and ready"},"completed_at":"2023-11-09T20:00:48.933172Z","visibility":{"gettable":50},"app_id":"chat-completion","user_id":"openai","metadata":{}},"user_id":"openai","model_type_id":"text-to-text","visibility":{"gettable":50},"toolkits":[],"use_cases":[],"languages":[],"languages_full":[],"check_consents":[],"workflow_recommended":false,"image":{"url":"https://data.clarifai.com/small/users/openai/apps/chat-completion/inputs/image/34326a9914d361bb93ae8e5381689755","hosted":{"prefix":"https://data.clarifai.com","suffix":"users/openai/apps/chat-completion/inputs/image/34326a9914d361bb93ae8e5381689755","sizes":["small"],"crossorigin":"use-credentials"}}},"input":{"id":"fba1f22a332743f083ddae0a7eb443ae","data":{"text":{"raw":"what\'s the weather in SF","url":"https://samples.clarifai.com/placeholder.gif"}}},"data":{"text":{"raw":"As an AI, I\'m unable to provide real-time information or updates. Please check a reliable weather website or app for the current weather in San Francisco.","text_info":{"encoding":"UnknownTextEnc"}}}}]}
    """
    if response.status_code != 200:
        raise ClarifaiError(status_code=response.status_code, message=response.text, url=model)
    if "stream" in optional_params and optional_params["stream"] == True:
        return response.iter_lines()
    else:
        logging_obj.post_call(
            input=prompt,
            api_key=api_key,
            original_response=response.text,
            additional_args={"complete_input_dict": data},
        )
        ## RESPONSE OBJECT
        completion_response = response.json()
        try:
            choices_list = []
            for idx, item in enumerate(completion_response["outputs"]):
                if len(item["data"]["text"]["raw"]) > 0:
                    message_obj = Message(content=item["data"]["text"]["raw"])
                else:
                    message_obj = Message(content=None)
                choice_obj = Choices(
                    finish_reason="stop",
                    index=idx + 1,  # check: OpenAI responses use 0-based choice indices
                    message=message_obj,
                )
                choices_list.append(choice_obj)
            model_response["choices"] = choices_list
        except Exception as e:
            raise ClarifaiError(
                message=traceback.format_exc(), status_code=response.status_code, url=model
            )

        # Calculate Usage
        prompt_tokens = len(encoding.encode(prompt))
        completion_tokens = len(
            encoding.encode(model_response["choices"][0]["message"].get("content"))
        )
        model_response["model"] = model
        model_response["usage"] = Usage(
            prompt_tokens=prompt_tokens,
            completion_tokens=completion_tokens,
            total_tokens=prompt_tokens + completion_tokens,
        )
        return model_response


@@ -1306,6 +1306,9 @@ def prompt_factory(
        return anthropic_pt(messages=messages)
    elif "mistral." in model:
        return mistral_instruct_pt(messages=messages)
    elif custom_llm_provider == "clarifai":
        if "claude" in model:
            return anthropic_pt(messages=messages)
    elif custom_llm_provider == "perplexity":
        for message in messages:
            message.pop("name", None)


@@ -53,6 +53,7 @@ from .llms import (
    ollama,
    ollama_chat,
    cloudflare,
    clarifai,
    cohere,
    cohere_chat,
    petals,

@@ -1150,6 +1151,55 @@ def completion(
            )

            response = model_response
        elif (
            "clarifai" in model
            or custom_llm_provider == "clarifai"
            or model in litellm.clarifai_models
        ):
            clarifai_key = (
                api_key
                or litellm.clarifai_key
                or litellm.api_key
                or get_secret("CLARIFAI_API_KEY")
                or get_secret("CLARIFAI_API_TOKEN")
            )

            api_base = (
                api_base
                or litellm.api_base
                or get_secret("CLARIFAI_API_BASE")
                or "https://api.clarifai.com/v2"
            )

            custom_prompt_dict = custom_prompt_dict or litellm.custom_prompt_dict
            model_response = clarifai.completion(
                model=model,
                messages=messages,
                api_base=api_base,
                model_response=model_response,
                print_verbose=print_verbose,
                optional_params=optional_params,
                litellm_params=litellm_params,
                logger_fn=logger_fn,
                encoding=encoding,  # for calculating input/output tokens
                api_key=clarifai_key,
                logging_obj=logging,
                custom_prompt_dict=custom_prompt_dict,
            )

            if "stream" in optional_params and optional_params["stream"] == True:
                # don't try to access the stream object directly; wrap it
                model_response = CustomStreamWrapper(
                    model_response, model, logging_obj=logging, custom_llm_provider="clarifai"
                )

            if optional_params.get("stream", False) or acompletion == True:
                ## LOGGING
                logging.post_call(
                    input=messages,
                    api_key=clarifai_key,
                    original_response=model_response,
                )
            response = model_response

        elif custom_llm_provider == "anthropic":
            api_key = (

67 litellm/tests/test_clarifai_completion.py Normal file
@@ -0,0 +1,67 @@
import sys, os
import traceback
from dotenv import load_dotenv

load_dotenv()
import os, io

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import pytest
import litellm
from litellm import embedding, completion, completion_cost, Timeout, ModelResponse
from litellm import RateLimitError

# litellm.num_retries = 3
litellm.cache = None
litellm.success_callback = []
user_message = "Write a short poem about the sky"
messages = [{"content": user_message, "role": "user"}]


@pytest.fixture(autouse=True)
def reset_callbacks():
    print("\npytest fixture - resetting callbacks")
    litellm.success_callback = []
    litellm._async_success_callback = []
    litellm.failure_callback = []
    litellm.callbacks = []


def test_completion_clarifai_claude_2_1():
    print("calling clarifai claude completion")
    import os

    clarifai_pat = os.environ["CLARIFAI_API_KEY"]

    try:
        response = completion(
            model="clarifai/anthropic.completion.claude-2_1",
            messages=messages,
            max_tokens=10,
            temperature=0.1,
        )
        print(response)

    except RateLimitError:
        pass

    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


def test_completion_clarifai_mistral_large():
    try:
        litellm.set_verbose = True
        response: ModelResponse = completion(
            model="clarifai/mistralai.completion.mistral-small",
            messages=messages,
            max_tokens=10,
            temperature=0.78,
        )
        # Add any assertions here to check the response
        assert len(response.choices) > 0
        assert len(response.choices[0].message.content) > 0
    except RateLimitError:
        pass
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")
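
These tests can be run locally (a sketch, assuming `CLARIFAI_API_KEY` is available in the environment or via the `.env` file picked up by `load_dotenv`):

```python
import pytest

# invoke the new Clarifai tests with verbose output
pytest.main(["litellm/tests/test_clarifai_completion.py", "-v"])
```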