Merge pull request #3955 from liuyl/empower-functions-v1

Add empower-functions integration to litellm
Krish Dholakia 2024-07-09 12:09:55 -07:00 committed by GitHub
commit aa2fd29e48
5 changed files with 142 additions and 6 deletions


@@ -238,6 +238,7 @@ curl 'http://0.0.0.0:4000/key/generate' \
| [cloudflare AI Workers](https://docs.litellm.ai/docs/providers/cloudflare_workers) | ✅ | ✅ | ✅ | ✅ | | |
| [cohere](https://docs.litellm.ai/docs/providers/cohere) | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [anthropic](https://docs.litellm.ai/docs/providers/anthropic) | ✅ | ✅ | ✅ | ✅ | | |
| [empower](https://docs.litellm.ai/docs/providers/empower) | ✅ | ✅ | ✅ | ✅ | | |
| [huggingface](https://docs.litellm.ai/docs/providers/huggingface) | ✅ | ✅ | ✅ | ✅ | ✅ | |
| [replicate](https://docs.litellm.ai/docs/providers/replicate) | ✅ | ✅ | ✅ | ✅ | | |
| [together_ai](https://docs.litellm.ai/docs/providers/togetherai) | ✅ | ✅ | ✅ | ✅ | | |


@@ -0,0 +1,89 @@
# Empower
LiteLLM supports all models on Empower.
## API Keys
```python
import os
os.environ["EMPOWER_API_KEY"] = "your-api-key"
```
## Example Usage
```python
from litellm import completion
import os
os.environ["EMPOWER_API_KEY"] = "your-api-key"
messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]
response = completion(model="empower/empower-functions", messages=messages)
print(response)
```
## Example Usage - Streaming
```python
from litellm import completion
import os

os.environ["EMPOWER_API_KEY"] = "your-api-key"
messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]
response = completion(model="empower/empower-functions", messages=messages, stream=True)
for chunk in response:
    print(chunk['choices'][0]['delta'])
```
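
A stream can only be consumed once, so to capture the full completion text, replace the loop above with one that accumulates the deltas. A minimal sketch, assuming the attribute-style access LiteLLM's OpenAI-compatible chunk objects expose:

```python
# Reassemble the streamed deltas into the full completion text.
full_text = ""
for chunk in response:
    delta = chunk.choices[0].delta
    if delta.content:
        full_text += delta.content
print(full_text)
```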
## Example Usage - Automatic Tool Calling
```python
from litellm import completion
import os

os.environ["EMPOWER_API_KEY"] = "your-api-key"
messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }
]

response = completion(
    model="empower/empower-functions-small",
    messages=messages,
    tools=tools,
    tool_choice="auto",  # auto is default, but we'll be explicit
)
print("\nLLM Response:\n", response)
```
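
The example above stops at printing the raw response. A minimal sketch of one way to complete the tool-calling round trip follows; `get_current_weather` here is a hypothetical local implementation (not part of LiteLLM or Empower), and the attributes assumed follow the OpenAI-style response objects LiteLLM returns:

```python
import json

# Hypothetical local implementation of the tool declared above.
def get_current_weather(location: str, unit: str = "fahrenheit") -> str:
    return json.dumps({"location": location, "temperature": "72", "unit": unit})

response_message = response.choices[0].message
tool_calls = response_message.tool_calls or []
if tool_calls:
    # Echo the assistant turn, then append one tool result per call.
    messages.append(response_message)
    for tool_call in tool_calls:
        args = json.loads(tool_call.function.arguments)
        result = get_current_weather(**args)
        messages.append(
            {
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": result,
            }
        )
    # Ask the model to compose a final answer from the tool results.
    final = completion(model="empower/empower-functions-small", messages=messages)
    print(final.choices[0].message.content)
```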
## Empower Models
LiteLLM supports `streaming` and `non-streaming` requests for all models on https://empower.dev/.

Example Empower usage (note: LiteLLM supports all models deployed on Empower):

### Empower LLMs - Automatic Tool Calling Models
| Model Name | Function Call | Required OS Variables |
|-----------------------------------|------------------------------------------------------------|------------------------------------|
| empower/empower-functions | `completion('empower/empower-functions', messages)` | `os.environ['EMPOWER_API_KEY']` |
| empower/empower-functions-small | `completion('empower/empower-functions-small', messages)` | `os.environ['EMPOWER_API_KEY']` |


@@ -334,6 +334,7 @@ cohere_models: List = []
cohere_chat_models: List = []
mistral_chat_models: List = []
anthropic_models: List = []
empower_models: List = []
openrouter_models: List = []
vertex_language_models: List = []
vertex_vision_models: List = []
@@ -364,6 +365,8 @@ for key, value in model_cost.items():
        mistral_chat_models.append(key)
    elif value.get("litellm_provider") == "anthropic":
        anthropic_models.append(key)
    elif value.get("litellm_provider") == "empower":
        empower_models.append(key)
    elif value.get("litellm_provider") == "openrouter":
        openrouter_models.append(key)
    elif value.get("litellm_provider") == "vertex_ai-text-models":
@@ -411,6 +414,7 @@ openai_compatible_endpoints: List = [
    "https://integrate.api.nvidia.com/v1",
    "api.deepseek.com/v1",
    "api.together.xyz/v1",
    "app.empower.dev/api/v1",
    "inference.friendli.ai/v1",
]
@@ -428,6 +432,7 @@ openai_compatible_providers: List = [
    "xinference",
    "together_ai",
    "fireworks_ai",
    "empower",
    "friendliai",
    "azure_ai",
]
@@ -530,6 +535,10 @@ huggingface_models: List = [
    "meta-llama/Llama-2-70b",
    "meta-llama/Llama-2-70b-chat",
]  # these have been tested on extensively. But by default all text2text-generation and text-generation models are supported by liteLLM. - https://docs.litellm.ai/docs/providers
empower_models = [
    "empower/empower-functions",
    "empower/empower-functions-small",
]

together_ai_models: List = [
    # llama llms - chat
@@ -665,6 +674,7 @@ provider_list: List = [
    "triton",
    "predibase",
    "databricks",
    "empower",
    "custom",  # custom apis
]
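
Taken together, these `__init__.py` changes register Empower everywhere LiteLLM looks up providers. A quick sanity check of the wiring (a sketch, assuming a build that includes this change) could look like:

```python
import litellm

# The provider, its endpoint, and its models are now discoverable at import time.
assert "empower" in litellm.provider_list
assert "empower" in litellm.openai_compatible_providers
assert "app.empower.dev/api/v1" in litellm.openai_compatible_endpoints
print(litellm.empower_models)
# ['empower/empower-functions', 'empower/empower-functions-small']
```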


@@ -281,6 +281,34 @@ def test_completion_claude():
# test_completion_claude()


def test_completion_empower():
    litellm.set_verbose = True
    messages = [
        {
            "role": "user",
            "content": "\nWhat is the query for `console.log` => `console.error`\n",
        },
        {
            "role": "assistant",
            "content": "\nThis is the GritQL query for the given before/after examples:\n<gritql>\n`console.log` => `console.error`\n</gritql>\n",
        },
        {
            "role": "user",
            "content": "\nWhat is the query for `console.info` => `consdole.heaven`\n",
        },
    ]
    try:
        # test without max tokens
        response = completion(
            model="empower/empower-functions-small",
            messages=messages,
        )
        # Add any assertions here to check the response
        print(response)
    except Exception as e:
        pytest.fail(f"Error occurred: {e}")


def test_completion_claude_3_empty_response():
    litellm.set_verbose = True


@@ -4262,6 +4262,9 @@ def get_llm_provider(
        # deepinfra is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1
        api_base = "https://api.deepinfra.com/v1/openai"
        dynamic_api_key = get_secret("DEEPINFRA_API_KEY")
    elif custom_llm_provider == "empower":
        api_base = "https://app.empower.dev/api/v1"
        dynamic_api_key = get_secret("EMPOWER_API_KEY")
    elif custom_llm_provider == "groq":
        # groq is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.groq.com/openai/v1
        api_base = "https://api.groq.com/openai/v1"
@@ -4384,6 +4387,9 @@ def get_llm_provider(
        elif endpoint == "https://codestral.mistral.ai/v1":
            custom_llm_provider = "text-completion-codestral"
            dynamic_api_key = get_secret("CODESTRAL_API_KEY")
        elif endpoint == "app.empower.dev/api/v1":
            custom_llm_provider = "empower"
            dynamic_api_key = get_secret("EMPOWER_API_KEY")
        elif endpoint == "api.deepseek.com/v1":
            custom_llm_provider = "deepseek"
            dynamic_api_key = get_secret("DEEPSEEK_API_KEY")
@@ -4480,6 +4486,8 @@ def get_llm_provider(
        # openai embeddings
        elif model in litellm.open_ai_embedding_models:
            custom_llm_provider = "openai"
        elif model in litellm.empower_models:
            custom_llm_provider = "empower"
    if custom_llm_provider is None or custom_llm_provider == "":
        if litellm.suppress_debug_info == False:
            print()  # noqa
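
With these hooks in place, provider resolution can be exercised directly. A minimal sketch follows, assuming `get_llm_provider` is importable from `litellm.utils` and returns the usual `(model, provider, dynamic_api_key, api_base)` tuple:

```python
import os
from litellm.utils import get_llm_provider

os.environ["EMPOWER_API_KEY"] = "your-api-key"

# Resolution by model prefix: "empower/..." selects the empower provider
# and picks up the hosted endpoint plus EMPOWER_API_KEY automatically.
model, provider, api_key, api_base = get_llm_provider(model="empower/empower-functions")
print(provider)  # "empower"
print(api_base)  # "https://app.empower.dev/api/v1"

# Resolution by endpoint: an api_base matching the registered endpoint
# maps back to the empower provider as well.
_, provider, _, _ = get_llm_provider(
    model="empower-functions", api_base="app.empower.dev/api/v1"
)
print(provider)  # "empower"
```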