add document

commit 6a004b9211
parent 668c894e9a
4 changed files with 111 additions and 2 deletions
```diff
@@ -218,6 +218,7 @@ curl 'http://0.0.0.0:4000/key/generate' \
 | [cloudflare AI Workers](https://docs.litellm.ai/docs/providers/cloudflare_workers) | ✅ | ✅ | ✅ | ✅ |
 | [cohere](https://docs.litellm.ai/docs/providers/cohere) | ✅ | ✅ | ✅ | ✅ | ✅ |
 | [anthropic](https://docs.litellm.ai/docs/providers/anthropic) | ✅ | ✅ | ✅ | ✅ |
+| [empower](https://docs.litellm.ai/docs/providers/empower) | ✅ | ✅ | ✅ | ✅ |
 | [huggingface](https://docs.litellm.ai/docs/providers/huggingface) | ✅ | ✅ | ✅ | ✅ | ✅ |
 | [replicate](https://docs.litellm.ai/docs/providers/replicate) | ✅ | ✅ | ✅ | ✅ |
 | [together_ai](https://docs.litellm.ai/docs/providers/togetherai) | ✅ | ✅ | ✅ | ✅ |
```
New file: docs/my-website/docs/providers/empower.md (89 lines)
# Empower

LiteLLM supports all models on Empower.

## API Keys

```python
import os

os.environ["EMPOWER_API_KEY"] = "your-api-key"
```

## Example Usage

```python
from litellm import completion
import os

os.environ["EMPOWER_API_KEY"] = "your-api-key"

messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]

response = completion(model="empower/empower-functions", messages=messages)
print(response)
```

## Example Usage - Streaming

```python
from litellm import completion
import os

os.environ["EMPOWER_API_KEY"] = "your-api-key"

messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]

response = completion(model="empower/empower-functions", messages=messages, stream=True)
for chunk in response:
    print(chunk['choices'][0]['delta'])
```

## Example Usage - Automatic Tool Calling

```python
from litellm import completion
import os

os.environ["EMPOWER_API_KEY"] = "your-api-key"

messages = [{"role": "user", "content": "What's the weather like in San Francisco, Tokyo, and Paris?"}]
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_current_weather",
            "description": "Get the current weather in a given location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }
]

response = completion(
    model="empower/empower-functions-small",
    messages=messages,
    tools=tools,
    tool_choice="auto",  # auto is default, but we'll be explicit
)
print("\nLLM Response:\n", response)
```
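The example above stops after printing the raw response. As an editor's sketch (not part of the committed doc), the returned tool calls could be executed and fed back like this — it assumes the OpenAI-style `tool_calls` response shape that litellm returns, and `get_current_weather` is a hypothetical local implementation of the tool declared above:

```python
import json

# Hypothetical local implementation of the declared tool; a real app
# would call a weather API instead of returning canned data.
def get_current_weather(location, unit="fahrenheit"):
    return json.dumps({"location": location, "temperature": "72", "unit": unit})

response_message = response.choices[0].message
if response_message.tool_calls:
    # Keep the assistant's tool-call turn in the conversation history.
    messages.append(response_message)
    for tool_call in response_message.tool_calls:
        args = json.loads(tool_call.function.arguments)
        result = get_current_weather(**args)
        messages.append(
            {"role": "tool", "tool_call_id": tool_call.id, "content": result}
        )
    # Ask the model to turn the tool results into a final answer.
    follow_up = completion(model="empower/empower-functions-small", messages=messages)
    print(follow_up.choices[0].message.content)
```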
## Empower Models

liteLLM supports `non-streaming` and `streaming` requests to all models on https://empower.dev/

Example Empower Usage - Note: liteLLM supports all models deployed on Empower

### Empower LLMs - Automatic Tool Use models

| Model Name                       | Function Call                                              | Required OS Variables           |
|----------------------------------|------------------------------------------------------------|---------------------------------|
| empower/empower-functions        | `completion('empower/empower-functions', messages)`        | `os.environ['EMPOWER_API_KEY']` |
| empower/empower-functions-small  | `completion('empower/empower-functions-small', messages)`  | `os.environ['EMPOWER_API_KEY']` |
```diff
@@ -313,6 +313,7 @@ cohere_models: List = []
 cohere_chat_models: List = []
 mistral_chat_models: List = []
 anthropic_models: List = []
+empower_models: List = []
 openrouter_models: List = []
 vertex_language_models: List = []
 vertex_vision_models: List = []
```
```diff
@@ -342,6 +343,8 @@ for key, value in model_cost.items():
         mistral_chat_models.append(key)
     elif value.get("litellm_provider") == "anthropic":
         anthropic_models.append(key)
+    elif value.get("litellm_provider") == "empower":
+        empower_models.append(key)
     elif value.get("litellm_provider") == "openrouter":
         openrouter_models.append(key)
     elif value.get("litellm_provider") == "vertex_ai-text-models":
```
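For context, the loop above buckets litellm's model-cost map by its `litellm_provider` field. A minimal self-contained sketch of that classification (the entry shape is an assumption mirroring other providers, not taken from the commit):

```python
# Hypothetical cost-map entries; "litellm_provider" is the field the loop reads.
model_cost = {
    "empower/empower-functions": {"litellm_provider": "empower", "mode": "chat"},
    "gpt-3.5-turbo": {"litellm_provider": "openai", "mode": "chat"},
}

empower_models = [
    key for key, value in model_cost.items()
    if value.get("litellm_provider") == "empower"
]
print(empower_models)  # ['empower/empower-functions']
```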
```diff
@@ -385,6 +388,7 @@ openai_compatible_endpoints: List = [
     "api.groq.com/openai/v1",
     "api.deepseek.com/v1",
     "api.together.xyz/v1",
+    "app.empower.dev/api/v1"
 ]
 
 # this is maintained for Exception Mapping
```
```diff
@@ -398,6 +402,7 @@ openai_compatible_providers: List = [
     "xinference",
     "together_ai",
     "fireworks_ai",
+    "empower"
 ]
 
 
```
```diff
@@ -498,6 +503,10 @@ huggingface_models: List = [
     "meta-llama/Llama-2-70b",
     "meta-llama/Llama-2-70b-chat",
 ] # these have been tested on extensively. But by default all text2text-generation and text-generation models are supported by liteLLM. - https://docs.litellm.ai/docs/providers
+empower_models = [
+    "empower/empower-functions",
+    "empower/empower-functions-small",
+]
 
 together_ai_models: List = [
     # llama llms - chat
```
```diff
@@ -625,6 +634,7 @@ provider_list: List = [
     "triton",
     "predibase",
     "databricks",
+    "empower",
     "custom", # custom apis
 ]
 
```
```diff
@@ -4873,6 +4873,7 @@ def register_model(model_cost: Union[str, dict]):
         litellm.model_cost.setdefault(key, {}).update(value)
         verbose_logger.debug(f"{key} added to model cost map")
         # add new model names to provider lists
+        print(f"provider: {value.get('litellm_provider')}")
         if value.get("litellm_provider") == "openai":
             if key not in litellm.open_ai_chat_completion_models:
                 litellm.open_ai_chat_completion_models.append(key)
```
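As a usage illustration (not part of the diff): `register_model` takes a cost-map dict keyed by model name, so a custom Empower entry can be registered at runtime. Field names other than `litellm_provider` are assumptions based on litellm's public cost map:

```python
import litellm

litellm.register_model({
    "empower/empower-functions": {
        "litellm_provider": "empower",  # the field the loop above switches on
        "mode": "chat",                 # assumed field
        "input_cost_per_token": 0.0,    # placeholder pricing
        "output_cost_per_token": 0.0,
    }
})
```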
```diff
@@ -6732,6 +6733,9 @@ def get_llm_provider(
             # deepinfra is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.endpoints.anyscale.com/v1
             api_base = "https://api.deepinfra.com/v1/openai"
             dynamic_api_key = get_secret("DEEPINFRA_API_KEY")
+        elif custom_llm_provider == "empower":
+            api_base = "https://app.empower.dev/api/v1"
+            dynamic_api_key = get_secret("EMPOWER_API_KEY")
         elif custom_llm_provider == "groq":
             # groq is openai compatible, we just need to set this to custom_openai and have the api_base be https://api.groq.com/openai/v1
             api_base = "https://api.groq.com/openai/v1"
```
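With the `empower/` prefix, litellm now resolves provider, base URL, and API key in one step. A quick check (an illustration, not from the commit, assuming `get_llm_provider`'s usual `(model, provider, api_key, api_base)` return tuple):

```python
import os
from litellm import get_llm_provider

os.environ["EMPOWER_API_KEY"] = "your-api-key"

model, provider, api_key, api_base = get_llm_provider(model="empower/empower-functions")
print(provider)  # expected: "empower"
print(api_base)  # expected: "https://app.empower.dev/api/v1"
```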
```diff
@@ -6825,6 +6829,9 @@ def get_llm_provider(
         elif endpoint == "api.groq.com/openai/v1":
             custom_llm_provider = "groq"
             dynamic_api_key = get_secret("GROQ_API_KEY")
+        elif endpoint == "app.empower.dev/api/v1":
+            custom_llm_provider = "empower"
+            dynamic_api_key = get_secret("EMPOWER_API_KEY")
         elif endpoint == "api.deepseek.com/v1":
             custom_llm_provider = "deepseek"
             dynamic_api_key = get_secret("DEEPSEEK_API_KEY")
```
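This second branch lets the provider be inferred from an explicit `api_base` instead of a model-name prefix. A hedged sketch of what that enables (assuming the endpoint-matching path is taken when `api_base` is passed to `completion`):

```python
import os
from litellm import completion

os.environ["EMPOWER_API_KEY"] = "your-api-key"

# No "empower/" prefix needed: the provider is inferred from the endpoint.
response = completion(
    model="empower-functions",
    api_base="https://app.empower.dev/api/v1",
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response)
```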
```diff
@@ -6915,6 +6922,8 @@ def get_llm_provider(
         # openai embeddings
         elif model in litellm.open_ai_embedding_models:
             custom_llm_provider = "openai"
+        elif model in litellm.empower_models:
+            custom_llm_provider = "empower"
         if custom_llm_provider is None or custom_llm_provider == "":
             if litellm.suppress_debug_info == False:
                 print() # noqa
```