forked from phoenix/litellm-mirror

fix supported LLM on docs

parent 37d743fb2b
commit 0cc561bc5d
8 changed files with 11 additions and 11 deletions
cookbook/LiteLLM_AB_TestLLMs.ipynb (vendored, 2 changes)
@@ -50,7 +50,7 @@
 "\n",
 "\n",
 "# define a dict of model id and % of requests for model\n",
-"# see models here: https://docs.litellm.ai/docs/completion/supported\n",
+"# see models here: https://docs.litellm.ai/docs/providers\n",
 "split_per_model = {\n",
 "\t\"gpt-4\": 0.3,\n",
 "\t\"gpt-3.5-turbo\": 0.7\n",
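For context, a minimal sketch of how a weight dict like `split_per_model` could drive per-request routing. The `pick_model` helper is hypothetical (not from the cookbook); `litellm.completion` is the call the notebooks use, and provider API keys are assumed to be set in the environment:

```python
import random
from litellm import completion

# traffic share per model, as in the notebook hunk above
split_per_model = {"gpt-4": 0.3, "gpt-3.5-turbo": 0.7}

def pick_model(split: dict) -> str:
    # hypothetical helper: weighted random choice of a model id
    models, weights = zip(*split.items())
    return random.choices(models, weights=weights, k=1)[0]

response = completion(
    model=pick_model(split_per_model),
    messages=[{"role": "user", "content": "Hey, how's it going?"}],
)
print(response["choices"][0]["message"]["content"])
```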
cookbook/LiteLLM_Comparing_LLMs.ipynb (vendored, 2 changes)
@@ -100,7 +100,7 @@
 "source": [
 "results = [] # for storing results\n",
 "\n",
-"models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/completion/supported\n",
+"models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/providers\n",
 "for question in questions:\n",
 "  row = [question]\n",
 "  for model in models:\n",
@@ -22,7 +22,7 @@
 </h4>
 
 <h4 align="center">
-<a href="https://docs.litellm.ai/docs/completion/supported" target="_blank">100+ Supported Models</a> |
+<a href="https://docs.litellm.ai/docs/providers" target="_blank">100+ Supported Models</a> |
 <a href="https://docs.litellm.ai/docs/" target="_blank">Docs</a> |
 <a href="https://litellm.ai/playground" target="_blank">Demo Website</a>
 </h4>
@@ -101,7 +101,7 @@ python3 main.py
 ### Set your LLM Configs
 Set your LLMs and LLM weights you want to run A/B testing with
 In main.py set your selected LLMs you want to AB test in `llm_dict`
-You can A/B test more than 100+ LLMs using LiteLLM https://docs.litellm.ai/docs/completion/supported
+You can A/B test more than 100+ LLMs using LiteLLM https://docs.litellm.ai/docs/providers
 ```python
 llm_dict = {
     "gpt-4": 0.2,
@@ -114,7 +114,7 @@ llm_dict = {
 #### Setting your API Keys
 Set your LLM API keys in a .env file in the directory or set them as `os.environ` variables.
 
-See https://docs.litellm.ai/docs/completion/supported for the format of API keys
+See https://docs.litellm.ai/docs/providers for the format of API keys
 
 LiteLLM generalizes api keys to follow the following format
 `PROVIDER_API_KEY`
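As a quick illustration of that `PROVIDER_API_KEY` convention, keys can be set as environment variables before the server starts (values below are placeholders):

```python
import os

# each key name follows the PROVIDER_API_KEY format described above
os.environ["OPENAI_API_KEY"] = "sk-..."         # placeholder
os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..."  # placeholder
os.environ["COHERE_API_KEY"] = "..."            # placeholder
```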
@@ -1,7 +1,7 @@
 ---
 displayed_sidebar: tutorialSidebar
 ---
-# litellm
+# Litellm
 
 import QueryParamReader from '../src/components/queryParamReader.js'
 
@@ -21,7 +21,7 @@ a light package to simplify calling OpenAI, Azure, Cohere, Anthropic, Huggingfac
 
 # usage
 
-<a href='https://docs.litellm.ai/docs/completion/supported' target="_blank"><img alt='None' src='https://img.shields.io/badge/Supported_LLMs-100000?style=for-the-badge&logo=None&logoColor=000000&labelColor=000000&color=8400EA'/></a>
+<a href='https://docs.litellm.ai/docs/providers' target="_blank"><img alt='None' src='https://img.shields.io/badge/Supported_LLMs-100000?style=for-the-badge&logo=None&logoColor=000000&labelColor=000000&color=8400EA'/></a>
 
 Demo - https://litellm.ai/playground \
 Read the docs - https://docs.litellm.ai/docs/
@@ -10,7 +10,7 @@ In this tutorial, we'll walk through A/B testing between GPT-4 and Llama2 in pro
 
 
 * [Deploying models on Huggingface](https://huggingface.co/docs/inference-endpoints/guides/create_endpoint)
-* [All supported providers on LiteLLM](https://docs.litellm.ai/docs/completion/supported)
+* [All supported providers on LiteLLM](https://docs.litellm.ai/docs/providers)
 
 # Code Walkthrough
 
@@ -76,7 +76,7 @@ os.environ['ANTHROPIC_API_KEY'] = ""
 ``` python
 results = [] # for storing results
 
-models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/completion/supported
+models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/providers
 for question in questions:
     row = [question]
     for model in models:
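One plausible completion of that loop, assuming `questions` is a list of prompt strings and each row collects one answer per model. This is a sketch, not the tutorial's exact code; `litellm.completion` and the response shape are the ones the docs describe:

```python
from litellm import completion

questions = ["What is LiteLLM?"]  # assumed input for this sketch
models = ["gpt-3.5-turbo", "claude-2"]
results = []  # for storing results

for question in questions:
    row = [question]
    for model in models:
        response = completion(
            model=model,
            messages=[{"role": "user", "content": question}],
        )
        # consistent output shape across providers (see the hunk below)
        row.append(response["choices"][0]["message"]["content"])
    results.append(row)
```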
@@ -16,7 +16,7 @@ a light package to simplify calling OpenAI, Azure, Cohere, Anthropic, Huggingfac
 - guarantees [consistent output](https://litellm.readthedocs.io/en/latest/output/), text responses will always be available at `['choices'][0]['message']['content']`
 - exception mapping - common exceptions across providers are mapped to the [OpenAI exception types](https://help.openai.com/en/articles/6897213-openai-library-error-types-guidance)
 # usage
-<a href='https://docs.litellm.ai/docs/completion/supported' target="_blank"><img alt='None' src='https://img.shields.io/badge/Supported_LLMs-100000?style=for-the-badge&logo=None&logoColor=000000&labelColor=000000&color=8400EA'/></a>
+<a href='https://docs.litellm.ai/docs/providers' target="_blank"><img alt='None' src='https://img.shields.io/badge/Supported_LLMs-100000?style=for-the-badge&logo=None&logoColor=000000&labelColor=000000&color=8400EA'/></a>
 
 Demo - https://litellm.ai/playground \
 Read the docs - https://docs.litellm.ai/docs/
@@ -144,7 +144,7 @@ huggingface_models = [
     "meta-llama/Llama-2-13b-chat",
     "meta-llama/Llama-2-70b",
     "meta-llama/Llama-2-70b-chat",
-] # these have been tested on extensively. But by default all text2text-generation and text-generation models are supported by liteLLM. - https://docs.litellm.ai/docs/completion/supported
+] # these have been tested on extensively. But by default all text2text-generation and text-generation models are supported by liteLLM. - https://docs.litellm.ai/docs/providers
 
 ai21_models = ["j2-ultra", "j2-mid", "j2-light"]
 
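For reference, a hedged sketch of calling one of those Hugging Face models through LiteLLM. The `huggingface/` model prefix routes to the Hugging Face provider, and `api_base` would point at your own deployed inference endpoint; the endpoint URL below is a placeholder:

```python
from litellm import completion

# placeholder endpoint; deploy the model first (see the Huggingface link above)
response = completion(
    model="huggingface/meta-llama/Llama-2-70b-chat",
    messages=[{"role": "user", "content": "Hello!"}],
    api_base="https://my-endpoint.endpoints.huggingface.cloud",
)
print(response["choices"][0]["message"]["content"])
```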