fix supported LLMs link in docs

ishaan-jaff 2023-09-08 15:07:13 -07:00
parent 37d743fb2b
commit 0cc561bc5d
8 changed files with 11 additions and 11 deletions


@@ -50,7 +50,7 @@
 "\n",
 "\n",
 "# define a dict of model id and % of requests for model\n",
-"# see models here: https://docs.litellm.ai/docs/completion/supported\n",
+"# see models here: https://docs.litellm.ai/docs/providers\n",
 "split_per_model = {\n",
 "\t\"gpt-4\": 0.3,\n",
 "\t\"gpt-3.5-turbo\": 0.7\n",


@@ -100,7 +100,7 @@
 "source": [
 "results = [] # for storing results\n",
 "\n",
-"models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/completion/supported\n",
+"models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/providers\n",
 "for question in questions:\n",
 " row = [question]\n",
 " for model in models:\n",


@@ -22,7 +22,7 @@
 </h4>
 <h4 align="center">
-<a href="https://docs.litellm.ai/docs/completion/supported" target="_blank">100+ Supported Models</a> |
+<a href="https://docs.litellm.ai/docs/providers" target="_blank">100+ Supported Models</a> |
 <a href="https://docs.litellm.ai/docs/" target="_blank">Docs</a> |
 <a href="https://litellm.ai/playground" target="_blank">Demo Website</a>
 </h4>
@@ -101,7 +101,7 @@ python3 main.py
 ### Set your LLM Configs
 Set your LLMs and LLM weights you want to run A/B testing with
 In main.py set your selected LLMs you want to AB test in `llm_dict`
-You can A/B test more than 100+ LLMs using LiteLLM https://docs.litellm.ai/docs/completion/supported
+You can A/B test more than 100+ LLMs using LiteLLM https://docs.litellm.ai/docs/providers
 ```python
 llm_dict = {
 "gpt-4": 0.2,
@@ -114,7 +114,7 @@ llm_dict = {
 #### Setting your API Keys
 Set your LLM API keys in a .env file in the directory or set them as `os.environ` variables.
-See https://docs.litellm.ai/docs/completion/supported for the format of API keys
+See https://docs.litellm.ai/docs/providers for the format of API keys
 LiteLLM generalizes api keys to follow the following format
 `PROVIDER_API_KEY`
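A minimal sketch of that `PROVIDER_API_KEY` convention in practice (key values are placeholders):

```python
import os

# LiteLLM reads each provider's key from an env var named PROVIDER_API_KEY
os.environ["OPENAI_API_KEY"] = "sk-..."         # gpt-3.5-turbo, gpt-4
os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..."  # claude-2
```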


@@ -1,7 +1,7 @@
 ---
 displayed_sidebar: tutorialSidebar
 ---
-# litellm
+# Litellm
 import QueryParamReader from '../src/components/queryParamReader.js'
@@ -21,7 +21,7 @@ a light package to simplify calling OpenAI, Azure, Cohere, Anthropic, Huggingfac
 # usage
-<a href='https://docs.litellm.ai/docs/completion/supported' target="_blank"><img alt='None' src='https://img.shields.io/badge/Supported_LLMs-100000?style=for-the-badge&logo=None&logoColor=000000&labelColor=000000&color=8400EA'/></a>
+<a href='https://docs.litellm.ai/docs/providers' target="_blank"><img alt='None' src='https://img.shields.io/badge/Supported_LLMs-100000?style=for-the-badge&logo=None&logoColor=000000&labelColor=000000&color=8400EA'/></a>
 Demo - https://litellm.ai/playground \
 Read the docs - https://docs.litellm.ai/docs/
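For orientation, a minimal `litellm.completion` call looks roughly like this (model and prompt are illustrative):

```python
from litellm import completion

messages = [{"role": "user", "content": "Hey, how's it going?"}]

# the same call works across OpenAI, Azure, Cohere, Anthropic, Huggingface, ...
response = completion(model="gpt-3.5-turbo", messages=messages)
print(response['choices'][0]['message']['content'])
```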


@@ -10,7 +10,7 @@ In this tutorial, we'll walk through A/B testing between GPT-4 and Llama2 in pro
 * [Deploying models on Huggingface](https://huggingface.co/docs/inference-endpoints/guides/create_endpoint)
-* [All supported providers on LiteLLM](https://docs.litellm.ai/docs/completion/supported)
+* [All supported providers on LiteLLM](https://docs.litellm.ai/docs/providers)
 # Code Walkthrough


@@ -76,7 +76,7 @@ os.environ['ANTHROPIC_API_KEY'] = ""
 ``` python
 results = [] # for storing results
-models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/completion/supported
+models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/providers
 for question in questions:
  row = [question]
  for model in models:


@@ -16,7 +16,7 @@ a light package to simplify calling OpenAI, Azure, Cohere, Anthropic, Huggingfac
 - guarantees [consistent output](https://litellm.readthedocs.io/en/latest/output/), text responses will always be available at `['choices'][0]['message']['content']`
 - exception mapping - common exceptions across providers are mapped to the [OpenAI exception types](https://help.openai.com/en/articles/6897213-openai-library-error-types-guidance)
 # usage
-<a href='https://docs.litellm.ai/docs/completion/supported' target="_blank"><img alt='None' src='https://img.shields.io/badge/Supported_LLMs-100000?style=for-the-badge&logo=None&logoColor=000000&labelColor=000000&color=8400EA'/></a>
+<a href='https://docs.litellm.ai/docs/providers' target="_blank"><img alt='None' src='https://img.shields.io/badge/Supported_LLMs-100000?style=for-the-badge&logo=None&logoColor=000000&labelColor=000000&color=8400EA'/></a>
 Demo - https://litellm.ai/playground \
 Read the docs - https://docs.litellm.ai/docs/
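A hedged sketch of those two guarantees in use, assuming the pre-1.0 `openai` package whose error types the mapping targets:

```python
import openai  # pre-1.0 openai package; litellm maps provider errors to its types
from litellm import completion

try:
    response = completion(model="claude-2",
                          messages=[{"role": "user", "content": "Hello"}])
    # consistent output: the text is at this path regardless of provider
    print(response['choices'][0]['message']['content'])
except openai.error.OpenAIError as err:
    # exception mapping: provider failures surface as OpenAI exception types
    print(f"call failed: {err}")
```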


@@ -144,7 +144,7 @@ huggingface_models = [
 "meta-llama/Llama-2-13b-chat",
 "meta-llama/Llama-2-70b",
 "meta-llama/Llama-2-70b-chat",
-] # these have been tested on extensively. But by default all text2text-generation and text-generation models are supported by liteLLM. - https://docs.litellm.ai/docs/completion/supported
+] # these have been tested on extensively. But by default all text2text-generation and text-generation models are supported by liteLLM. - https://docs.litellm.ai/docs/providers
 ai21_models = ["j2-ultra", "j2-mid", "j2-light"]
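For context, a sketch of how ids from these lists might be passed to `litellm.completion`; the `custom_llm_provider` argument for Huggingface routing is an assumption about the API of this era:

```python
from litellm import completion

messages = [{"role": "user", "content": "Hello, how are you?"}]

# AI21 ids like "j2-light" are routed by model name (requires AI21_API_KEY)
response = completion(model="j2-light", messages=messages)

# Huggingface models need the provider named explicitly; custom_llm_provider
# is assumed here based on litellm usage at the time
response = completion(model="meta-llama/Llama-2-7b-chat",
                      messages=messages,
                      custom_llm_provider="huggingface")
```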