diff --git a/cookbook/LiteLLM_AB_TestLLMs.ipynb b/cookbook/LiteLLM_AB_TestLLMs.ipynb
index 211c255ae..690427be8 100644
--- a/cookbook/LiteLLM_AB_TestLLMs.ipynb
+++ b/cookbook/LiteLLM_AB_TestLLMs.ipynb
@@ -50,7 +50,7 @@
     "\n",
     "\n",
     "# define a dict of model id and % of requests for model\n",
-    "# see models here: https://docs.litellm.ai/docs/completion/supported\n",
+    "# see models here: https://docs.litellm.ai/docs/providers\n",
     "split_per_model = {\n",
     "\t\"gpt-4\": 0.3,\n",
     "\t\"gpt-3.5-turbo\": 0.7\n",
diff --git a/cookbook/LiteLLM_Comparing_LLMs.ipynb b/cookbook/LiteLLM_Comparing_LLMs.ipynb
index b35369d82..7f5ce809b 100644
--- a/cookbook/LiteLLM_Comparing_LLMs.ipynb
+++ b/cookbook/LiteLLM_Comparing_LLMs.ipynb
@@ -100,7 +100,7 @@
    "source": [
     "results = [] # for storing results\n",
     "\n",
-    "models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/completion/supported\n",
+    "models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/providers\n",
     "for question in questions:\n",
     " row = [question]\n",
     " for model in models:\n",
diff --git a/cookbook/llm-ab-test-server/readme.md b/cookbook/llm-ab-test-server/readme.md
index 846cbde0c..763912a19 100644
--- a/cookbook/llm-ab-test-server/readme.md
+++ b/cookbook/llm-ab-test-server/readme.md
@@ -22,7 +22,7 @@

-        <a href="https://docs.litellm.ai/docs/completion/supported">100+ Supported Models</a> |
+        <a href="https://docs.litellm.ai/docs/providers">100+ Supported Models</a> |
         <a href="https://docs.litellm.ai/docs/">Docs</a> |
         <a href="https://litellm.ai/playground">Demo Website</a>

@@ -101,7 +101,7 @@ python3 main.py
 ### Set your LLM Configs
 Set your LLMs and LLM weights you want to run A/B testing with
 In main.py set your selected LLMs you want to AB test in `llm_dict`
-You can A/B test more than 100+ LLMs using LiteLLM https://docs.litellm.ai/docs/completion/supported
+You can A/B test more than 100+ LLMs using LiteLLM https://docs.litellm.ai/docs/providers
 ```python
 llm_dict = {
 	"gpt-4": 0.2,
@@ -114,7 +114,7 @@ llm_dict = {
 
 #### Setting your API Keys
 Set your LLM API keys in a .env file in the directory or set them as `os.environ` variables.
-See https://docs.litellm.ai/docs/completion/supported for the format of API keys
+See https://docs.litellm.ai/docs/providers for the format of API keys
 
 LiteLLM generalizes api keys to follow the following format `PROVIDER_API_KEY`
 
diff --git a/docs/my-website/docs/index.md b/docs/my-website/docs/index.md
index 44faddde8..7f16fc1ac 100644
--- a/docs/my-website/docs/index.md
+++ b/docs/my-website/docs/index.md
@@ -1,7 +1,7 @@
 ---
 displayed_sidebar: tutorialSidebar
 ---
-# litellm
+# Litellm
 
 import QueryParamReader from '../src/components/queryParamReader.js'
 
@@ -21,7 +21,7 @@ a light package to simplify calling OpenAI, Azure, Cohere, Anthropic, Huggingfac
 
 # usage
 
-None
+None
 
 Demo - https://litellm.ai/playground \
 Read the docs - https://docs.litellm.ai/docs/
diff --git a/docs/my-website/docs/tutorials/ab_test_llms.md b/docs/my-website/docs/tutorials/ab_test_llms.md
index 426f4bf4f..3d482c053 100644
--- a/docs/my-website/docs/tutorials/ab_test_llms.md
+++ b/docs/my-website/docs/tutorials/ab_test_llms.md
@@ -10,7 +10,7 @@ In this tutorial, we'll walk through A/B testing between GPT-4 and Llama2 in pro
 
 * [Deploying models on Huggingface](https://huggingface.co/docs/inference-endpoints/guides/create_endpoint)
-* [All supported providers on LiteLLM](https://docs.litellm.ai/docs/completion/supported)
+* [All supported providers on LiteLLM](https://docs.litellm.ai/docs/providers)
 
 # Code Walkthrough
diff --git a/docs/my-website/docs/tutorials/compare_llms_2.md b/docs/my-website/docs/tutorials/compare_llms_2.md
index c4c73b174..20aee6889 100644
--- a/docs/my-website/docs/tutorials/compare_llms_2.md
+++ b/docs/my-website/docs/tutorials/compare_llms_2.md
@@ -76,7 +76,7 @@ os.environ['ANTHROPIC_API_KEY'] = ""
 ``` python
 results = [] # for storing results
 
-models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/completion/supported
+models = ['gpt-3.5-turbo', 'claude-2'] # define what models you're testing, see: https://docs.litellm.ai/docs/providers
 for question in questions:
  row = [question]
  for model in models:
diff --git a/docs/my-website/src/pages/index.md b/docs/my-website/src/pages/index.md
index e64cdab1e..653dedc9c 100644
--- a/docs/my-website/src/pages/index.md
+++ b/docs/my-website/src/pages/index.md
@@ -16,7 +16,7 @@ a light package to simplify calling OpenAI, Azure, Cohere, Anthropic, Huggingfac
 - guarantees [consistent output](https://litellm.readthedocs.io/en/latest/output/), text responses will always be available at `['choices'][0]['message']['content']`
 - exception mapping - common exceptions across providers are mapped to the [OpenAI exception types](https://help.openai.com/en/articles/6897213-openai-library-error-types-guidance)
 # usage
-None
+None
 
 Demo - https://litellm.ai/playground \
 Read the docs - https://docs.litellm.ai/docs/
diff --git a/litellm/__init__.py b/litellm/__init__.py
index f3a117ed5..f36dceecd 100644
--- a/litellm/__init__.py
+++ b/litellm/__init__.py
@@ -144,7 +144,7 @@ huggingface_models = [
     "meta-llama/Llama-2-13b-chat",
     "meta-llama/Llama-2-70b",
     "meta-llama/Llama-2-70b-chat",
-] # these have been tested on extensively. But by default all text2text-generation and text-generation models are supported by liteLLM. - https://docs.litellm.ai/docs/completion/supported
+] # these have been tested on extensively. But by default all text2text-generation and text-generation models are supported by liteLLM. - https://docs.litellm.ai/docs/providers
 
 ai21_models = ["j2-ultra", "j2-mid", "j2-light"]
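
For reference, here is a minimal sketch of the weighted A/B routing that the `split_per_model` / `llm_dict` weights above configure, assuming `litellm.completion` as the call site. The helper name `completion_with_split` and the key value are illustrative, not part of litellm's API:

```python
import os
import random

import litellm

# keys follow litellm's PROVIDER_API_KEY convention described above
os.environ["OPENAI_API_KEY"] = "sk-..."  # placeholder, set a real key

# model id -> share of requests, as in the cookbook's split_per_model
split_per_model = {
    "gpt-4": 0.3,
    "gpt-3.5-turbo": 0.7,
}

def completion_with_split(messages):
    # weighted random pick: ~30% of calls go to gpt-4, ~70% to gpt-3.5-turbo
    model = random.choices(
        list(split_per_model), weights=list(split_per_model.values()), k=1
    )[0]
    return litellm.completion(model=model, messages=messages)

response = completion_with_split([{"role": "user", "content": "Hey, how's it going?"}])
# responses keep the OpenAI shape across providers
print(response["choices"][0]["message"]["content"])
```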
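
The comparison notebook and `compare_llms_2.md` share the same loop shape: one row per question, one answer column per model. A self-contained sketch under the same assumptions (`questions` here is illustrative, and `claude-2` needs `ANTHROPIC_API_KEY` set alongside `OPENAI_API_KEY`):

```python
import time

import litellm

questions = ["Why is the sky blue?"]  # illustrative; use your own eval set
models = ["gpt-3.5-turbo", "claude-2"]

results = []  # one row per question: [question, answer per model...]
for question in questions:
    row = [question]
    for model in models:
        start = time.time()
        response = litellm.completion(
            model=model, messages=[{"role": "user", "content": question}]
        )
        row.append(response["choices"][0]["message"]["content"])
        print(f"{model} answered in {time.time() - start:.2f}s")
    results.append(row)
```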