diff --git a/README.md b/README.md
index b3cfea0f0..d155d2d4c 100644
--- a/README.md
+++ b/README.md
@@ -45,8 +45,8 @@ from litellm import completion
 import os
 
 ## set ENV variables
-os.environ["OPENAI_API_KEY"] = "your-openai-key"
-os.environ["COHERE_API_KEY"] = "your-cohere-key"
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+os.environ["COHERE_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
diff --git a/docs/my-website/docs/completion/config.md b/docs/my-website/docs/completion/config.md
index 4bfd003fd..44ff6e8a7 100644
--- a/docs/my-website/docs/completion/config.md
+++ b/docs/my-website/docs/completion/config.md
@@ -17,8 +17,8 @@ config = {
 }
 
 # set env var
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your anthropic key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 sample_text = "how does a court case get to the Supreme Court?" * 1000
diff --git a/docs/my-website/docs/completion/output.md b/docs/my-website/docs/completion/output.md
index 4b56e7baa..eda86ac27 100644
--- a/docs/my-website/docs/completion/output.md
+++ b/docs/my-website/docs/completion/output.md
@@ -57,7 +57,7 @@ You can also access information like latency.
 ```python
 import litellm
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-5b46387675a944d2"
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 messages=[{"role": "user", "content": "Hey!"}]
diff --git a/docs/my-website/docs/getting_started.md b/docs/my-website/docs/getting_started.md
index 050a951e8..00d455e0c 100644
--- a/docs/my-website/docs/getting_started.md
+++ b/docs/my-website/docs/getting_started.md
@@ -12,8 +12,8 @@ By default we provide a free $10 community-key to try all providers supported on
 from litellm import completion
 
 ## set ENV variables
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
-os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+os.environ["COHERE_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
diff --git a/docs/my-website/docs/index.md b/docs/my-website/docs/index.md
index a0c519320..1a22b114e 100644
--- a/docs/my-website/docs/index.md
+++ b/docs/my-website/docs/index.md
@@ -23,7 +23,7 @@ from litellm import completion
 import os
 
 ## set ENV variables
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 response = completion(
   model="gpt-3.5-turbo",
@@ -39,7 +39,7 @@ from litellm import completion
 import os
 
 ## set ENV variables
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 response = completion(
   model="claude-2",
@@ -133,7 +133,7 @@ from litellm import completion
 import os
 
 ## set ENV variables
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 response = completion(
   model="gpt-3.5-turbo",
@@ -150,7 +150,7 @@ from litellm import completion
 import os
 
 ## set ENV variables
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 response = completion(
   model="claude-2",
@@ -262,7 +262,7 @@ Pass the completion response to `litellm.completion_cost(completion_response=res
 ```python
 from litellm import completion, completion_cost
 import os
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 response = completion(
   model="gpt-3.5-turbo",
@@ -289,7 +289,7 @@ import os
 from litellm import completion
 
 # use the LiteLLM API Key
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
@@ -304,7 +304,7 @@ import os
 from litellm import completion
 
 # use the LiteLLM API Key
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
@@ -319,7 +319,7 @@ import os
 from litellm import completion
 
 # use the LiteLLM API Key
-os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
@@ -335,7 +335,7 @@ import os
 from litellm import completion
 
 # use the LiteLLM API Key
-os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
diff --git a/docs/my-website/docs/providers/ai21.md b/docs/my-website/docs/providers/ai21.md
index db5c90b39..c0987b312 100644
--- a/docs/my-website/docs/providers/ai21.md
+++ b/docs/my-website/docs/providers/ai21.md
@@ -5,24 +5,18 @@ LiteLLM supports j2-light, j2-mid and j2-ultra from [AI21](https://www.ai21.com/
 They're available to use without a waitlist.
 
 ### API KEYS
-We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
-
 ```python
 import os
-os.environ["AI21_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your ai21 key
+os.environ["AI21_API_KEY"] = "your-api-key"
 ```
 
-**Need a dedicated key?**
-Email us @ krrish@berri.ai
-
-[**See all supported models by the litellm api key**](../proxy_api.md#supported-models-for-litellm-key)
 
 ### Sample Usage
 
 ```python
 from litellm import completion
 
-# set env variable - [OPTIONAL] replace with your ai21 key
-os.environ["AI21_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+# set env variable
+os.environ["AI21_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]
diff --git a/docs/my-website/docs/providers/anthropic.md b/docs/my-website/docs/providers/anthropic.md
index c6fee4539..2ae162cea 100644
--- a/docs/my-website/docs/providers/anthropic.md
+++ b/docs/my-website/docs/providers/anthropic.md
@@ -2,18 +2,12 @@ LiteLLM supports Claude-1, 1.2 and Claude-2.
 
 ## API Keys
-We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
 
 ```python
 import os
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your anthropic key
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 ```
 
-**Need a dedicated key?**
-Email us @ krrish@berri.ai
-
-## Supported Models for LiteLLM Key
-These are the models that currently work with the "sk-litellm-.." keys.
 
 ## Sample Usage
 
@@ -22,7 +16,7 @@
 import os
 from litellm import completion
 
 # set env - [OPTIONAL] replace with your anthropic key
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Hey! how's it going?"}]
 response = completion(model="claude-instant-1", messages=messages)
@@ -36,8 +30,8 @@ Just set `stream=True` when calling completion.
 import os
 from litellm import completion
 
-# set env - [OPTIONAL] replace with your anthropic key
-os.environ["ANTHROPIC_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+# set env
+os.environ["ANTHROPIC_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Hey! how's it going?"}]
 response = completion(model="claude-instant-1", messages=messages, stream=True)
diff --git a/docs/my-website/docs/providers/nlp_cloud.md b/docs/my-website/docs/providers/nlp_cloud.md
index 399707b45..3d74fb7e1 100644
--- a/docs/my-website/docs/providers/nlp_cloud.md
+++ b/docs/my-website/docs/providers/nlp_cloud.md
@@ -3,17 +3,12 @@ LiteLLM supports all LLMs on NLP Cloud.
 
 ## API Keys
-We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
 
 ```python
 import os
-os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your nlp cloud key
+os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
 ```
 
-**Need a dedicated key?**
-Email us @ krrish@berri.ai
-
-[**See all supported models by the litellm api key**](../proxy_api.md#supported-models-for-litellm-key)
 
 ## Sample Usage
 
@@ -21,8 +16,8 @@ Email us @ krrish@berri.ai
 import os
 from litellm import completion
 
-# set env - [OPTIONAL] replace with your nlp cloud key
-os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+# set env
+os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Hey! how's it going?"}]
 response = completion(model="dolphin", messages=messages)
@@ -36,8 +31,8 @@ Just set `stream=True` when calling completion.
 import os
 from litellm import completion
 
-# set env - [OPTIONAL] replace with your nlp cloud key
-os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+# set env
+os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Hey! how's it going?"}]
 response = completion(model="dolphin", messages=messages, stream=True)
@@ -57,7 +52,7 @@ import os
 from litellm import completion
 
 # set env - [OPTIONAL] replace with your nlp cloud key
-os.environ["NLP_CLOUD_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["NLP_CLOUD_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Hey! how's it going?"}]
diff --git a/docs/my-website/docs/providers/openai.md b/docs/my-website/docs/providers/openai.md
index d2db29871..634f9dbbb 100644
--- a/docs/my-website/docs/providers/openai.md
+++ b/docs/my-website/docs/providers/openai.md
@@ -2,12 +2,11 @@ LiteLLM supports OpenAI Chat + Text completion and embedding calls.
 
 ### API Keys
-We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
 ```python
 import os
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 ```
 
 **Need a dedicated key?**
 Email us @ krrish@berri.ai
@@ -19,7 +18,7 @@ Email us @ krrish@berri.ai
 import os
 from litellm import completion
 
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
diff --git a/docs/my-website/docs/providers/togetherai.md b/docs/my-website/docs/providers/togetherai.md
index d8275f061..d718619f0 100644
--- a/docs/my-website/docs/providers/togetherai.md
+++ b/docs/my-website/docs/providers/togetherai.md
@@ -2,26 +2,17 @@ LiteLLM supports all models on Together AI.
 
 ## API Keys
-We provide a free $10 community-key for testing all providers on LiteLLM. You can replace this with your own key.
 
 ```python
 import os
-os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your together ai key
+os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
 ```
-
-**Need a dedicated key?**
-Email us @ krrish@berri.ai
-
-[**See all supported models by the litellm api key**](../proxy_api.md#supported-models-for-litellm-key)
-
-
 ## Sample Usage
 
 ```python
 from litellm import completion
 
-# set env variable - [OPTIONAL] replace with your together ai key
-os.environ["TOGETHERAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw"
+os.environ["TOGETHERAI_API_KEY"] = "your-api-key"
 
 messages = [{"role": "user", "content": "Write me a poem about the blue sky"}]
diff --git a/docs/my-website/docs/proxy_api.md b/docs/my-website/docs/proxy_api.md
index cbc6198aa..89bfacbe1 100644
--- a/docs/my-website/docs/proxy_api.md
+++ b/docs/my-website/docs/proxy_api.md
@@ -9,8 +9,8 @@
 import os
 from litellm import completion
 
 ## set ENV variables
-os.environ["OPENAI_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your openai key
-os.environ["COHERE_API_KEY"] = "sk-litellm-7_NPZhMGxY2GoHC59LgbDw" # [OPTIONAL] replace with your cohere key
+os.environ["OPENAI_API_KEY"] = "your-api-key"
+os.environ["COHERE_API_KEY"] = "your-api-key"
 
 messages = [{ "content": "Hello, how are you?","role": "user"}]
diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py
index 4cc36beb7..6071f8c1c 100644
--- a/litellm/tests/test_completion.py
+++ b/litellm/tests/test_completion.py
@@ -437,7 +437,6 @@ def test_completion_openai_litellm_key():
 
 # test_completion_openai_litellm_key()
 
-# commented out for now, as openrouter is quite flaky - causing our deployments to fail. Please run this before pushing changes.
 def test_completion_openrouter1():
     try:
         response = completion(
@@ -453,7 +452,7 @@ def test_completion_openrouter1():
 def test_completion_openrouter2():
     try:
         response = completion(
-            model="google/palm-2-chat-bison",
+            model="openrouter/openai/gpt-4-32k",
             messages=messages,
             max_tokens=5,
         )
@@ -464,6 +463,35 @@
 
 # test_completion_openrouter()
 
+def test_completion_hf_model_no_provider():
+    try:
+        response = completion(
+            model="WizardLM/WizardLM-70B-V1.0",
+            messages=messages,
+            max_tokens=5,
+        )
+        # this model has no provider prefix, so completion() should have raised
+        print(response)
+        pytest.fail("expected an exception for a model with no provider")
+    except Exception:
+        pass
+
+test_completion_hf_model_no_provider()
+
+def test_completion_hf_model_no_provider_2():
+    try:
+        response = completion(
+            model="meta-llama/Llama-2-70b-chat-hf",
+            messages=messages,
+            max_tokens=5,
+        )
+        # this model has no provider prefix, so completion() should have raised
+        pytest.fail("expected an exception for a model with no provider")
+    except Exception:
+        pass
+
+test_completion_hf_model_no_provider_2()
+
 def test_completion_openai_with_more_optional_params():
     try:
         response = completion(
@@ -865,7 +893,6 @@ def test_completion_with_fallbacks():
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
 
-
 # def test_completion_with_fallbacks_multiple_keys():
 #     print(f"backup key 1: {os.getenv('BACKUP_OPENAI_API_KEY_1')}")
 #     print(f"backup key 2: {os.getenv('BACKUP_OPENAI_API_KEY_2')}")
diff --git a/litellm/utils.py b/litellm/utils.py
index 94ede879c..b5c9ad2fe 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -1187,7 +1187,7 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
             model = model.split("/", 1)[1]
             return model, custom_llm_provider
 
-        # check if model in known model provider list
+        # check if model in known model provider list -> for huggingface models, raise an exception, as they don't have a fixed provider (can be togetherai, anyscale, baseten, runpod, etc.)
         ## openai - chatcompletion + text completion
         if model in litellm.open_ai_chat_completion_models:
             custom_llm_provider = "openai"
@@ -1208,15 +1208,9 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
         ## vertex - text + chat models
         elif model in litellm.vertex_chat_models or model in litellm.vertex_text_models:
             custom_llm_provider = "vertex_ai"
-        ## huggingface
-        elif model in litellm.huggingface_models:
-            custom_llm_provider = "huggingface"
         ## ai21
         elif model in litellm.ai21_models:
             custom_llm_provider = "ai21"
-        ## together_ai
-        elif model in litellm.together_ai_models:
-            custom_llm_provider = "together_ai"
         ## aleph_alpha
         elif model in litellm.aleph_alpha_models:
             custom_llm_provider = "aleph_alpha"
@@ -1231,6 +1225,9 @@ def get_llm_provider(model: str, custom_llm_provider: Optional[str] = None):
             custom_llm_provider = "petals"
 
         if custom_llm_provider is None or custom_llm_provider=="":
+            print()
+            print("\033[1;31mProvider List: https://docs.litellm.ai/docs/providers\033[0m")
+            print()
             raise ValueError(f"LLM Provider NOT provided. Pass in the LLM provider you are trying to call. E.g. For 'Huggingface' inference endpoints pass in `completion(model='huggingface/{model}',..)` Learn more: https://docs.litellm.ai/docs/providers")
         return model, custom_llm_provider
     except Exception as e:
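Note on the `get_llm_provider` change above: Hugging Face and Together AI model names are intentionally no longer mapped to a provider implicitly, so a bare model name now raises the `ValueError` shown in the diff. A minimal sketch of the resulting calling convention follows — the model name comes from the new tests above, while the `HUGGINGFACE_API_KEY` env var name and the exact exception type surfaced by `completion` (which may wrap the underlying `ValueError`) are assumptions:

```python
import os
from litellm import completion

# assumed env var name for Hugging Face inference endpoints
os.environ["HUGGINGFACE_API_KEY"] = "your-api-key"

messages = [{"role": "user", "content": "Hello, how are you?"}]

# with an explicit provider prefix, get_llm_provider() strips it
# (model.split("/", 1)[1]) and routes the call to Hugging Face
response = completion(
    model="huggingface/WizardLM/WizardLM-70B-V1.0",
    messages=messages,
    max_tokens=5,
)
print(response)

# without a prefix, provider resolution fails: the provider-list link is
# printed and "LLM Provider NOT provided. ..." is raised
try:
    completion(model="WizardLM/WizardLM-70B-V1.0", messages=messages)
except Exception as e:  # completion may re-wrap the ValueError
    print(e)
```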