From 7e50b9a29eeee71f176dff7a043dfb92b69d18f1 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Mon, 31 Jul 2023 18:56:26 -0700 Subject: [PATCH 01/11] Update setup.py --- setup.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 4d8ed7b017..8390a05190 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,11 @@ setup( install_requires=[ 'openai', 'cohere', - 'func_timeout' - 'pytest' + 'func_timeout', + 'pytest', + 'anthropic', + 'replicate', + 'python-dotenv', + 'openai[datalib]' ], ) From db2b4cb1c1c847498bb878c4b71b93e4e406c9e4 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Tue, 1 Aug 2023 08:18:44 -0700 Subject: [PATCH 02/11] Update README.md --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 3661df9528..993309062b 100644 --- a/README.md +++ b/README.md @@ -19,12 +19,6 @@ Read the docs - https://litellm.readthedocs.io/en/latest/ pip install litellm ``` -Stable version -``` -pip install litellm==0.1.1 -``` - -* Code Sample: [Getting Started Notebook](https://colab.research.google.com/drive/1gR3pY-JzDZahzpVdbGBtrNGDBmzUNJaJ?usp=sharing) ```python from litellm import completion @@ -47,6 +41,12 @@ response = completion("chatgpt-test", messages, azure=True) # openrouter call response = completion("google/palm-2-codechat-bison", messages) ``` +Code Sample: [Getting Started Notebook](https://colab.research.google.com/drive/1gR3pY-JzDZahzpVdbGBtrNGDBmzUNJaJ?usp=sharing) + +Stable version +``` +pip install litellm==0.1.1 +``` # hosted version - [Grab time if you want access 👋](https://calendly.com/d/4mp-gd3-k5k/berriai-1-1-onboarding-litellm-hosted-version) From 502eb94db37dd82f24d312c280eb2f3a9d85ff94 Mon Sep 17 00:00:00 2001 From: Krish Dholakia Date: Tue, 1 Aug 2023 08:19:24 -0700 Subject: [PATCH 03/11] Update README.md --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 993309062b..a073c0ae7f 100644 --- a/README.md +++ b/README.md @@ -12,9 +12,11 @@ litellm manages: - translating inputs to completion and embedding endpoints - guarantees consistent output, text responses will always be available at `['choices'][0]['message']['content']` -Read the docs - https://litellm.readthedocs.io/en/latest/ # usage -## installation + +Read the docs - https://litellm.readthedocs.io/en/latest/ + +## quick start ``` pip install litellm ``` From c3fcff2018ef0432b71aa4688158fe684ff90bc9 Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 1 Aug 2023 08:26:07 -0700 Subject: [PATCH 04/11] Update tests.yml --- .github/workflows/tests.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index a2658432d3..d899f2360c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -37,3 +37,8 @@ jobs: - name: Run tests run: pytest litellm/tests + + - name: Upload coverage reports to Codecov + uses: codecov/codecov-action@v3 + with: + token: ${{ secrets.CODECOV_TOKEN }} From 3d80db72f26d41f5de62cd9ee48f69673b267dcb Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 1 Aug 2023 10:39:47 -0700 Subject: [PATCH 05/11] v1 of improving docs --- docs/input.md | 144 ++++++++++++++++++++++++++++++++++++++++++++++++++ mkdocs.yml | 2 + 2 files changed, 146 insertions(+) create mode 100644 docs/input.md diff --git a/docs/input.md b/docs/input.md new file mode 100644 index 0000000000..d92837a252 --- /dev/null +++ b/docs/input.md @@ -0,0 +1,144 @@ +# Completion Function - completion() +## Input - Request Body +**`model`** +string Required
+ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API. + +--- + +**`messages`** +array Required
+A list of messages comprising the conversation so far. + +--- +>> **`role`** +>> string Required
+>> The role of the messages author. One of system, user, assistant, or function. +>>
+>> +>> --- + +>> **`content`** +>> string Required
+>> The contents of the message. content is required for all messages, and may be null for assistant messages with function calls. +>>
+>> +>> --- + +>> **`name`** +>> string Optional
+>> The name of the author of this message. name is required if role is function, and it should be the name of the function whose response is in the content. May contain a-z, A-Z, 0-9, and underscores, with a maximum length of 64 characters. +>>
+>> +>> --- + +>> **`function_call`** +>> object Optional
+>> The name and arguments of a function that should be called, as generated by the model. +>>
+>> +>> --- + +**`functions`** +array Optional
+A list of functions the model may generate JSON inputs for. +
+ +--- +>> **`name`** +>> string Required
+>> The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. +>>
+>> +>> --- + +>> **`description`** +>> string Optional
+>> A description of what the function does, used by the model to choose when and how to call the function. +>>
+>> +>> --- + +>> **`parameters`** +>> object Required
+>> The parameters the functions accept, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format. +>> To describe a function that accepts no parameters, provide the value {"type": "object", "properties": {}}. +>>
+>> +>> --- + + +**`function_call`** +string or object Optional
+Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via {"name": "my_function"} forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. +
+ +--- + +**`temperature`** +number Optional, Defaults to 1
+What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. +
+ +--- + +**`top_p`** +number Optional, Defaults to 1
+An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. +
+ +--- + +**`n`** +integer Optional, Defaults to 1
+How many chat completion choices to generate for each input message. +
+ +--- + +**`stream`** +boolean Optional, Defaults to false
+If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Example Python code. +
+ +--- + +**`stop`** +string or array Optional, Defaults to null
+Up to 4 sequences where the API will stop generating further tokens. +
+ +--- + +**`max_tokens`** +integer Optional, Defaults to inf
+The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens. +
+ +--- + +**`presence_penalty`** +number Optional, Defaults to 0
+Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. See more information about frequency and presence penalties. +
+ +--- + +**`frequency_penalty`** +number Optional, Defaults to 0
+Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. See more information about frequency and presence penalties. +
+ +--- + +**`logit_bias`** +map Optional, Defaults to null
+Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase the likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. +
+ +--- + +**`user`** +string Optional
+A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more. + diff --git a/mkdocs.yml b/mkdocs.yml index d8b01b471e..1dca283e03 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -2,6 +2,8 @@ site_name: liteLLM nav: - ⚡ Getting Started: - Installation & Quick Start: index.md + - completion(): + - input: input.md - 🤖 Supported LLM APIs: - Supported Completion & Chat APIs: supported.md - Supported Embedding APIs: supported_embedding.md From 4eb60d223cca11b33f2966508833bccafbb43cfd Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 1 Aug 2023 10:54:15 -0700 Subject: [PATCH 06/11] commit --- docs/input.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/docs/input.md b/docs/input.md index d92837a252..6ab42ab426 100644 --- a/docs/input.md +++ b/docs/input.md @@ -2,13 +2,16 @@ ## Input - Request Body **`model`** string Required
-ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API. +ID of the model to use. See the model endpoint compatibility + table for details on which models work with the Chat API. --- **`messages`** array Required
-A list of messages comprising the conversation so far. + +A list of messages comprising the conversation so far. Example Python Code + --- >> **`role`** From 358ace6485df0a2e21c59c67bbc826b4108049f6 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 1 Aug 2023 11:27:29 -0700 Subject: [PATCH 07/11] improved input() --- docs/input.md | 45 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/docs/input.md b/docs/input.md index 6ab42ab426..98b423c13c 100644 --- a/docs/input.md +++ b/docs/input.md @@ -1,5 +1,12 @@ # Completion Function - completion() +The Input params are **exactly the same** as the +OpenAI Create chat completion, and let you call **Azure OpenAI, Anthropic, Cohere, Replicate** models in the same format. + +In addition, liteLLM allows you to pass in the following **Optional** liteLLM args:
+`forceTimeout`, `azure`, `logger_fn`, `verbose` + ## Input - Request Body + **`model`** string Required
ID of the model to use. See the model endpoint compatibility @@ -12,6 +19,24 @@ ID of the model to use. See the Example Python Code +```python +from litellm import completion + +messages= + [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Knock knock."}, + {"role": "assistant", "content": "Who's there?"}, + {"role": "user", "content": "Orange."}, + ] + +# openai call +response = completion(model="gpt-3.5-turbo", messages=messages, temperature=0) + +# cohere call +response = completion(model="command-nightly", messages=messages, temperature=0) +``` + --- >> **`role`** @@ -64,8 +89,8 @@ A list of functions the model may generate JSON inputs for. >> **`parameters`** >> object Required
->> The parameters the functions accept, described as a JSON Schema object. See the guide for examples, and the JSON Schema reference for documentation about the format. ->> To describe a function that accepts no parameters, provide the value {"type": "object", "properties": {}}. +>> The parameters the functions accept, described as a JSON Schema object. +>> To describe a function that accepts no parameters, provide the value `{"type": "object", "properties": {}}`. >>
>> >> --- @@ -73,21 +98,21 @@ A list of functions the model may generate JSON inputs for. **`function_call`** string or object Optional
-Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via {"name": "my_function"} forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present. +Controls how the model responds to function calls. "none" means the model does not call a function, and responds to the end-user. "auto" means the model can pick between an end-user or calling a function. Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. "none" is the default when no functions are present. "auto" is the default if functions are present.
--- **`temperature`** number Optional, Defaults to 1
-What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or top_p but not both. +What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
--- **`top_p`** number Optional, Defaults to 1
-An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or temperature but not both. +An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or 1temperature` but not both.
--- @@ -101,7 +126,7 @@ How many chat completion choices to generate for each input message. **`stream`** boolean Optional, Defaults to false
-If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a data: [DONE] message. Example Python code. +If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only server-sent events as they become available, with the stream terminated by a `data: [DONE]` message.
--- @@ -115,21 +140,21 @@ Up to 4 sequences where the API will stop generating further tokens. **`max_tokens`** integer Optional, Defaults to inf
-The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length. Example Python code for counting tokens. +The maximum number of tokens to generate in the chat completion. The total length of input tokens and generated tokens is limited by the model's context length
--- **`presence_penalty`** number Optional, Defaults to 0
-Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. See more information about frequency and presence penalties. +Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.
--- **`frequency_penalty`** number Optional, Defaults to 0
-Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. See more information about frequency and presence penalties. +Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
--- @@ -143,5 +168,5 @@ Modify the likelihood of specified tokens appearing in the completion. Accepts a **`user`** string Optional
-A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. Learn more. +A unique identifier representing your end-user, which can help liteLLM to monitor and detect abuse. From 1abb6c8d68679dc3d8ee22ac3b6ac0c4ae3a5d23 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 1 Aug 2023 11:36:33 -0700 Subject: [PATCH 08/11] fix link --- docs/input.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/input.md b/docs/input.md index 98b423c13c..a9b8f3f2ce 100644 --- a/docs/input.md +++ b/docs/input.md @@ -4,13 +4,13 @@ The Input params are **exactly the same** as the In addition, liteLLM allows you to pass in the following **Optional** liteLLM args:
`forceTimeout`, `azure`, `logger_fn`, `verbose` + ## Input - Request Body **`model`** string Required
-ID of the model to use. See the model endpoint compatibility - table for details on which models work with the Chat API. +ID of the model to use. See the model endpoint compatibility table for details on which models work with the Chat API. --- From 3d3a4aee9e53f7ba15e7e0fc83e6e449291a206a Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 1 Aug 2023 13:28:22 -0700 Subject: [PATCH 09/11] temp comment out test_exceptions.py --- litellm/tests/test_exceptions.py | 222 +++++++++++++++---------------- 1 file changed, 111 insertions(+), 111 deletions(-) diff --git a/litellm/tests/test_exceptions.py b/litellm/tests/test_exceptions.py index 38be0e2c15..9d981a0e4e 100644 --- a/litellm/tests/test_exceptions.py +++ b/litellm/tests/test_exceptions.py @@ -1,129 +1,129 @@ -from openai.error import AuthenticationError, InvalidRequestError, RateLimitError, OpenAIError -import os -import sys -import traceback -sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path -import litellm -from litellm import embedding, completion -from concurrent.futures import ThreadPoolExecutor -#### What this tests #### -# This tests exception mapping -> trigger an exception from an llm provider -> assert if output is of the expected type +# from openai.error import AuthenticationError, InvalidRequestError, RateLimitError, OpenAIError +# import os +# import sys +# import traceback +# sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path +# import litellm +# from litellm import embedding, completion +# from concurrent.futures import ThreadPoolExecutor +# #### What this tests #### +# # This tests exception mapping -> trigger an exception from an llm provider -> assert if output is of the expected type -# 5 providers -> OpenAI, Azure, Anthropic, Cohere, Replicate +# # 5 providers -> OpenAI, Azure, Anthropic, Cohere, Replicate -# 3 main types of exceptions -> - Rate Limit Errors, Context Window Errors, Auth errors (incorrect/rotated key, etc.) +# # 3 main types of exceptions -> - Rate Limit Errors, Context Window Errors, Auth errors (incorrect/rotated key, etc.) -# Approach: Run each model through the test -> assert if the correct error (always the same one) is triggered +# # Approach: Run each model through the test -> assert if the correct error (always the same one) is triggered -models = ["gpt-3.5-turbo", "chatgpt-test", "claude-instant-1", "command-nightly", "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"] +# models = ["gpt-3.5-turbo", "chatgpt-test", "claude-instant-1", "command-nightly", "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1"] -# Test 1: Rate Limit Errors -def test_model(model): - try: - sample_text = "how does a court case get to the Supreme Court?" * 50000 - messages = [{ "content": sample_text,"role": "user"}] - azure = False - if model == "chatgpt-test": - azure = True - print(f"model: {model}") - response = completion(model=model, messages=messages, azure=azure) - except RateLimitError: - return True - except OpenAIError: # is at least an openai error -> in case of random model errors - e.g. overloaded server - return True - except Exception as e: - print(f"Uncaught Exception {model}: {type(e).__name__} - {e}") - pass - return False +# # Test 1: Rate Limit Errors +# def test_model(model): +# try: +# sample_text = "how does a court case get to the Supreme Court?" * 50000 +# messages = [{ "content": sample_text,"role": "user"}] +# azure = False +# if model == "chatgpt-test": +# azure = True +# print(f"model: {model}") +# response = completion(model=model, messages=messages, azure=azure) +# except RateLimitError: +# return True +# except OpenAIError: # is at least an openai error -> in case of random model errors - e.g. overloaded server +# return True +# except Exception as e: +# print(f"Uncaught Exception {model}: {type(e).__name__} - {e}") +# pass +# return False -# Repeat each model 500 times -extended_models = [model for model in models for _ in range(250)] +# # Repeat each model 500 times +# extended_models = [model for model in models for _ in range(250)] -def worker(model): - return test_model(model) +# def worker(model): +# return test_model(model) -# Create a dictionary to store the results -counts = {True: 0, False: 0} +# # Create a dictionary to store the results +# counts = {True: 0, False: 0} -# Use Thread Pool Executor -with ThreadPoolExecutor(max_workers=500) as executor: - # Use map to start the operation in thread pool - results = executor.map(worker, extended_models) +# # Use Thread Pool Executor +# with ThreadPoolExecutor(max_workers=500) as executor: +# # Use map to start the operation in thread pool +# results = executor.map(worker, extended_models) - # Iterate over results and count True/False - for result in results: - counts[result] += 1 +# # Iterate over results and count True/False +# for result in results: +# counts[result] += 1 -accuracy_score = counts[True]/(counts[True] + counts[False]) -print(f"accuracy_score: {accuracy_score}") +# accuracy_score = counts[True]/(counts[True] + counts[False]) +# print(f"accuracy_score: {accuracy_score}") -# Test 2: Context Window Errors -print("Testing Context Window Errors") -def test_model(model): # pass extremely long input - sample_text = "how does a court case get to the Supreme Court?" * 100000 - messages = [{ "content": sample_text,"role": "user"}] - try: - azure = False - if model == "chatgpt-test": - azure = True - print(f"model: {model}") - response = completion(model=model, messages=messages, azure=azure) - except InvalidRequestError: - return True - except OpenAIError: # is at least an openai error -> in case of random model errors - e.g. overloaded server - return True - except Exception as e: - print(f"Error Type: {type(e).__name__}") - print(f"Uncaught Exception - {e}") - pass - return False +# # Test 2: Context Window Errors +# print("Testing Context Window Errors") +# def test_model(model): # pass extremely long input +# sample_text = "how does a court case get to the Supreme Court?" * 100000 +# messages = [{ "content": sample_text,"role": "user"}] +# try: +# azure = False +# if model == "chatgpt-test": +# azure = True +# print(f"model: {model}") +# response = completion(model=model, messages=messages, azure=azure) +# except InvalidRequestError: +# return True +# except OpenAIError: # is at least an openai error -> in case of random model errors - e.g. overloaded server +# return True +# except Exception as e: +# print(f"Error Type: {type(e).__name__}") +# print(f"Uncaught Exception - {e}") +# pass +# return False -## TEST SCORE -true_val = 0 -for model in models: - if test_model(model=model) == True: - true_val += 1 -accuracy_score = true_val/len(models) -print(f"CTX WINDOW accuracy_score: {accuracy_score}") +# ## TEST SCORE +# true_val = 0 +# for model in models: +# if test_model(model=model) == True: +# true_val += 1 +# accuracy_score = true_val/len(models) +# print(f"CTX WINDOW accuracy_score: {accuracy_score}") -# Test 3: InvalidAuth Errors -def logger_fn(model_call_object: dict): - print(f"model call details: {model_call_object}") +# # Test 3: InvalidAuth Errors +# def logger_fn(model_call_object: dict): +# print(f"model call details: {model_call_object}") -def test_model(model): # set the model key to an invalid key, depending on the model - messages = [{ "content": "Hello, how are you?","role": "user"}] - try: - azure = False - if model == "gpt-3.5-turbo": - os.environ["OPENAI_API_KEY"] = "bad-key" - elif model == "chatgpt-test": - os.environ["AZURE_API_KEY"] = "bad-key" - azure = True - elif model == "claude-instant-1": - os.environ["ANTHROPIC_API_KEY"] = "bad-key" - elif model == "command-nightly": - os.environ["COHERE_API_KEY"] = "bad-key" - elif model == "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1": - os.environ["REPLICATE_API_KEY"] = "bad-key" - os.environ["REPLICATE_API_TOKEN"] = "bad-key" - print(f"model: {model}") - response = completion(model=model, messages=messages, azure=azure, logger_fn=logger_fn) - print(f"response: {response}") - except AuthenticationError as e: - return True - except OpenAIError: # is at least an openai error -> in case of random model errors - e.g. overloaded server - return True - except Exception as e: - print(f"Uncaught Exception - {e}") - pass - return False +# def test_model(model): # set the model key to an invalid key, depending on the model +# messages = [{ "content": "Hello, how are you?","role": "user"}] +# try: +# azure = False +# if model == "gpt-3.5-turbo": +# os.environ["OPENAI_API_KEY"] = "bad-key" +# elif model == "chatgpt-test": +# os.environ["AZURE_API_KEY"] = "bad-key" +# azure = True +# elif model == "claude-instant-1": +# os.environ["ANTHROPIC_API_KEY"] = "bad-key" +# elif model == "command-nightly": +# os.environ["COHERE_API_KEY"] = "bad-key" +# elif model == "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1": +# os.environ["REPLICATE_API_KEY"] = "bad-key" +# os.environ["REPLICATE_API_TOKEN"] = "bad-key" +# print(f"model: {model}") +# response = completion(model=model, messages=messages, azure=azure, logger_fn=logger_fn) +# print(f"response: {response}") +# except AuthenticationError as e: +# return True +# except OpenAIError: # is at least an openai error -> in case of random model errors - e.g. overloaded server +# return True +# except Exception as e: +# print(f"Uncaught Exception - {e}") +# pass +# return False -## TEST SCORE -true_val = 0 -for model in models: - if test_model(model=model) == True: - true_val += 1 -accuracy_score = true_val/len(models) -print(f"INVALID AUTH accuracy_score: {accuracy_score}") \ No newline at end of file +# ## TEST SCORE +# true_val = 0 +# for model in models: +# if test_model(model=model) == True: +# true_val += 1 +# accuracy_score = true_val/len(models) +# print(f"INVALID AUTH accuracy_score: {accuracy_score}") From d1932a0561a4d16e7f4fcc2c625221c4a1c849ce Mon Sep 17 00:00:00 2001 From: Ishaan Jaff Date: Tue, 1 Aug 2023 13:48:06 -0700 Subject: [PATCH 10/11] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index a073c0ae7f..42a129880e 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,7 @@ # *🚅 litellm* [![PyPI Version](https://img.shields.io/pypi/v/litellm.svg)](https://pypi.org/project/litellm/) [![PyPI Version](https://img.shields.io/badge/stable%20version-v0.1.1-blue?color=green&link=https://pypi.org/project/litellm/0.1.1/)](https://pypi.org/project/litellm/0.1.1/) +[![CircleCI](https://dl.circleci.com/status-badge/img/gh/BerriAI/litellm/tree/main.svg?style=svg)](https://dl.circleci.com/status-badge/redirect/gh/BerriAI/litellm/tree/main) [![ New Relea Tests](https://github.com/BerriAI/litellm/actions/workflows/tests.yml/badge.svg)](https://github.com/BerriAI/litellm/actions/workflows/tests.yml) [![Publish to PyPI](https://github.com/BerriAI/litellm/actions/workflows/publish_pypi.yml/badge.svg?branch=main)](https://github.com/BerriAI/litellm/actions/workflows/publish_pypi.yml) ![Downloads](https://img.shields.io/pypi/dm/litellm) From 947afe43c5232af9cdefd305aeb22acac8e00c24 Mon Sep 17 00:00:00 2001 From: ishaan-jaff Date: Tue, 1 Aug 2023 14:25:35 -0700 Subject: [PATCH 11/11] remove replicate test for now --- litellm/tests/test_completion.py | 22 ++++++++++++++-------- mkdocs.yml | 2 +- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/litellm/tests/test_completion.py b/litellm/tests/test_completion.py index b9bbbebe4b..88d2ef782b 100644 --- a/litellm/tests/test_completion.py +++ b/litellm/tests/test_completion.py @@ -97,11 +97,17 @@ def test_completion_cohere(): except Exception as e: pytest.fail(f"Error occurred: {e}") -def test_completion_replicate_llama(): - model_name = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1" - try: - response = completion(model=model_name, messages=messages, max_tokens=500) - # Add any assertions here to check the response - print(response) - except Exception as e: - pytest.fail(f"Error occurred: {e}") \ No newline at end of file + +# def test_completion_replicate_llama(): +# model_name = "replicate/llama-2-70b-chat:2c1608e18606fad2812020dc541930f2d0495ce32eee50074220b87300bc16e1" +# try: +# response = completion(model=model_name, messages=messages, max_tokens=500) +# # Add any assertions here to check the response +# print(response) +# except Exception as e: +# print(f"in replicate llama, got error {e}") +# pass +# if e == "FunctionTimedOut": +# pass +# else: +# pytest.fail(f"Error occurred: {e}") diff --git a/mkdocs.yml b/mkdocs.yml index 1dca283e03..763fefadd5 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -3,7 +3,7 @@ nav: - ⚡ Getting Started: - Installation & Quick Start: index.md - completion(): - - input: input.md + - Input - Request Body: input.md - 🤖 Supported LLM APIs: - Supported Completion & Chat APIs: supported.md - Supported Embedding APIs: supported_embedding.md