diff --git a/litellm/llms/anthropic.py b/litellm/llms/anthropic.py
index c90b61a11..2ea07215b 100644
--- a/litellm/llms/anthropic.py
+++ b/litellm/llms/anthropic.py
@@ -141,5 +141,5 @@ class AnthropicLLM:
             }
         return model_response

-    def embedding(): # logic for parsing in - calling - parsing out model embedding calls
+    def embedding(self): # logic for parsing in - calling - parsing out model embedding calls
         pass
diff --git a/litellm/llms/huggingface_restapi.py b/litellm/llms/huggingface_restapi.py
index 974a6c049..23ac16bef 100644
--- a/litellm/llms/huggingface_restapi.py
+++ b/litellm/llms/huggingface_restapi.py
@@ -6,6 +6,7 @@ from litellm import logging
 import time
 from typing import Callable
 from litellm.utils import ModelResponse
+from typing import Optional


 class HuggingfaceError(Exception):
@@ -45,7 +46,7 @@ class HuggingfaceRestAPILLM:
         logger_fn=None,
     ): # logic for parsing in - calling - parsing out model completion calls
         if custom_api_base:
-            completion_url = custom_api_base
+            completion_url: Optional[str] = custom_api_base
         elif "HF_API_BASE" in os.environ:
             completion_url = os.getenv("HF_API_BASE")
         else:
@@ -136,5 +137,5 @@ class HuggingfaceRestAPILLM:
         return model_response
         pass

-    def embedding(): # logic for parsing in - calling - parsing out model embedding calls
+    def embedding(self): # logic for parsing in - calling - parsing out model embedding calls
         pass
diff --git a/litellm/main.py b/litellm/main.py
index 713a21ed6..1fc89e2e7 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -265,8 +265,8 @@ def completion(
             or api_key
             or litellm.replicate_key
         )
-        # set replicate kye
-        os.environ["REPLICATE_API_TOKEN"] = replicate_key
+        # set replicate key
+        os.environ["REPLICATE_API_TOKEN"]: str = replicate_key
         prompt = " ".join([message["content"] for message in messages])
         input = {"prompt": prompt}
         if "max_tokens" in optional_params:
diff --git a/litellm/testing.py b/litellm/testing.py
index 3e3ce286e..5db01d182 100644
--- a/litellm/testing.py
+++ b/litellm/testing.py
@@ -101,7 +101,7 @@ def duration_test_model(original_function):


 @duration_test_model
-def load_test_model(models: list, prompt: str = None, num_calls: int = None):
+def load_test_model(models: list, prompt: str = "", num_calls: int = 0):
     test_calls = 100
     if num_calls:
         test_calls = num_calls
diff --git a/litellm/tests/test_api_key_param.py b/litellm/tests/test_api_key_param.py
index cebcb1a37..c444b3904 100644
--- a/litellm/tests/test_api_key_param.py
+++ b/litellm/tests/test_api_key_param.py
@@ -22,7 +22,7 @@ messages = [{"content": user_message, "role": "user"}]

 ## Test 1: Setting key dynamically
 temp_key = os.environ.get("ANTHROPIC_API_KEY")
-os.environ["ANTHROPIC_API_KEY"] = "bad-key"
+os.environ["ANTHROPIC_API_KEY"]: str = "bad-key"
 # test on openai completion call
 try:
     response = completion(
@@ -39,7 +39,7 @@ os.environ["ANTHROPIC_API_KEY"] = temp_key


 ## Test 2: Setting key via __init__ params
-litellm.anthropic_key = os.environ.get("ANTHROPIC_API_KEY")
+litellm.anthropic_key: str = os.environ.get("ANTHROPIC_API_KEY")
 os.environ.pop("ANTHROPIC_API_KEY")
 # test on openai completion call
 try:
diff --git a/litellm/utils.py b/litellm/utils.py
index a14b5f8f1..b79067c36 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -21,7 +21,7 @@ from .exceptions import (
     ServiceUnavailableError,
     OpenAIError,
 )
-from typing import List, Dict, Union
+from typing import List, Dict, Union, Optional
 ####### ENVIRONMENT VARIABLES ###################
 dotenv.load_dotenv() # Loading env variables using dotenv

@@ -35,10 +35,10 @@
 heliconeLogger = None
 aispendLogger = None
 berrispendLogger = None
 supabaseClient = None
-callback_list = []
+callback_list: Optional[List[str]] = []
 user_logger_fn = None
-additional_details = {}
-local_cache = {}
+additional_details: Optional[Dict[str, str]] = {}
+local_cache: Optional[Dict[str, str]] = {}
 ######## Model Response #########################
 # All liteLLM Model responses will be in this format, Follows the OpenAI Format
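
Note on the typing pattern introduced above: os.getenv is typed as returning Optional[str], so a variable that may receive its result (such as completion_url in huggingface_restapi.py) needs an Optional[str] annotation to type-check cleanly. A minimal sketch of the idea, using a hypothetical resolve_api_base helper and fallback URL that are not part of this diff:

import os
from typing import Optional

def resolve_api_base(custom_api_base: Optional[str] = None) -> str:
    # os.getenv returns Optional[str], so the intermediate variable
    # must be annotated Optional[str] to satisfy a strict type checker
    api_base: Optional[str] = custom_api_base or os.getenv("HF_API_BASE")
    if api_base is None:
        # fallback default shown for illustration only
        api_base = "https://api-inference.huggingface.co/models"
    return api_base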