linting type fixes

ishaan-jaff 2023-08-18 11:36:06 -07:00
parent fbcb6d1c20
commit 5e7d22512d
6 changed files with 13 additions and 12 deletions


@@ -141,5 +141,5 @@ class AnthropicLLM:
             }
         return model_response
 
-    def embedding(): # logic for parsing in - calling - parsing out model embedding calls
+    def embedding(self): # logic for parsing in - calling - parsing out model embedding calls
         pass
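The fix above adds the missing `self` parameter: `embedding` is an instance method, so a bare `def embedding():` inside a class both trips pylint (E0211, no-method-argument) and fails when called through an instance. A minimal illustration, with a hypothetical class name:

    class ExampleLLM:
        def embedding(self):  # the bound call passes the instance implicitly
            return None

    ExampleLLM().embedding()  # fine; with `def embedding():` this call raises TypeError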


@@ -6,6 +6,7 @@ from litellm import logging
 import time
 from typing import Callable
 from litellm.utils import ModelResponse
+from typing import Optional
 
 class HuggingfaceError(Exception):
@@ -45,7 +46,7 @@ class HuggingfaceRestAPILLM:
         logger_fn=None,
     ): # logic for parsing in - calling - parsing out model completion calls
         if custom_api_base:
-            completion_url = custom_api_base
+            completion_url: Optional[str] = custom_api_base
         elif "HF_API_BASE" in os.environ:
             completion_url = os.getenv("HF_API_BASE")
         else:
@@ -136,5 +137,5 @@ class HuggingfaceRestAPILLM:
         return model_response
         pass
 
-    def embedding(): # logic for parsing in - calling - parsing out model embedding calls
+    def embedding(self): # logic for parsing in - calling - parsing out model embedding calls
         pass
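The `Optional[str]` annotation (together with the new `typing` import above) is needed because one branch assigns from `os.getenv`, whose stub return type is `Optional[str]`; without widening the variable's type, mypy rejects that branch. A minimal sketch of the pattern, with the function name and the final branch as stand-ins since the diff cuts off after `else:`:

    import os
    from typing import Optional

    def resolve_completion_url(custom_api_base: Optional[str]) -> Optional[str]:
        if custom_api_base:
            completion_url: Optional[str] = custom_api_base
        elif "HF_API_BASE" in os.environ:
            completion_url = os.getenv("HF_API_BASE")  # typed Optional[str]
        else:
            completion_url = None  # stand-in for the default URL the diff omits
        return completion_url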


@@ -265,8 +265,8 @@ def completion(
                 or api_key
                 or litellm.replicate_key
             )
-            # set replicate kye
-            os.environ["REPLICATE_API_TOKEN"] = replicate_key
+            # set replicate key
+            os.environ["REPLICATE_API_TOKEN"]: str = replicate_key
             prompt = " ".join([message["content"] for message in messages])
             input = {"prompt": prompt}
             if "max_tokens" in optional_params:


@@ -101,7 +101,7 @@ def duration_test_model(original_function):
 
 @duration_test_model
-def load_test_model(models: list, prompt: str = None, num_calls: int = None):
+def load_test_model(models: list, prompt: str = "", num_calls: int = 0):
     test_calls = 100
     if num_calls:
         test_calls = num_calls
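`prompt: str = None` contradicts its own annotation, since `None` is not a `str`, and mypy (with implicit Optional disabled, the default in recent releases) reports the default as incompatible. The commit substitutes falsy defaults of the annotated types; widening the annotations is the other common fix. Both type-correct variants, sketched:

    from typing import Optional

    # As in the commit: falsy defaults matching the annotations
    def load_test_model(models: list, prompt: str = "", num_calls: int = 0): ...

    # Alternative: widen the annotations and keep None as the sentinel
    def load_test_model_alt(models: list, prompt: Optional[str] = None,
                            num_calls: Optional[int] = None): ...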


@@ -22,7 +22,7 @@ messages = [{"content": user_message, "role": "user"}]
 
 ## Test 1: Setting key dynamically
 temp_key = os.environ.get("ANTHROPIC_API_KEY")
-os.environ["ANTHROPIC_API_KEY"] = "bad-key"
+os.environ["ANTHROPIC_API_KEY"]: str = "bad-key"
 # test on openai completion call
 try:
     response = completion(
@@ -39,7 +39,7 @@ os.environ["ANTHROPIC_API_KEY"] = temp_key
 
 ## Test 2: Setting key via __init__ params
-litellm.anthropic_key = os.environ.get("ANTHROPIC_API_KEY")
+litellm.anthropic_key: str = os.environ.get("ANTHROPIC_API_KEY")
 os.environ.pop("ANTHROPIC_API_KEY")
 # test on openai completion call
 try:
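Both hunks annotate existing assignment targets; as with the subscript case earlier, PEP 526 discards annotations on attribute targets such as `litellm.anthropic_key` at runtime. Note also that `os.environ.get` returns `Optional[str]`, so a strict checker may still flag the bare `str` annotation; an explicit guard narrows the type. A sketch of that guarded pattern, with hypothetical error handling:

    import os

    key = os.environ.get("ANTHROPIC_API_KEY")  # typed Optional[str]
    if key is None:
        raise RuntimeError("ANTHROPIC_API_KEY is not set")  # hypothetical handling
    anthropic_key: str = key  # narrowed to str by the check above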


@@ -21,7 +21,7 @@ from .exceptions import (
     ServiceUnavailableError,
     OpenAIError,
 )
-from typing import List, Dict, Union
+from typing import List, Dict, Union, Optional
 
 ####### ENVIRONMENT VARIABLES ###################
 dotenv.load_dotenv() # Loading env variables using dotenv
@@ -35,10 +35,10 @@ heliconeLogger = None
 aispendLogger = None
 berrispendLogger = None
 supabaseClient = None
-callback_list = []
+callback_list: Optional[List[str]] = []
 user_logger_fn = None
-additional_details = {}
-local_cache = {}
+additional_details: Optional[Dict[str, str]] = {}
+local_cache: Optional[Dict[str, str]] = {}
 
 ######## Model Response #########################
 # All liteLLM Model responses will be in this format, Follows the OpenAI Format
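`Optional[T]` is shorthand for `Union[T, None]`, so these module-level annotations declare containers that start out empty but may legitimately be reset to `None` later; that is why `Optional` joined the `typing` import in the hunk above. A minimal sketch:

    from typing import Dict, List, Optional

    callback_list: Optional[List[str]] = []
    additional_details: Optional[Dict[str, str]] = {}
    local_cache: Optional[Dict[str, str]] = {}

    local_cache = None  # permitted by the Optional annotation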