mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 18:24:20 +00:00)
remove install_and_import remove petals

parent 03a7f7091e
commit 2a36f06763

2 changed files with 17 additions and 68 deletions
@@ -14,7 +14,6 @@ from litellm import (  # type: ignore
 )
 from litellm.utils import (
     get_secret,
-    install_and_import,
     CustomStreamWrapper,
     read_config_args,
     completion_with_fallbacks,
@@ -34,7 +33,6 @@ from typing import Callable, List, Optional, Dict
 encoding = tiktoken.get_encoding("cl100k_base")
 from litellm.utils import (
     get_secret,
-    install_and_import,
     CustomStreamWrapper,
     ModelResponse,
     read_config_args,
@@ -344,8 +342,10 @@ def completion(
             response = model_response
         elif "replicate" in model or custom_llm_provider == "replicate":
             # import replicate/if it fails then pip install replicate
-            install_and_import("replicate")
-            import replicate
+            try:
+                import replicate
+            except:
+                Exception("Replicate import failed please run `pip install replicate`")
 
             # Setting the relevant API KEY for replicate, replicate defaults to using os.environ.get("REPLICATE_API_TOKEN")
             replicate_key = os.environ.get("REPLICATE_API_TOKEN")
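Reviewer note: in the added guard, `Exception(...)` constructs an exception object but never raises it, so a missing package falls through and fails later with a `NameError` on `replicate`. A minimal sketch of what the guard appears to intend — the `raise` is the assumption here, the message text is taken from the diff — and the same applies to the cohere and anthropic guards below:

# Hypothetical corrected form of the added guard; it raises instead of
# silently discarding the constructed Exception object.
try:
    import replicate  # optional dependency
except ImportError:
    raise Exception(
        "Replicate import failed please run `pip install replicate`"
    )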
@@ -507,8 +507,10 @@ def completion(
             )
         elif model in litellm.cohere_models:
             # import cohere/if it fails then pip install cohere
-            install_and_import("cohere")
-            import cohere
+            try:
+                import cohere
+            except:
+                Exception("Cohere import failed please run `pip install cohere`")
 
             cohere_key = (
                 api_key
@@ -776,39 +778,6 @@ def completion(
             )
             return response
             response = model_response
-        elif custom_llm_provider == "petals" or (
-            litellm.api_base and "chat.petals.dev" in litellm.api_base
-        ):
-            url = "https://chat.petals.dev/api/v1/generate"
-            import requests
-
-            prompt = " ".join([message["content"] for message in messages])
-
-            ## LOGGING
-            logging.pre_call(
-                input=prompt,
-                api_key=None,
-                additional_args={"url": url, "max_new_tokens": 100},
-            )
-
-            response = requests.post(
-                url, data={"inputs": prompt, "max_new_tokens": 100, "model": model}
-            )
-            ## LOGGING
-            logging.post_call(
-                input=prompt,
-                api_key=None,
-                original_response=response.text,
-                additional_args={"url": url, "max_new_tokens": 100},
-            )
-
-            completion_response = response.json()["outputs"]
-
-            # RESPONSE OBJECT
-            model_response["choices"][0]["message"]["content"] = completion_response
-            model_response["created"] = time.time()
-            model_response["model"] = model
-            response = model_response
         else:
             raise ValueError(
                 f"Unable to map your input to a model. Check your input - {args}"
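For reference, the deleted Petals branch boils down to one HTTP call: POST the flattened prompt to the public endpoint and read the `outputs` field of the JSON reply. A self-contained sketch of that call, assuming the `chat.petals.dev` API still accepts the request shape the removed lines used:

import requests

# Endpoint, payload fields, and the "outputs" key all come from the
# removed diff lines; the model id is a placeholder, not from the diff.
url = "https://chat.petals.dev/api/v1/generate"
prompt = "Hello, how are you?"
model = "some-petals-model"  # hypothetical

response = requests.post(
    url, data={"inputs": prompt, "max_new_tokens": 100, "model": model}
)
print(response.json()["outputs"])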
@@ -119,33 +119,6 @@ def print_verbose(print_statement):
     if random.random() <= 0.3:
         print("Get help - https://discord.com/invite/wuPM9dRgDw")
 
-
-####### Package Import Handler ###################
-
-
-def install_and_import(package: str):
-    if package in globals().keys():
-        print_verbose(f"{package} has already been imported.")
-        return
-    try:
-        # Import the module
-        module = importlib.import_module(package)
-    except ImportError:
-        print_verbose(f"{package} is not installed. Installing...")
-        subprocess.call([sys.executable, "-m", "pip", "install", package])
-        globals()[package] = importlib.import_module(package)
-    # except VersionConflict as vc:
-    #     print_verbose(f"Detected version conflict for {package}. Upgrading...")
-    #     subprocess.call([sys.executable, "-m", "pip", "install", "--upgrade", package])
-    #     globals()[package] = importlib.import_module(package)
-    finally:
-        if package not in globals().keys():
-            globals()[package] = importlib.import_module(package)
-
-
-##################################################
-
-
 ####### LOGGING ###################
 from enum import Enum
 
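The removed helper pip-installs missing packages at runtime via `subprocess` and stashes the module in `globals()`, mutating the caller's environment as a side effect of an ordinary library call. A hedged sketch of a detection-only alternative in the same spirit as the commit's try/except guards — `require_package` is a hypothetical name, not part of litellm:

import importlib
import importlib.util


def require_package(package: str):
    # Hypothetical helper, not part of litellm: check availability
    # without shelling out to pip, then import normally.
    if importlib.util.find_spec(package) is None:
        raise ImportError(
            f"{package} is not installed; please run `pip install {package}`"
        )
    return importlib.import_module(package)


# Usage sketch:
# replicate = require_package("replicate")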
@@ -599,7 +572,11 @@ def token_counter(model, text):
     # use tiktoken or anthropic's tokenizer depending on the model
     num_tokens = 0
     if "claude" in model:
-        install_and_import("anthropic")
+        try:
+            import anthropic
+        except Exception:
+            Exception("Anthropic import failed please run `pip install anthropic`")
+
         from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
 
         anthropic = Anthropic()
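Context for this hunk: `token_counter` dispatches on the model family, using Anthropic's tokenizer for claude models and the module-level `cl100k_base` tiktoken encoding otherwise. A condensed sketch of that dispatch; the `count_tokens` call assumes an anthropic SDK version where `Anthropic().count_tokens` still exists:

import tiktoken


def count_tokens_sketch(model: str, text: str) -> int:
    # Condensed version of the dispatch token_counter performs.
    if "claude" in model:
        try:
            from anthropic import Anthropic
        except ImportError:
            raise Exception(
                "Anthropic import failed please run `pip install anthropic`"
            )
        # Assumes an SDK version that still exposes count_tokens().
        return Anthropic().count_tokens(text)
    # Default path mirrors the file's module-level cl100k_base encoding.
    encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))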
@@ -1293,7 +1270,10 @@ def prompt_token_calculator(model, messages):
     text = " ".join(message["content"] for message in messages)
     num_tokens = 0
     if "claude" in model:
-        install_and_import("anthropic")
+        try:
+            import anthropic
+        except:
+            Exception("Anthropic import failed please run `pip install anthropic`")
     from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT
 
     anthropic = Anthropic()