mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-25 10:44:24 +00:00)
LITELLM: Remove requests library usage (#7235)
* fix(generic_api_callback.py): remove requests lib usage
* fix(budget_manager.py): remove requests lib usage
* fix(main.py): cleanup requests lib usage
* fix(utils.py): remove requests lib usage
* fix(argilla.py): fix argilla test
* fix(athina.py): replace 'requests' lib usage with litellm module
* fix(greenscale.py): replace 'requests' lib usage with httpx
* fix: remove unused 'requests' lib import + replace usage in some places
* fix(prompt_layer.py): remove 'requests' lib usage from prompt layer
* fix(ollama_chat.py): remove 'requests' lib usage
* fix(baseten.py): replace 'requests' lib usage
* fix(codestral/): replace 'requests' lib usage
* fix(predibase/): replace 'requests' lib usage
* refactor: cleanup unused 'requests' lib imports
* fix(oobabooga.py): cleanup 'requests' lib usage
* fix(invoke_handler.py): remove unused 'requests' lib usage
* refactor: cleanup unused 'requests' lib import
* fix: fix linting errors
* refactor(ollama/): move ollama to using the base llm http handler; removes the 'requests' lib dep for the ollama integration
* fix(ollama_chat.py): fix linting errors
* fix(ollama/completion/transformation.py): convert non-jpeg/png images to jpeg/png before passing to ollama (a hedged sketch of this conversion follows the diff below)
This commit is contained in:
parent 224ead1531 · commit b82add11ba
46 changed files with 523 additions and 612 deletions
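The recurring pattern in the hunks below is swapping per-call requests.get() for litellm's module-level, httpx-based client. A minimal sketch of that pattern, assuming the module-level client is essentially a shared httpx.Client (module_level_client and fetch_hf_config are illustrative names here, not the exact litellm internals):

import httpx

# One shared, module-level client reused across calls, instead of opening a
# fresh connection via requests.get() on every lookup.
module_level_client = httpx.Client()


def fetch_hf_config(model_name: str) -> dict:
    # Fetch a model's raw config.json from the Hugging Face Hub.
    config_url = f"https://huggingface.co/{model_name}/raw/main/config.json"
    response = module_level_client.get(config_url)
    response.raise_for_status()  # raises httpx.HTTPStatusError on 4xx/5xx
    return response.json()

Reusing a single client keeps connection pooling in one place and lets the requests dependency be dropped entirely.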
@@ -43,7 +43,6 @@ import aiohttp
 import dotenv
 import httpx
 import openai
-import requests
 import tiktoken
 from httpx import Proxy
 from httpx._utils import get_environment_proxies
@@ -4175,7 +4174,7 @@ def get_max_tokens(model: str) -> Optional[int]:
         config_url = f"https://huggingface.co/{model_name}/raw/main/config.json"
         try:
             # Make the HTTP request to get the raw JSON file
-            response = requests.get(config_url)
+            response = litellm.module_level_client.get(config_url)
             response.raise_for_status()  # Raise an exception for bad responses (4xx or 5xx)

             # Parse the JSON response
@@ -4186,7 +4185,7 @@ def get_max_tokens(model: str) -> Optional[int]:
                 return max_position_embeddings
             else:
                 return None
-        except requests.exceptions.RequestException:
+        except Exception:
             return None

     try:
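The handler above is widened to a plain except Exception because requests.exceptions.RequestException is no longer raised once the call goes through the httpx-based client. An httpx-specific alternative, sketched with the illustrative names from the earlier snippet (this is not what the diff does):

try:
    response = module_level_client.get(config_url)
    response.raise_for_status()
    config_json = response.json()
except httpx.HTTPError:
    # httpx.HTTPError is the shared base of httpx.RequestError (transport
    # failures) and httpx.HTTPStatusError (raised by raise_for_status()),
    # so this stays narrower than a blanket `except Exception`.
    config_json = None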
@@ -4361,7 +4360,7 @@ def get_model_info(  # noqa: PLR0915

         try:
             # Make the HTTP request to get the raw JSON file
-            response = requests.get(config_url)
+            response = litellm.module_level_client.get(config_url)
             response.raise_for_status()  # Raise an exception for bad responses (4xx or 5xx)

             # Parse the JSON response
@@ -4374,7 +4373,7 @@ def get_model_info(  # noqa: PLR0915
                 return max_position_embeddings
             else:
                 return None
-        except requests.exceptions.RequestException:
+        except Exception:
             return None

     try:
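For the last commit-message item, ollama/completion/transformation.py now converts non-JPEG/PNG images before they are passed to Ollama. A hedged sketch of such a conversion, assuming Pillow is available (the function name and exact approach are illustrative, not the code in that file):

import base64
import io

from PIL import Image


def ensure_jpeg_or_png_base64(image_b64: str) -> str:
    # Re-encode a base64 image as PNG unless it is already JPEG or PNG.
    raw = base64.b64decode(image_b64)
    img = Image.open(io.BytesIO(raw))
    if img.format in ("JPEG", "PNG"):
        return image_b64  # already in a format Ollama accepts
    buf = io.BytesIO()
    img.convert("RGBA").save(buf, format="PNG")  # PNG keeps any alpha channel
    return base64.b64encode(buf.getvalue()).decode("utf-8")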