LITELLM: Remove requests library usage (#7235)
* fix(generic_api_callback.py): remove requests lib usage
* fix(budget_manager.py): remove requests lib usage
* fix(main.py): clean up requests lib usage
* fix(utils.py): remove requests lib usage
* fix(argilla.py): fix argilla test
* fix(athina.py): replace 'requests' lib usage with litellm module
* fix(greenscale.py): replace 'requests' lib usage with httpx
* fix: remove unused 'requests' lib import + replace usage in some places
* fix(prompt_layer.py): remove 'requests' lib usage from prompt layer
* fix(ollama_chat.py): remove 'requests' lib usage
* fix(baseten.py): replace 'requests' lib usage
* fix(codestral/): replace 'requests' lib usage
* fix(predibase/): replace 'requests' lib usage
* refactor: clean up unused 'requests' lib imports
* fix(oobabooga.py): clean up 'requests' lib usage
* fix(invoke_handler.py): remove unused 'requests' lib usage
* refactor: clean up unused 'requests' lib import
* fix: fix linting errors
* refactor(ollama/): move ollama onto the base llm http handler; removes the 'requests' lib dep for the ollama integration
* fix(ollama_chat.py): fix linting errors
* fix(ollama/completion/transformation.py): convert non-jpeg/png images to jpeg/png before passing them to ollama
This commit is contained in:
parent 224ead1531
commit b82add11ba

46 changed files with 523 additions and 612 deletions
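Across these files the change is mechanical: call sites that used the global `requests` API now go through a single shared httpx-based client. A minimal sketch of that idea, assuming a requests-style wrapper around `httpx.Client` (this is the shape `litellm.module_level_client` takes in the hunks below, not litellm's actual implementation):

```python
# Sketch: one shared httpx client standing in for the module-level `requests`
# API. Only the name `module_level_client` is taken from the diff below; the
# rest is illustrative.
import httpx

# A single shared client reuses connections across calls, which a fresh
# requests.post(...) per call never did.
module_level_client = httpx.Client(timeout=600.0)


def post(url: str, headers: dict, data: str) -> httpx.Response:
    # httpx takes a pre-serialized body via `content=`, which matches the
    # data=json.dumps(...) call sites in this commit.
    return module_level_client.post(url, headers=headers, content=data)
```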
@@ -12,7 +12,6 @@ from functools import partial
 from typing import Callable, List, Literal, Optional, Union
 
 import httpx  # type: ignore
-import requests  # type: ignore
 
 import litellm
 from litellm import verbose_logger
@@ -22,7 +21,6 @@ from litellm.litellm_core_utils.prompt_templates.factory import (
     custom_prompt,
     prompt_factory,
 )
-from litellm.llms.base import BaseLLM
 from litellm.llms.custom_httpx.http_handler import (
     AsyncHTTPHandler,
     get_async_httpx_client,
@@ -95,7 +93,7 @@ async def make_call(
     return completion_stream
 
 
-class CodestralTextCompletion(BaseLLM):
+class CodestralTextCompletion:
     def __init__(self) -> None:
         super().__init__()
 
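Note: with `BaseLLM` gone from the bases, `super().__init__()` in the new code resolves to `object.__init__` and remains a harmless no-op, which is why the body of `__init__` did not need to change.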
@@ -139,7 +137,7 @@ class CodestralTextCompletion(BaseLLM):
     def process_text_completion_response(
         self,
         model: str,
-        response: Union[requests.Response, httpx.Response],
+        response: httpx.Response,
         model_response: TextCompletionResponse,
         stream: bool,
         logging_obj: LiteLLMLogging,
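Narrowing the annotation from `Union[requests.Response, httpx.Response]` to `httpx.Response` is safe when the handling code only touches the surface the two libraries share (`.status_code`, `.text`, `.json()`). A hypothetical sketch of such a processing step, not the actual method body (which this hunk does not show):

```python
import httpx


def parse_completion(response: httpx.Response) -> dict:
    # .status_code, .text and .json() exist on both requests.Response and
    # httpx.Response, which is what makes the annotation change a drop-in swap.
    if response.status_code != 200:
        raise RuntimeError(f"completion request failed: {response.text}")
    return response.json()
```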
@@ -317,7 +315,7 @@ class CodestralTextCompletion(BaseLLM):
 
         ### SYNC STREAMING
         if stream is True:
-            response = requests.post(
+            response = litellm.module_level_client.post(
                 completion_url,
                 headers=headers,
                 data=json.dumps(data),
@@ -333,7 +331,7 @@ class CodestralTextCompletion(BaseLLM):
         ### SYNC COMPLETION
         else:
 
-            response = requests.post(
+            response = litellm.module_level_client.post(
                 url=completion_url,
                 headers=headers,
                 data=json.dumps(data),
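For reference, here is how the two branches above map onto plain httpx; a hedged sketch with a placeholder URL and payload, not code from this commit. httpx replaces requests' `stream=True` flag with a `client.stream(...)` context manager:

```python
import json

import httpx

client = httpx.Client(timeout=600.0)
url = "https://example.invalid/v1/completions"  # placeholder endpoint
headers = {"Content-Type": "application/json"}
data = {"model": "codestral-latest", "prompt": "def fib(n):"}  # placeholder payload

# SYNC COMPLETION: a one-for-one stand-in for requests.post(...)
response = client.post(url, headers=headers, content=json.dumps(data))

# SYNC STREAMING: httpx's equivalent of requests' stream=True
with client.stream("POST", url, headers=headers, content=json.dumps(data)) as r:
    for line in r.iter_lines():
        pass  # parse streamed chunks / SSE lines here
```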