mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 11:14:04 +00:00
LITELLM: Remove requests
library usage (#7235)
* fix(generic_api_callback.py): remove requests lib usage * fix(budget_manager.py): remove requests lib usage * fix(main.py): cleanup requests lib usage * fix(utils.py): remove requests lib usage * fix(argilla.py): fix argilla test * fix(athina.py): replace 'requests' lib usage with litellm module * fix(greenscale.py): replace 'requests' lib usage with httpx * fix: remove unused 'requests' lib import + replace usage in some places * fix(prompt_layer.py): remove 'requests' lib usage from prompt layer * fix(ollama_chat.py): remove 'requests' lib usage * fix(baseten.py): replace 'requests' lib usage * fix(codestral/): replace 'requests' lib usage * fix(predibase/): replace 'requests' lib usage * refactor: cleanup unused 'requests' lib imports * fix(oobabooga.py): cleanup 'requests' lib usage * fix(invoke_handler.py): remove unused 'requests' lib usage * refactor: cleanup unused 'requests' lib import * fix: fix linting errors * refactor(ollama/): move ollama to using base llm http handler removes 'requests' lib dep for ollama integration * fix(ollama_chat.py): fix linting errors * fix(ollama/completion/transformation.py): convert non-jpeg/png image to jpeg/png before passing to ollama
This commit is contained in:
parent
f628290ce7
commit
03e711e3e4
46 changed files with 523 additions and 612 deletions
|
@ -303,7 +303,7 @@ def run_server( # noqa: PLR0915
|
|||
return
|
||||
if model and "ollama" in model and api_base is None:
|
||||
run_ollama_serve()
|
||||
import requests
|
||||
import httpx
|
||||
|
||||
if test_async is True:
|
||||
import concurrent
|
||||
|
@ -319,7 +319,7 @@ def run_server( # noqa: PLR0915
|
|||
],
|
||||
}
|
||||
|
||||
response = requests.post("http://0.0.0.0:4000/queue/request", json=data)
|
||||
response = httpx.post("http://0.0.0.0:4000/queue/request", json=data)
|
||||
|
||||
response = response.json()
|
||||
|
||||
|
@ -327,7 +327,7 @@ def run_server( # noqa: PLR0915
|
|||
try:
|
||||
url = response["url"]
|
||||
polling_url = f"{api_base}{url}"
|
||||
polling_response = requests.get(polling_url)
|
||||
polling_response = httpx.get(polling_url)
|
||||
polling_response = polling_response.json()
|
||||
print("\n RESPONSE FROM POLLING JOB", polling_response) # noqa
|
||||
status = polling_response["status"]
|
||||
|
@ -378,7 +378,7 @@ def run_server( # noqa: PLR0915
|
|||
if health is not False:
|
||||
|
||||
print("\nLiteLLM: Health Testing models in config") # noqa
|
||||
response = requests.get(url=f"http://{host}:{port}/health")
|
||||
response = httpx.get(url=f"http://{host}:{port}/health")
|
||||
print(json.dumps(response.json(), indent=4)) # noqa
|
||||
return
|
||||
if test is not False:
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue