mirror of
https://github.com/BerriAI/litellm.git
synced 2025-04-26 19:24:27 +00:00
LITELLM: Remove requests
library usage (#7235)
* fix(generic_api_callback.py): remove requests lib usage * fix(budget_manager.py): remove requests lib usage * fix(main.py): cleanup requests lib usage * fix(utils.py): remove requests lib usage * fix(argilla.py): fix argilla test * fix(athina.py): replace 'requests' lib usage with litellm module * fix(greenscale.py): replace 'requests' lib usage with httpx * fix: remove unused 'requests' lib import + replace usage in some places * fix(prompt_layer.py): remove 'requests' lib usage from prompt layer * fix(ollama_chat.py): remove 'requests' lib usage * fix(baseten.py): replace 'requests' lib usage * fix(codestral/): replace 'requests' lib usage * fix(predibase/): replace 'requests' lib usage * refactor: cleanup unused 'requests' lib imports * fix(oobabooga.py): cleanup 'requests' lib usage * fix(invoke_handler.py): remove unused 'requests' lib usage * refactor: cleanup unused 'requests' lib import * fix: fix linting errors * refactor(ollama/): move ollama to using base llm http handler removes 'requests' lib dep for ollama integration * fix(ollama_chat.py): fix linting errors * fix(ollama/completion/transformation.py): convert non-jpeg/png image to jpeg/png before passing to ollama
This commit is contained in:
parent
f628290ce7
commit
03e711e3e4
46 changed files with 523 additions and 612 deletions
|
@ -1940,10 +1940,11 @@ def test_ollama_image():
|
|||
mock_response = MagicMock()
|
||||
mock_response.status_code = 200
|
||||
mock_response.headers = {"Content-Type": "application/json"}
|
||||
data_json = json.loads(kwargs["data"])
|
||||
mock_response.json.return_value = {
|
||||
# return the image in the response so that it can be tested
|
||||
# against the original
|
||||
"response": kwargs["json"]["images"]
|
||||
"response": data_json["images"]
|
||||
}
|
||||
return mock_response
|
||||
|
||||
|
@ -1971,9 +1972,10 @@ def test_ollama_image():
|
|||
[datauri_base64_data, datauri_base64_data],
|
||||
]
|
||||
|
||||
client = HTTPHandler()
|
||||
for test in tests:
|
||||
try:
|
||||
with patch("requests.post", side_effect=mock_post):
|
||||
with patch.object(client, "post", side_effect=mock_post):
|
||||
response = completion(
|
||||
model="ollama/llava",
|
||||
messages=[
|
||||
|
@ -1988,6 +1990,7 @@ def test_ollama_image():
|
|||
],
|
||||
}
|
||||
],
|
||||
client=client,
|
||||
)
|
||||
if not test[1]:
|
||||
# the conversion process may not always generate the same image,
|
||||
|
@ -2387,8 +2390,8 @@ def test_completion_ollama_hosted():
|
|||
response = completion(
|
||||
model="ollama/phi",
|
||||
messages=messages,
|
||||
max_tokens=2,
|
||||
api_base="https://test-ollama-endpoint.onrender.com",
|
||||
max_tokens=20,
|
||||
# api_base="https://test-ollama-endpoint.onrender.com",
|
||||
)
|
||||
# Add any assertions here to check the response
|
||||
print(response)
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue