forked from phoenix/litellm-mirror
change get to post request
parent 661d1bff8b
commit 1f98f38f13
10 changed files with 25 additions and 35 deletions
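The substantive change is in get_model_list(): the hosted model-list endpoint was being called with requests.get while a JSON body was passed along, presumably a problem because a body on a GET request has no defined semantics and is commonly dropped by servers and intermediaries; the commit switches the call to requests.post. A minimal before/after sketch using the requests library (endpoint, headers, and payload are copied from the diff below; the email value is a placeholder):

import json
import requests

headers = {"content-type": "application/json"}
payload = json.dumps({"user_email": "user@example.com"})  # placeholder email

# Before: a GET carrying a request body; many servers and proxies discard the body.
# response = requests.get(url="http://api.litellm.ai/get_model_list", headers=headers, data=payload)

# After: a POST, where a JSON body is the expected way to send parameters.
response = requests.post(url="http://api.litellm.ai/get_model_list", headers=headers, data=payload)
print(response.json())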
BIN  dist/litellm-0.1.452-py3-none-any.whl  vendored  Normal file
Binary file not shown.

BIN  dist/litellm-0.1.452.tar.gz  vendored  Normal file
Binary file not shown.
@@ -1,22 +1,22 @@
-# #### What this tests ####
-# # This tests if logging to the litedebugger integration actually works
-# # pytest mistakes intentional bad calls as failed tests -> [TODO] fix this
-# import sys, os
-# import traceback
-# import pytest
+#### What this tests ####
+# This tests if logging to the litedebugger integration actually works
+# pytest mistakes intentional bad calls as failed tests -> [TODO] fix this
+import sys, os
+import traceback
+import pytest
 
-# sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
-# import litellm
-# from litellm import embedding, completion
+sys.path.insert(0, os.path.abspath('../..')) # Adds the parent directory to the system path
+import litellm
+from litellm import embedding, completion
 
-# litellm.email = "krrish@berri.ai"
+litellm.email = "krrish@berri.ai"
 
-# user_message = "Hello, how are you?"
-# messages = [{ "content": user_message,"role": "user"}]
+user_message = "Hello, how are you?"
+messages = [{ "content": user_message,"role": "user"}]
 
 
-# #openai call
-# response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
+#openai call
+response = completion(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hi 👋 - i'm openai"}])
 
-# #bad request call
-# response = completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])
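The re-enabled test runs its calls at import time, and the header comment carries a TODO: pytest counts the intentional bad call as a failed test. A hedged sketch of one way that TODO could be resolved (catching bare Exception is an assumption for illustration; the diff does not say which error the bad model name raises):

import pytest
from litellm import completion

def test_bad_request_is_intentional():
    # Sketch only: let the deliberate bad call pass by asserting that it raises.
    # The broad Exception type is an assumption, not taken from this commit.
    with pytest.raises(Exception):
        completion(model="chatgpt-test", messages=[{"role": "user", "content": "Hi 👋 - i'm a bad request"}])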
@@ -1022,7 +1022,6 @@ def handle_success(args, kwargs, result, start_time, end_time):
         )
         pass
 
-
 def get_model_list():
     global last_fetched_at
     # if user is using hosted product -> get their updated model list - refresh every 5 minutes
@@ -1036,24 +1035,15 @@ def get_model_list():
             # make the api call
             last_fetched_at = time.time()
             print(f"last_fetched_at: {last_fetched_at}")
-            response = requests.get(
-                url="http://api.litellm.ai/get_model_list",
-                headers={"content-type": "application/json"},
-                data=json.dumps({"user_email": user_email}),
-            )
+            response = requests.post(url="http://api.litellm.ai/get_model_list", headers={"content-type": "application/json"}, data=json.dumps({"user_email": user_email}))
             print_verbose(f"get_model_list response: {response.text}")
             data = response.json()
             # update model list
             model_list = data["model_list"]
             # set environment variables
             env_dict = data["model_keys"]
             for key, value in env_dict.items():
                 os.environ[key] = value
-            litellm.model_list = (
-                model_list  # update the user's current litellm model list
-            )
-            # return litellm model list by default
-            return litellm.model_list
-    return None
+            return model_list
+    # return None by default
+    return None
 
 
 def acreate(*args, **kwargs): ## Thin client to handle the acreate langchain call
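With the tail of the function consolidated, get_model_list() now returns the freshly fetched model_list when a refresh happens and None by default, so callers need their own fallback. A usage sketch, assuming the function is reachable as a top-level attribute of the package:

import litellm

model_list = litellm.get_model_list()  # assumption: exposed at the package top level
if model_list is None:
    model_list = litellm.model_list  # fall back to the current/default list
print(model_list)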
6  poetry.lock  generated
@@ -423,13 +423,13 @@ files = [
 
 [[package]]
 name = "openai"
-version = "0.27.8"
+version = "0.27.9"
 description = "Python client library for the OpenAI API"
 optional = false
 python-versions = ">=3.7.1"
 files = [
-    {file = "openai-0.27.8-py3-none-any.whl", hash = "sha256:e0a7c2f7da26bdbe5354b03c6d4b82a2f34bd4458c7a17ae1a7092c3e397e03c"},
-    {file = "openai-0.27.8.tar.gz", hash = "sha256:2483095c7db1eee274cebac79e315a986c4e55207bb4fa7b82d185b3a2ed9536"},
+    {file = "openai-0.27.9-py3-none-any.whl", hash = "sha256:6a3cf8e276d1a6262b50562fbc0cba7967cfebb78ed827d375986b48fdad6475"},
+    {file = "openai-0.27.9.tar.gz", hash = "sha256:b687761c82f5ebb6f61efc791b2083d2d068277b94802d4d1369efe39851813d"},
 ]
 
 [package.dependencies]
pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.452"
+version = "0.1.454"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"