diff --git a/litellm/__pycache__/main.cpython-311.pyc b/litellm/__pycache__/main.cpython-311.pyc
index f8e81abe6c..7c1a53cf54 100644
Binary files a/litellm/__pycache__/main.cpython-311.pyc and b/litellm/__pycache__/main.cpython-311.pyc differ
diff --git a/litellm/__pycache__/utils.cpython-311.pyc b/litellm/__pycache__/utils.cpython-311.pyc
index 3f33c9e65d..c4e28b05a5 100644
Binary files a/litellm/__pycache__/utils.cpython-311.pyc and b/litellm/__pycache__/utils.cpython-311.pyc differ
diff --git a/litellm/tests/test_model_response_typing/server.py b/litellm/tests/test_model_response_typing/server.py
new file mode 100644
index 0000000000..0399f0d919
--- /dev/null
+++ b/litellm/tests/test_model_response_typing/server.py
@@ -0,0 +1,23 @@
+# #### What this tests ####
+# # This tests if the litellm model response type is returnable in a flask app
+
+# import sys, os
+# import traceback
+# from flask import Flask, request, jsonify, abort, Response
+# sys.path.insert(0, os.path.abspath('../../..')) # Adds the parent directory to the system path
+
+# import litellm
+# from litellm import completion
+
+# litellm.set_verbose = False
+
+# app = Flask(__name__)
+
+# @app.route('/')
+# def hello():
+#     data = request.json
+#     return completion(**data)
+
+# if __name__ == '__main__':
+#     from waitress import serve
+#     serve(app, host='localhost', port=8080, threads=10)
\ No newline at end of file
diff --git a/litellm/tests/test_model_response_typing/test.py b/litellm/tests/test_model_response_typing/test.py
new file mode 100644
index 0000000000..c1620e9412
--- /dev/null
+++ b/litellm/tests/test_model_response_typing/test.py
@@ -0,0 +1,14 @@
+# import requests, json
+
+# BASE_URL = 'http://localhost:8080'
+
+# def test_hello_route():
+#     data = {"model": "gpt-3.5-turbo", "messages": [{"role": "user", "content": "hey, how's it going?"}]}
+#     headers = {'Content-Type': 'application/json'}
+#     response = requests.get(BASE_URL, headers=headers, data=json.dumps(data))
+#     print(response.text)
+#     assert response.status_code == 200
+#     print("Hello route test passed!")
+
+# if __name__ == '__main__':
+#     test_hello_route()
\ No newline at end of file
diff --git a/litellm/utils.py b/litellm/utils.py
index e5886c8e63..8646037680 100644
--- a/litellm/utils.py
+++ b/litellm/utils.py
@@ -51,67 +51,35 @@ local_cache = {}
 #   'usage': {'prompt_tokens': 18, 'completion_tokens': 23, 'total_tokens': 41}
 # }
 
-class Message:
-    def __init__(self):
-        self.content: str = "default"
-        self.role: str = "assistant"
+class Message(OpenAIObject):
+    def __init__(self, content="default", role="assistant", **params):
+        super(Message, self).__init__(**params)
+        self.content = content
+        self.role = role
 
-    def __getitem__(self, key):
-        return getattr(self, key)
+class Choices(OpenAIObject):
+    def __init__(self, finish_reason="stop", index=0, message=Message(), **params):
+        super(Choices, self).__init__(**params)
+        self.finish_reason = finish_reason
+        self.index = index
+        self.message = message
 
-    def __setitem__(self, key, value):
-        setattr(self, key, value)
-
-    def __iter__(self):
-        return iter(vars(self))
-
-    def __str__(self):
-        result = f"{{\n  'role': '{self.role}',\n  'content': \"{self.content}\"\n}}"
-        return result
-
-class Choices:
-    def __init__(self):
-        self.finish_reason: str = "stop"
-        self.index: int = 0
-        self.message: Message = Message()
-
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __setitem__(self, key, value):
-        setattr(self, key, value)
-
-    def __iter__(self):
-        return iter(vars(self))
-
-    def __str__(self):
-        result = f"{{\n  'finish_reason': '{self.finish_reason}',\n  'index': {self.index},\n  'message': {self.message}\n}}"
-        return result
-
-class ModelResponse(dict):
-    def __init__(self):
-        self.choices: List[Choices] = [Choices()]
-        self.created: str = None
-        self.model: str = None
-        self.usage: Dict[str, Union[int, None]] = {
+class ModelResponse(OpenAIObject):
+    def __init__(self, choices=None, created=None, model=None, usage=None, **params):
+        super(ModelResponse, self).__init__(**params)
+        self.choices = choices if choices else [Choices()]
+        self.created = created
+        self.model = model
+        self.usage = usage if usage else {
             "prompt_tokens": None,
             "completion_tokens": None,
             "total_tokens": None
         }
 
-    def __getitem__(self, key):
-        return getattr(self, key)
-
-    def __setitem__(self, key, value):
-        setattr(self, key, value)
-
-    def __iter__(self):
-        return iter(vars(self))
-
-    def __str__(self):
-        choices_str = ",\n".join(str(choice) for choice in self.choices)
-        result = f"{{\n  'choices': [\n{choices_str}\n  ],\n  'created': {self.created},\n  'model': '{self.model}',\n  'usage': {self.usage}\n}}"
-        return result
+    def to_dict_recursive(self):
+        d = super().to_dict_recursive()
+        d['choices'] = [choice.to_dict_recursive() for choice in self.choices]
+        return d
 
 ############################################################
 def print_verbose(print_statement):
     if litellm.set_verbose:
diff --git a/pyproject.toml b/pyproject.toml
index 1d143a7b1d..4c1b088a69 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.422"
+version = "0.1.424"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"