diff --git a/docs/my-website/docs/completion/output.md b/docs/my-website/docs/completion/output.md index 166a6c34d..cfeccf288 100644 --- a/docs/my-website/docs/completion/output.md +++ b/docs/my-website/docs/completion/output.md @@ -1,20 +1,25 @@ # Output Format - completion() -Here's the exact json output you can expect from all litellm `completion` calls for all models +Here's the exact json output and type you can expect from all litellm `completion` calls for all models ```python { 'choices': [ - { - 'finish_reason': 'stop', - 'index': 0, - 'message': { - 'role': 'assistant', - 'content': " I'm doing well, thank you for asking. I am Claude, an AI assistant created by Anthropic." - } + { + 'finish_reason': str, # String: 'stop' + 'index': int, # Integer: 0 + 'message': { # Dictionary [str, str] + 'role': str, # String: 'assistant' + 'content': str # String: "default message" } - ], - 'created': 1691429984.3852863, - 'model': 'claude-instant-1', - 'usage': {'prompt_tokens': 18, 'completion_tokens': 23, 'total_tokens': 41} + } + ], + 'created': float, # Float: Unix timestamp, e.g. 1691429984.3852863 + 'model': str, # String: e.g. 'claude-instant-1' + 'usage': { # Dictionary [str, int] + 'prompt_tokens': int, # Integer + 'completion_tokens': int, # Integer + 'total_tokens': int # Integer + } } + ``` \ No newline at end of file diff --git a/litellm/utils.py b/litellm/utils.py index 02c0c49c4..fb391dbc2 100644 --- a/litellm/utils.py +++ b/litellm/utils.py @@ -5,7 +5,6 @@ import litellm, openai import random, uuid, requests import datetime, time import tiktoken -from pkg_resources import DistributionNotFound, VersionConflict encoding = tiktoken.get_encoding("cl100k_base") from .integrations.helicone import HeliconeLogger from .integrations.aispend import AISpendLogger @@ -111,7 +110,7 @@ class ModelResponse: choices_str = ",\n".join(str(choice) for choice in self.choices) result = f"{{\n 'choices': [\n{choices_str}\n ],\n 'created': {self.created},\n 'model': '{self.model}',\n 'usage': {self.usage}\n}}" return result - 
+############################################################ def print_verbose(print_statement): if litellm.set_verbose: print(f"LiteLLM: {print_statement}")