forked from phoenix/litellm-mirror
remove unused pkg resources + docs update
This commit is contained in:
parent
d394c77623
commit
6f48f7ab6e
2 changed files with 18 additions and 14 deletions
|
@ -1,20 +1,25 @@
|
||||||
# Output Format - completion()
|
# Output Format - completion()
|
||||||
Here's the exact json output you can expect from all litellm `completion` calls for all models
|
Here's the exact JSON output and type you can expect from all litellm `completion` calls for all models
|
||||||
|
|
||||||
```python
|
```python
|
||||||
{
|
{
|
||||||
'choices': [
|
'choices': [
|
||||||
{
|
{
|
||||||
'finish_reason': 'stop',
|
'finish_reason': str, # String: 'stop'
|
||||||
'index': 0,
|
'index': int, # Integer: 0
|
||||||
'message': {
|
'message': { # Dictionary [str, str]
|
||||||
'role': 'assistant',
|
'role': str, # String: 'assistant'
|
||||||
'content': " I'm doing well, thank you for asking. I am Claude, an AI assistant created by Anthropic."
|
'content': str # String: "default message"
|
||||||
}
|
|
||||||
}
|
}
|
||||||
],
|
}
|
||||||
'created': 1691429984.3852863,
|
],
|
||||||
'model': 'claude-instant-1',
|
'created': str, # String: creation timestamp, e.g. "1691429984.3852863"
|
||||||
'usage': {'prompt_tokens': 18, 'completion_tokens': 23, 'total_tokens': 41}
|
'model': str, # String: model name, e.g. "claude-instant-1"
|
||||||
|
'usage': { # Dictionary [str, int]
|
||||||
|
'prompt_tokens': int, # Integer
|
||||||
|
'completion_tokens': int, # Integer
|
||||||
|
'total_tokens': int # Integer
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
```
|
```
|
|
@ -5,7 +5,6 @@ import litellm, openai
|
||||||
import random, uuid, requests
|
import random, uuid, requests
|
||||||
import datetime, time
|
import datetime, time
|
||||||
import tiktoken
|
import tiktoken
|
||||||
from pkg_resources import DistributionNotFound, VersionConflict
|
|
||||||
encoding = tiktoken.get_encoding("cl100k_base")
|
encoding = tiktoken.get_encoding("cl100k_base")
|
||||||
from .integrations.helicone import HeliconeLogger
|
from .integrations.helicone import HeliconeLogger
|
||||||
from .integrations.aispend import AISpendLogger
|
from .integrations.aispend import AISpendLogger
|
||||||
|
@ -111,7 +110,7 @@ class ModelResponse:
|
||||||
choices_str = ",\n".join(str(choice) for choice in self.choices)
|
choices_str = ",\n".join(str(choice) for choice in self.choices)
|
||||||
result = f"{{\n 'choices': [\n{choices_str}\n ],\n 'created': {self.created},\n 'model': '{self.model}',\n 'usage': {self.usage}\n}}"
|
result = f"{{\n 'choices': [\n{choices_str}\n ],\n 'created': {self.created},\n 'model': '{self.model}',\n 'usage': {self.usage}\n}}"
|
||||||
return result
|
return result
|
||||||
|
############################################################
|
||||||
def print_verbose(print_statement):
|
def print_verbose(print_statement):
|
||||||
if litellm.set_verbose:
|
if litellm.set_verbose:
|
||||||
print(f"LiteLLM: {print_statement}")
|
print(f"LiteLLM: {print_statement}")
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue