fixing claude max token testing

https://github.com/BerriAI/litellm.git
commit 54409a2a30 (parent dd61a5b35e)
5 changed files with 17 additions and 8 deletions
Binary file not shown.
Binary file not shown.
@@ -60,6 +60,8 @@ def completion(
                 prompt += f"{AnthropicConstants.HUMAN_PROMPT.value}{message['content']}"
     prompt += f"{AnthropicConstants.AI_PROMPT.value}"
     max_tokens_to_sample = optional_params.get("max_tokens_to_sample", 256) # required anthropic param, default to 256 if user does not provide an input
+    if max_tokens_to_sample != 256: # not default - print for testing
+        print_verbose(f"LiteLLM.Anthropic: Max Tokens Set")
     data = {
         "model": model,
         "prompt": prompt,
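The two added lines fire only when the caller overrides Anthropic's 256-token default, leaving a "LiteLLM.Anthropic: Max Tokens Set" marker in the verbose log that the test below greps for. A minimal sketch of triggering it from the public API (model name and prompt are illustrative; the max_tokens → max_tokens_to_sample mapping is implied by the test, not shown in this hunk):

```python
import litellm

litellm.set_verbose = True  # print_verbose() output is only emitted in verbose mode

# A non-default max_tokens should reach the Anthropic handler as
# max_tokens_to_sample and trigger "LiteLLM.Anthropic: Max Tokens Set".
response = litellm.completion(
    model="claude-instant-1",
    messages=[{"role": "user", "content": "say hi"}],
    max_tokens=1200,  # anything other than 256 trips the new log line
)
```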
@@ -3,7 +3,7 @@ import traceback
 from dotenv import load_dotenv

 load_dotenv()
-import os
+import os, io

 sys.path.insert(
     0, os.path.abspath("../..")
@@ -51,6 +51,11 @@ def test_completion_claude():

 def test_completion_claude_max_tokens():
     try:
+        litellm.set_verbose = True
+        # Redirect stdout
+        old_stdout = sys.stdout
+        sys.stdout = new_stdout = io.StringIO()
+
         # test setting max tokens for claude-2
         user_message = "tell me everything about YC - be verbose"
         messages = [{"content": user_message, "role": "user"}]
@@ -58,15 +63,17 @@ def test_completion_claude_max_tokens():
         response = completion(
             model="claude-instant-1", messages=messages, max_tokens=1200
         )
+        # Restore stdout
+        sys.stdout = old_stdout
+        output = new_stdout.getvalue().strip()
         # Add any assertions here to check the response
-        print(response)
-        text_response = response['choices'][0]['message']['content']
-        print(len(text_response))
-        assert(len(text_response) > 2000)
-        print(response.response_ms)
+        if "LiteLLM.Anthropic: Max Tokens Set" not in output:
+            raise Exception("Required log message not found!")
+        print(f"response: {response}")
+        litellm.set_verbose = False
     except Exception as e:
         pytest.fail(f"Error occurred: {e}")
-# test_completion_claude_max_tokens()
+test_completion_claude_max_tokens()

 # def test_completion_oobabooga():
 #     try:
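To assert on that log line, the test swaps sys.stdout for an io.StringIO buffer around the completion() call and inspects what was printed. A self-contained sketch of the capture pattern, with a plain print() standing in for the library's verbose logging:

```python
import io
import sys

old_stdout = sys.stdout
sys.stdout = new_stdout = io.StringIO()  # everything print()ed now lands in the buffer
try:
    print("LiteLLM.Anthropic: Max Tokens Set")  # stand-in for print_verbose() output
finally:
    sys.stdout = old_stdout  # always restore, even if the body raises

output = new_stdout.getvalue().strip()
assert "LiteLLM.Anthropic: Max Tokens Set" in output
```

Note that the test itself restores stdout only on the success path, so an exception inside completion() would leave stdout redirected when pytest.fail reports the error; a finally block (as above) or pytest's built-in capsys fixture sidesteps that.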
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "litellm"
-version = "0.1.792"
+version = "0.1.793"
 description = "Library to easily interface with LLM API providers"
 authors = ["BerriAI"]
 license = "MIT License"