forked from phoenix/litellm-mirror
feat - make anthropic async
parent a2c63075ef
commit 58c4b02447
3 changed files with 231 additions and 140 deletions
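What the commit enables end to end: litellm.acompletion for an Anthropic model can now drive a native async HTTP request (via the new module-level async_handler) instead of only wrapping the blocking requests path in a thread executor. A minimal usage sketch, assuming an Anthropic-routed model name and a configured API key:

```python
# Hypothetical usage of the async path this commit wires up.
import asyncio
import litellm

async def main():
    response = await litellm.acompletion(
        model="claude-3-opus-20240229",  # assumption: any Anthropic-routed model name
        messages=[{"role": "user", "content": "Say hello"}],
    )
    print(response)

asyncio.run(main())
```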
litellm/llms/anthropic.py

@@ -7,6 +7,9 @@ from typing import Callable, Optional, List
 from litellm.utils import ModelResponse, Usage, map_finish_reason, CustomStreamWrapper
 import litellm
 from .prompt_templates.factory import prompt_factory, custom_prompt
+from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
+
+async_handler = AsyncHTTPHandler()
 import httpx
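Note the module-level async_handler: a single handler (and its underlying connection pool) is shared by every Anthropic call in the process. A minimal sketch of the same pattern in plain httpx, assuming the handler wraps httpx.AsyncClient as the http_handler.py changes below suggest:

```python
import httpx

# One shared client keeps connections pooled across requests
# instead of re-opening them on every call.
_shared_client = httpx.AsyncClient()

async def post_once(url: str, body: str, headers: dict) -> httpx.Response:
    # `content=` sends a pre-serialized string body as-is.
    return await _shared_client.post(url, content=body, headers=headers)
```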
@@ -36,7 +39,9 @@ class AnthropicConfig:
     to pass metadata to anthropic, it's {"user_id": "any-relevant-information"}
     """

-    max_tokens: Optional[int] = 4096  # anthropic requires a default value (Opus, Sonnet, and Haiku have the same default)
+    max_tokens: Optional[int] = (
+        4096  # anthropic requires a default value (Opus, Sonnet, and Haiku have the same default)
+    )
     stop_sequences: Optional[list] = None
     temperature: Optional[int] = None
     top_p: Optional[int] = None
@@ -46,7 +51,9 @@ class AnthropicConfig:

     def __init__(
         self,
-        max_tokens: Optional[int] = 4096,  # You can pass in a value yourself or use the default value 4096
+        max_tokens: Optional[
+            int
+        ] = 4096,  # You can pass in a value yourself or use the default value 4096
         stop_sequences: Optional[list] = None,
         temperature: Optional[int] = None,
         top_p: Optional[int] = None,
@@ -95,121 +102,18 @@ def validate_environment(api_key, user_headers):
     return headers


-def completion(
-    model: str,
-    messages: list,
-    api_base: str,
-    custom_prompt_dict: dict,
-    model_response: ModelResponse,
-    print_verbose: Callable,
-    encoding,
-    api_key,
-    logging_obj,
-    optional_params=None,
-    litellm_params=None,
-    logger_fn=None,
-    headers={},
+def process_response(
+    model,
+    response,
+    model_response,
+    _is_function_call,
+    stream,
+    logging_obj,
+    api_key,
+    data,
+    messages,
+    print_verbose,
 ):
-    headers = validate_environment(api_key, headers)
-    _is_function_call = False
-    messages = copy.deepcopy(messages)
-    optional_params = copy.deepcopy(optional_params)
-    if model in custom_prompt_dict:
-        # check if the model has a registered custom prompt
-        model_prompt_details = custom_prompt_dict[model]
-        prompt = custom_prompt(
-            role_dict=model_prompt_details["roles"],
-            initial_prompt_value=model_prompt_details["initial_prompt_value"],
-            final_prompt_value=model_prompt_details["final_prompt_value"],
-            messages=messages,
-        )
-    else:
-        # Separate system prompt from rest of message
-        system_prompt_indices = []
-        system_prompt = ""
-        for idx, message in enumerate(messages):
-            if message["role"] == "system":
-                system_prompt += message["content"]
-                system_prompt_indices.append(idx)
-        if len(system_prompt_indices) > 0:
-            for idx in reversed(system_prompt_indices):
-                messages.pop(idx)
-        if len(system_prompt) > 0:
-            optional_params["system"] = system_prompt
-        # Format rest of message according to anthropic guidelines
-        try:
-            messages = prompt_factory(
-                model=model, messages=messages, custom_llm_provider="anthropic"
-            )
-        except Exception as e:
-            raise AnthropicError(status_code=400, message=str(e))
-
-    ## Load Config
-    config = litellm.AnthropicConfig.get_config()
-    for k, v in config.items():
-        if (
-            k not in optional_params
-        ):  # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in
-            optional_params[k] = v
-
-    ## Handle Tool Calling
-    if "tools" in optional_params:
-        _is_function_call = True
-        headers["anthropic-beta"] = "tools-2024-04-04"
-
-        anthropic_tools = []
-        for tool in optional_params["tools"]:
-            new_tool = tool["function"]
-            new_tool["input_schema"] = new_tool.pop("parameters")  # rename key
-            anthropic_tools.append(new_tool)
-
-        optional_params["tools"] = anthropic_tools
-
-    stream = optional_params.pop("stream", None)
-
-    data = {
-        "model": model,
-        "messages": messages,
-        **optional_params,
-    }
-
-    ## LOGGING
-    logging_obj.pre_call(
-        input=messages,
-        api_key=api_key,
-        additional_args={
-            "complete_input_dict": data,
-            "api_base": api_base,
-            "headers": headers,
-        },
-    )
-    print_verbose(f"_is_function_call: {_is_function_call}")
-    ## COMPLETION CALL
-    if (
-        stream and not _is_function_call
-    ):  # if function call - fake the streaming (need complete blocks for output parsing in openai format)
-        print_verbose("makes anthropic streaming POST request")
-        data["stream"] = stream
-        response = requests.post(
-            api_base,
-            headers=headers,
-            data=json.dumps(data),
-            stream=stream,
-        )
-
-        if response.status_code != 200:
-            raise AnthropicError(
-                status_code=response.status_code, message=response.text
-            )
-
-        return response.iter_lines()
-    else:
-        response = requests.post(api_base, headers=headers, data=json.dumps(data))
-        if response.status_code != 200:
-            raise AnthropicError(
-                status_code=response.status_code, message=response.text
-            )
-
     ## LOGGING
     logging_obj.post_call(
         input=messages,
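The point of this hunk: response parsing moves out of completion() into a standalone process_response(), so the sync and async transports added further down can converge on one parser. A self-contained sketch of that pattern; every name here is an illustrative stand-in, not litellm's implementation:

```python
import asyncio
import json

def process(payload: dict) -> str:
    # Shared post-processing lives in exactly one place.
    return payload.get("text", "")

def sync_fetch() -> str:
    raw = json.dumps({"text": "hi"})  # stands in for requests.post(...).text
    return process(json.loads(raw))

async def async_fetch() -> str:
    raw = json.dumps({"text": "hi"})  # stands in for an awaited async_handler.post(...)
    return process(json.loads(raw))

assert sync_fetch() == asyncio.run(async_fetch()) == "hi"
```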
@@ -222,9 +126,7 @@ def completion(
     try:
         completion_response = response.json()
     except:
-        raise AnthropicError(
-            message=response.text, status_code=response.status_code
-        )
+        raise AnthropicError(message=response.text, status_code=response.status_code)
     if "error" in completion_response:
         raise AnthropicError(
             message=str(completion_response["error"]),
@@ -328,6 +230,193 @@ def completion(
     return model_response


+async def acompletion_function(
+    model: str,
+    messages: list,
+    api_base: str,
+    custom_prompt_dict: dict,
+    model_response: ModelResponse,
+    print_verbose: Callable,
+    encoding,
+    api_key,
+    logging_obj,
+    stream,
+    _is_function_call,
+    data=None,
+    optional_params=None,
+    litellm_params=None,
+    logger_fn=None,
+    headers={},
+):
+    response = await async_handler.post(
+        api_base, headers=headers, data=json.dumps(data)
+    )
+    return process_response(
+        model=model,
+        response=response,
+        model_response=model_response,
+        _is_function_call=_is_function_call,
+        stream=stream,
+        logging_obj=logging_obj,
+        api_key=api_key,
+        data=data,
+        messages=messages,
+        print_verbose=print_verbose,
+    )
+
+
+def completion(
+    model: str,
+    messages: list,
+    api_base: str,
+    custom_prompt_dict: dict,
+    model_response: ModelResponse,
+    print_verbose: Callable,
+    encoding,
+    api_key,
+    logging_obj,
+    optional_params=None,
+    acompletion=None,
+    litellm_params=None,
+    logger_fn=None,
+    headers={},
+):
+    headers = validate_environment(api_key, headers)
+    _is_function_call = False
+    messages = copy.deepcopy(messages)
+    optional_params = copy.deepcopy(optional_params)
+    if model in custom_prompt_dict:
+        # check if the model has a registered custom prompt
+        model_prompt_details = custom_prompt_dict[model]
+        prompt = custom_prompt(
+            role_dict=model_prompt_details["roles"],
+            initial_prompt_value=model_prompt_details["initial_prompt_value"],
+            final_prompt_value=model_prompt_details["final_prompt_value"],
+            messages=messages,
+        )
+    else:
+        # Separate system prompt from rest of message
+        system_prompt_indices = []
+        system_prompt = ""
+        for idx, message in enumerate(messages):
+            if message["role"] == "system":
+                system_prompt += message["content"]
+                system_prompt_indices.append(idx)
+        if len(system_prompt_indices) > 0:
+            for idx in reversed(system_prompt_indices):
+                messages.pop(idx)
+        if len(system_prompt) > 0:
+            optional_params["system"] = system_prompt
+        # Format rest of message according to anthropic guidelines
+        try:
+            messages = prompt_factory(
+                model=model, messages=messages, custom_llm_provider="anthropic"
+            )
+        except Exception as e:
+            raise AnthropicError(status_code=400, message=str(e))
+
+    ## Load Config
+    config = litellm.AnthropicConfig.get_config()
+    for k, v in config.items():
+        if (
+            k not in optional_params
+        ):  # completion(top_k=3) > anthropic_config(top_k=3) <- allows for dynamic variables to be passed in
+            optional_params[k] = v
+
+    ## Handle Tool Calling
+    if "tools" in optional_params:
+        _is_function_call = True
+        headers["anthropic-beta"] = "tools-2024-04-04"
+
+        anthropic_tools = []
+        for tool in optional_params["tools"]:
+            new_tool = tool["function"]
+            new_tool["input_schema"] = new_tool.pop("parameters")  # rename key
+            anthropic_tools.append(new_tool)
+
+        optional_params["tools"] = anthropic_tools
+
+    stream = optional_params.pop("stream", None)
+
+    data = {
+        "model": model,
+        "messages": messages,
+        **optional_params,
+    }
+
+    ## LOGGING
+    logging_obj.pre_call(
+        input=messages,
+        api_key=api_key,
+        additional_args={
+            "complete_input_dict": data,
+            "api_base": api_base,
+            "headers": headers,
+        },
+    )
+    print_verbose(f"_is_function_call: {_is_function_call}")
+    if acompletion == True:
+        if optional_params.get("stream", False):
+            pass
+        else:
+            return acompletion_function(
+                model=model,
+                messages=messages,
+                data=data,
+                api_base=api_base,
+                custom_prompt_dict=custom_prompt_dict,
+                model_response=model_response,
+                print_verbose=print_verbose,
+                encoding=encoding,
+                api_key=api_key,
+                logging_obj=logging_obj,
+                optional_params=optional_params,
+                stream=stream,
+                _is_function_call=_is_function_call,
+                litellm_params=litellm_params,
+                logger_fn=logger_fn,
+                headers=headers,
+            )
+    else:
+        ## COMPLETION CALL
+        if (
+            stream and not _is_function_call
+        ):  # if function call - fake the streaming (need complete blocks for output parsing in openai format)
+            print_verbose("makes anthropic streaming POST request")
+            data["stream"] = stream
+            response = requests.post(
+                api_base,
+                headers=headers,
+                data=json.dumps(data),
+                stream=stream,
+            )
+
+            if response.status_code != 200:
+                raise AnthropicError(
+                    status_code=response.status_code, message=response.text
+                )
+
+            return response.iter_lines()
+        else:
+            response = requests.post(api_base, headers=headers, data=json.dumps(data))
+            if response.status_code != 200:
+                raise AnthropicError(
+                    status_code=response.status_code, message=response.text
+                )
+        return process_response(
+            model=model,
+            response=response,
+            model_response=model_response,
+            _is_function_call=_is_function_call,
+            stream=stream,
+            logging_obj=logging_obj,
+            api_key=api_key,
+            data=data,
+            messages=messages,
+            print_verbose=print_verbose,
+        )
+
+
 class ModelResponseIterator:
     def __init__(self, model_response):
         self.model_response = model_response
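With acompletion == True (and no streaming), the synchronous completion() entrypoint returns the coroutine produced by acompletion_function rather than a finished response, leaving it to the caller to await. A minimal, runnable sketch of that dispatch shape, assuming the router detects coroutine results after its executor call; all names here are illustrative:

```python
import asyncio

async def _async_impl(x: int) -> int:
    return x * 2

def entrypoint(x: int, use_async: bool = False):
    if use_async:
        return _async_impl(x)  # a coroutine; the caller must await it
    return x * 2

async def caller() -> int:
    result = entrypoint(21, use_async=True)
    if asyncio.iscoroutine(result):  # detect-and-await, as the router must
        result = await result
    return result

print(asyncio.run(caller()))  # 42
```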
litellm/llms/custom_httpx/http_handler.py

@@ -1,5 +1,5 @@
 import httpx, asyncio
-from typing import Optional
+from typing import Optional, Union


 class AsyncHTTPHandler:

@@ -25,7 +25,7 @@ class AsyncHTTPHandler:
     async def post(
         self,
         url: str,
-        data: Optional[dict] = None,
+        data: Optional[Union[dict, str]] = None,
         params: Optional[dict] = None,
         headers: Optional[dict] = None,
     ):
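Why data widens to Optional[Union[dict, str]]: the new Anthropic path posts json.dumps(data), a pre-serialized string, not a dict. A short httpx sketch of the same call shape, an assumed equivalent rather than litellm's exact handler code:

```python
import json
import httpx

async def post_json_string(url: str, payload: dict, headers: dict) -> httpx.Response:
    async with httpx.AsyncClient() as client:
        # A raw string body goes through unchanged (`content=` here; the
        # handler accepts it via `data=`), whereas a dict body would be
        # form-encoded by httpx instead.
        return await client.post(url, content=json.dumps(payload), headers=headers)
```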
litellm/main.py

@@ -304,6 +304,7 @@ async def acompletion(
         or custom_llm_provider == "vertex_ai"
         or custom_llm_provider == "gemini"
         or custom_llm_provider == "sagemaker"
+        or custom_llm_provider == "anthropic"
         or custom_llm_provider in litellm.openai_compatible_providers
     ):  # currently implemented aiohttp calls for just azure, openai, hf, ollama, vertex ai soon all.
         init_response = await loop.run_in_executor(None, func_with_context)
@@ -1184,6 +1185,7 @@ def completion(
             model=model,
             messages=messages,
             api_base=api_base,
+            acompletion=acompletion,
             custom_prompt_dict=litellm.custom_prompt_dict,
             model_response=model_response,
             print_verbose=print_verbose,