feat(assistants/main.py): support litellm.get_assistants() and litellm.get_messages()
This commit is contained in:
parent cad01fb586
commit 8fe6c9b401
3 changed files with 265 additions and 6 deletions
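
The two new top-level helpers follow the same calling convention as the rest of litellm's assistants surface. A quick usage sketch (assumes OPENAI_API_KEY is exported and the account has at least one assistant; the thread id is a placeholder):

import litellm

# list the assistants on the account
assistants = litellm.get_assistants(custom_llm_provider="openai")

# list the messages on an existing thread ("thread_abc123" is a placeholder)
messages = litellm.get_messages(
    custom_llm_provider="openai", thread_id="thread_abc123"
)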
@@ -15,6 +15,75 @@ openai_assistants_api = OpenAIAssistantsAPI()
### ASSISTANTS ###


def get_assistants(
    custom_llm_provider: Literal["openai"],
    client: Optional[OpenAI] = None,
    **kwargs,
) -> SyncCursorPage[Assistant]:
    optional_params = GenericLiteLLMParams(**kwargs)

    ### TIMEOUT LOGIC ###
    timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
    # set timeout for 10 minutes by default

    if (
        timeout is not None
        and isinstance(timeout, httpx.Timeout)
        and supports_httpx_timeout(custom_llm_provider) == False
    ):
        read_timeout = timeout.read or 600
        timeout = read_timeout  # default 10 min timeout
    elif timeout is not None and not isinstance(timeout, httpx.Timeout):
        timeout = float(timeout)  # type: ignore
    elif timeout is None:
        timeout = 600.0

    response: Optional[SyncCursorPage[Assistant]] = None
    if custom_llm_provider == "openai":
        api_base = (
            optional_params.api_base  # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
            or litellm.api_base
            or os.getenv("OPENAI_API_BASE")
            or "https://api.openai.com/v1"
        )
        organization = (
            optional_params.organization
            or litellm.organization
            or os.getenv("OPENAI_ORGANIZATION", None)
            or None  # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105
        )
        # set API KEY
        api_key = (
            optional_params.api_key
            or litellm.api_key  # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there
            or litellm.openai_key
            or os.getenv("OPENAI_API_KEY")
        )
        response = openai_assistants_api.get_assistants(
            api_base=api_base,
            api_key=api_key,
            timeout=timeout,
            max_retries=optional_params.max_retries,
            organization=organization,
            client=client,
        )
    else:
        raise litellm.exceptions.BadRequestError(
            message="LiteLLM doesn't support {} for 'get_assistants'. Only 'openai' is supported.".format(
                custom_llm_provider
            ),
            model="n/a",
            llm_provider=custom_llm_provider,
            response=httpx.Response(
                status_code=400,
                content="Unsupported provider",
                request=httpx.Request(method="create_thread", url="https://github.com/BerriAI/litellm"),  # type: ignore
            ),
        )
    return response
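
For any provider other than "openai", get_assistants raises instead of returning. A minimal sketch of that error path ("groq" here is only a stand-in for an unsupported value):

import litellm

try:
    litellm.get_assistants(custom_llm_provider="groq")  # type: ignore[arg-type]
except litellm.exceptions.BadRequestError as e:
    # "LiteLLM doesn't support groq for 'get_assistants'. Only 'openai' is supported."
    print(str(e))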


### THREADS ###

@@ -267,4 +336,160 @@ def add_message(
    return response


def get_messages(
    custom_llm_provider: Literal["openai"],
    thread_id: str,
    client: Optional[OpenAI] = None,
    **kwargs,
) -> SyncCursorPage[OpenAIMessage]:
    optional_params = GenericLiteLLMParams(**kwargs)

    ### TIMEOUT LOGIC ###
    timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
    # set timeout for 10 minutes by default

    if (
        timeout is not None
        and isinstance(timeout, httpx.Timeout)
        and supports_httpx_timeout(custom_llm_provider) == False
    ):
        read_timeout = timeout.read or 600
        timeout = read_timeout  # default 10 min timeout
    elif timeout is not None and not isinstance(timeout, httpx.Timeout):
        timeout = float(timeout)  # type: ignore
    elif timeout is None:
        timeout = 600.0

    response: Optional[SyncCursorPage[OpenAIMessage]] = None
    if custom_llm_provider == "openai":
        api_base = (
            optional_params.api_base  # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
            or litellm.api_base
            or os.getenv("OPENAI_API_BASE")
            or "https://api.openai.com/v1"
        )
        organization = (
            optional_params.organization
            or litellm.organization
            or os.getenv("OPENAI_ORGANIZATION", None)
            or None  # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105
        )
        # set API KEY
        api_key = (
            optional_params.api_key
            or litellm.api_key  # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there
            or litellm.openai_key
            or os.getenv("OPENAI_API_KEY")
        )
        response = openai_assistants_api.get_messages(
            thread_id=thread_id,
            api_base=api_base,
            api_key=api_key,
            timeout=timeout,
            max_retries=optional_params.max_retries,
            organization=organization,
            client=client,
        )
    else:
        raise litellm.exceptions.BadRequestError(
            message="LiteLLM doesn't support {} for 'get_messages'. Only 'openai' is supported.".format(
                custom_llm_provider
            ),
            model="n/a",
            llm_provider=custom_llm_provider,
            response=httpx.Response(
                status_code=400,
                content="Unsupported provider",
                request=httpx.Request(method="create_thread", url="https://github.com/BerriAI/litellm"),  # type: ignore
            ),
        )

    return response
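
get_messages returns the OpenAI SDK's SyncCursorPage, so callers can read .data directly, as the function's own return type suggests. A short sketch, assuming thread_id names an existing thread:

import litellm

page = litellm.get_messages(custom_llm_provider="openai", thread_id=thread_id)
for message in page.data:
    print(message.id, message.role)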


### RUNS ###

def run_thread(
    custom_llm_provider: Literal["openai"],
    thread_id: str,
    assistant_id: str,
    additional_instructions: Optional[str] = None,
    instructions: Optional[str] = None,
    metadata: Optional[dict] = None,
    model: Optional[str] = None,
    stream: Optional[bool] = None,
    tools: Optional[Iterable[AssistantToolParam]] = None,
    client: Optional[OpenAI] = None,
    **kwargs,
) -> Run:
    """Run a given thread + assistant."""
    optional_params = GenericLiteLLMParams(**kwargs)

    ### TIMEOUT LOGIC ###
    timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
    # set timeout for 10 minutes by default

    if (
        timeout is not None
        and isinstance(timeout, httpx.Timeout)
        and supports_httpx_timeout(custom_llm_provider) == False
    ):
        read_timeout = timeout.read or 600
        timeout = read_timeout  # default 10 min timeout
    elif timeout is not None and not isinstance(timeout, httpx.Timeout):
        timeout = float(timeout)  # type: ignore
    elif timeout is None:
        timeout = 600.0

    response: Optional[Run] = None
    if custom_llm_provider == "openai":
        api_base = (
            optional_params.api_base  # for deepinfra/perplexity/anyscale/groq we check in get_llm_provider and pass in the api base from there
            or litellm.api_base
            or os.getenv("OPENAI_API_BASE")
            or "https://api.openai.com/v1"
        )
        organization = (
            optional_params.organization
            or litellm.organization
            or os.getenv("OPENAI_ORGANIZATION", None)
            or None  # default - https://github.com/openai/openai-python/blob/284c1799070c723c6a553337134148a7ab088dd8/openai/util.py#L105
        )
        # set API KEY
        api_key = (
            optional_params.api_key
            or litellm.api_key  # for deepinfra/perplexity/anyscale we check in get_llm_provider and pass in the api key from there
            or litellm.openai_key
            or os.getenv("OPENAI_API_KEY")
        )
        response = openai_assistants_api.run_thread(
            thread_id=thread_id,
            assistant_id=assistant_id,
            additional_instructions=additional_instructions,
            instructions=instructions,
            metadata=metadata,
            model=model,
            stream=stream,
            tools=tools,
            api_base=api_base,
            api_key=api_key,
            timeout=timeout,
            max_retries=optional_params.max_retries,
            organization=organization,
            client=client,
        )
    else:
        raise litellm.exceptions.BadRequestError(
            message="LiteLLM doesn't support {} for 'run_thread'. Only 'openai' is supported.".format(
                custom_llm_provider
            ),
            model="n/a",
            llm_provider=custom_llm_provider,
            response=httpx.Response(
                status_code=400,
                content="Unsupported provider",
                request=httpx.Request(method="create_thread", url="https://github.com/BerriAI/litellm"),  # type: ignore
            ),
        )
    return response
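
The shared timeout block accepts either a plain number (coerced with float()) or an httpx.Timeout, which collapses to its read timeout for providers without httpx support. A sketch of passing each form through run_thread's **kwargs (ids and values are placeholders):

import httpx
import litellm

# plain number: normalized via float(timeout)
run = litellm.run_thread(
    custom_llm_provider="openai",
    thread_id=thread_id,        # placeholder: an existing thread id
    assistant_id=assistant_id,  # placeholder: an existing assistant id
    timeout=120,
)

# granular httpx.Timeout: passed through for providers that support it
run = litellm.run_thread(
    custom_llm_provider="openai",
    thread_id=thread_id,
    assistant_id=assistant_id,
    timeout=httpx.Timeout(600.0, connect=5.0),
)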
@@ -1282,10 +1282,10 @@ class OpenAIAssistantsAPI(BaseLLM):
def get_assistants(
    self,
-   api_key: str,
+   api_key: Optional[str],
    api_base: Optional[str],
    timeout: Union[float, httpx.Timeout],
-   max_retries: int,
+   max_retries: Optional[int],
    organization: Optional[str],
    client: Optional[OpenAI],
) -> SyncCursorPage[Assistant]:
@@ -1340,10 +1340,10 @@ class OpenAIAssistantsAPI(BaseLLM):
def get_messages(
    self,
    thread_id: str,
-   api_key: str,
+   api_key: Optional[str],
    api_base: Optional[str],
    timeout: Union[float, httpx.Timeout],
-   max_retries: int,
+   max_retries: Optional[int],
    organization: Optional[str],
    client: Optional[OpenAI] = None,
) -> SyncCursorPage[OpenAIMessage]:
@@ -1440,10 +1440,10 @@ class OpenAIAssistantsAPI(BaseLLM):
    model: Optional[str],
    stream: Optional[Literal[False]] | Literal[True],
    tools: Optional[Iterable[AssistantToolParam]],
-   api_key: str,
+   api_key: Optional[str],
    api_base: Optional[str],
    timeout: Union[float, httpx.Timeout],
-   max_retries: int,
+   max_retries: Optional[int],
    organization: Optional[str],
    client: Optional[OpenAI],
) -> Run:
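
The only change across these three signatures is loosening api_key and max_retries to Optional, which matches the wrapped client's behavior: the OpenAI SDK resolves a missing key from the environment. A sketch, assuming OPENAI_API_KEY is exported:

from openai import OpenAI

# api_key=None is accepted; the SDK falls back to OPENAI_API_KEY
client = OpenAI(api_key=None)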
@@ -68,6 +68,40 @@ def test_add_message_litellm():
    assert isinstance(added_message, Message)


def test_run_thread_litellm():
    """
    - Get Assistants
    - Create thread
    - Create run w/ Assistants + Thread
    """
    assistants = litellm.get_assistants(custom_llm_provider="openai")

    ## get the first assistant ###
    assistant_id = assistants.data[0].id

    new_thread = test_create_thread_litellm()

    thread_id = new_thread.id

    # add message to thread
    message: MessageData = {"role": "user", "content": "Hey, how's it going?"}  # type: ignore
    added_message = litellm.add_message(
        thread_id=new_thread.id, custom_llm_provider="openai", **message
    )

    run = litellm.run_thread(
        custom_llm_provider="openai", thread_id=thread_id, assistant_id=assistant_id
    )

    if run.status == "completed":
        messages = litellm.get_messages(
            thread_id=new_thread.id, custom_llm_provider="openai"
        )
        assert isinstance(messages.data[0], Message)
    else:
        pytest.fail("An unexpected error occurred when running the thread")
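
The assertion above only covers the happy path; an Assistants run can also report "queued" or "in_progress". A hedged polling sketch for that case, re-fetching the run with the raw OpenAI SDK since this commit adds no litellm helper for it (run and thread_id come from the test above):

import time
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

while run.status in ("queued", "in_progress"):
    time.sleep(1)
    run = client.beta.threads.runs.retrieve(run.id, thread_id=thread_id)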


def test_run_thread_openai_direct():
    """
    - Get Assistants