diff --git a/litellm/assistants/main.py b/litellm/assistants/main.py
index 33568a5f48..acb37b1e6f 100644
--- a/litellm/assistants/main.py
+++ b/litellm/assistants/main.py
@@ -131,10 +131,6 @@ def get_assistants(
             timeout=timeout,
             max_retries=optional_params.max_retries,
             organization=organization,
-            order=getattr(optional_params, "order", "desc"),
-            limit=getattr(optional_params, "limit", 20),
-            before=getattr(optional_params, "before", None),
-            after=getattr(optional_params, "after", None),
             client=client,
             aget_assistants=aget_assistants,  # type: ignore
         )  # type: ignore
diff --git a/litellm/llms/openai/openai.py b/litellm/llms/openai/openai.py
index 1a6a278a1f..a7ab3a72e0 100644
--- a/litellm/llms/openai/openai.py
+++ b/litellm/llms/openai/openai.py
@@ -1928,10 +1928,6 @@ class OpenAIAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         organization: Optional[str],
         client: Optional[AsyncOpenAI],
-        order: Optional[str] = 'desc',
-        limit: Optional[int] = 20,
-        before: Optional[str] = None,
-        after: Optional[str] = None,
     ) -> AsyncCursorPage[Assistant]:
         openai_client = self.async_get_openai_client(
             api_key=api_key,
@@ -1941,16 +1937,8 @@ class OpenAIAssistantsAPI(BaseLLM):
             organization=organization,
             client=client,
         )
-        request_params = {
-            "order": order,
-            "limit": limit,
-        }
-        if before:
-            request_params["before"] = before
-        if after:
-            request_params["after"] = after
-
-        response = await openai_client.beta.assistants.list(**request_params)
+
+        response = await openai_client.beta.assistants.list()
 
         return response
 
@@ -1965,11 +1953,7 @@ class OpenAIAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         organization: Optional[str],
         client: Optional[AsyncOpenAI],
-        aget_assistants: Literal[True],
-        order: Optional[str] = 'desc',
-        limit: Optional[int] = 20,
-        before: Optional[str] = None,
-        after: Optional[str] = None,
+        aget_assistants: Literal[True],
     ) -> Coroutine[None, None, AsyncCursorPage[Assistant]]:
         ...
 
@@ -1982,11 +1966,7 @@ class OpenAIAssistantsAPI(BaseLLM):
         max_retries: Optional[int],
         organization: Optional[str],
         client: Optional[OpenAI],
-        aget_assistants: Optional[Literal[False]],
-        order: Optional[str] = 'desc',
-        limit: Optional[int] = 20,
-        before: Optional[str] = None,
-        after: Optional[str] = None,
+        aget_assistants: Optional[Literal[False]],
     ) -> SyncCursorPage[Assistant]:
         ...
 
@@ -2001,10 +1981,6 @@ class OpenAIAssistantsAPI(BaseLLM):
         organization: Optional[str],
         client=None,
         aget_assistants=None,
-        order: Optional[str] = 'desc',
-        limit: Optional[int] = 20,
-        before: Optional[str] = None,
-        after: Optional[str] = None,
     ):
         if aget_assistants is not None and aget_assistants is True:
             return self.async_get_assistants(
@@ -2014,10 +1990,6 @@ class OpenAIAssistantsAPI(BaseLLM):
                 max_retries=max_retries,
                 organization=organization,
                 client=client,
-                order=order,
-                limit=limit,
-                before=before,
-                after=after,
             )
         openai_client = self.get_openai_client(
             api_key=api_key,
@@ -2028,18 +2000,7 @@ class OpenAIAssistantsAPI(BaseLLM):
             client=client,
         )
 
-        request_params = {
-            "order": order,
-            "limit": limit,
-        }
-
-        if before:
-            request_params["before"] = before
-        if after:
-            request_params["after"] = after
-
-
-        response = openai_client.beta.assistants.list(**request_params)
+        response = openai_client.beta.assistants.list()
 
         return response
 
diff --git a/litellm/proxy/proxy_server.py b/litellm/proxy/proxy_server.py
index 2435fb92cf..0e614b0ae3 100644
--- a/litellm/proxy/proxy_server.py
+++ b/litellm/proxy/proxy_server.py
@@ -4434,10 +4434,6 @@ async def get_assistants(
     request: Request,
     fastapi_response: Response,
     user_api_key_dict: UserAPIKeyAuth = Depends(user_api_key_auth),
-    order: Optional[str] = Query(None, description="Sort order by the created_at timestamp of the objects. asc for ascending order and desc for descending order."),
-    limit: Optional[int] = Query(None, description="A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20."),
-    after: Optional[str] = Query(None, description="A cursor for use in pagination. after is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list."),
-    before: Optional[str] = Query(None, description="A cursor for use in pagination. before is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, starting with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list."),
 ):
     """
     Returns a list of assistants.
@@ -4460,28 +4456,6 @@ async def get_assistants(
         proxy_config=proxy_config,
     )
 
-    # Validate `order` parameter
-    if order and order not in ["asc", "desc"]:
-        raise HTTPException(
-            status_code=400, detail={"error": "order must be 'asc' or 'desc'"}
-        )
-    if order:
-        data["order"] = order
-
-    # Validate `limit` parameter
-    if limit is not None:
-        if not (1 <= limit <= 100):
-            raise HTTPException(
-                status_code=400, detail={"error": "limit must be between 1 and 100"}
-            )
-        data["limit"] = limit
-
-    # Add pagination cursors if provided
-    if after:
-        data["after"] = after
-    if before:
-        data["before"] = before
-
     # for now use custom_llm_provider=="openai" -> this will change as LiteLLM adds more providers for acreate_batch
     if llm_router is None:
         raise HTTPException(