diff --git a/litellm/assistants/main.py b/litellm/assistants/main.py
index 1104f511c3..25d502c8b5 100644
--- a/litellm/assistants/main.py
+++ b/litellm/assistants/main.py
@@ -339,6 +339,90 @@ def create_assistants(
     return response
 
 
+def delete_assistant(
+    custom_llm_provider: Literal["openai", "azure"],
+    assistant_id: str,
+    client: Optional[Any] = None,
+    api_key: Optional[str] = None,
+    api_base: Optional[str] = None,
+    api_version: Optional[str] = None,
+    **kwargs,
+) -> Dict[str, Any]:
+    optional_params = GenericLiteLLMParams(
+        api_key=api_key, api_base=api_base, api_version=api_version, **kwargs
+    )
+
+    ### TIMEOUT LOGIC ###
+    timeout = optional_params.timeout or kwargs.get("request_timeout", 600) or 600
+    # set timeout for 10 minutes by default
+
+    if (
+        timeout is not None
+        and isinstance(timeout, httpx.Timeout)
+        and supports_httpx_timeout(custom_llm_provider) == False
+    ):
+        read_timeout = timeout.read or 600
+        timeout = read_timeout  # default 10 min timeout
+    elif timeout is not None and not isinstance(timeout, httpx.Timeout):
+        timeout = float(timeout)  # type: ignore
+    elif timeout is None:
+        timeout = 600.0
+
+    response: Optional[Dict[str, Any]] = None
+    if custom_llm_provider == "openai":
+        api_base = (
+            optional_params.api_base
+            or litellm.api_base
+            or os.getenv("OPENAI_API_BASE")
+            or "https://api.openai.com/v1"
+        )
+        organization = (
+            optional_params.organization
+            or litellm.organization
+            or os.getenv("OPENAI_ORGANIZATION", None)
+            or None
+        )
+        # set API KEY
+        api_key = (
+            optional_params.api_key
+            or litellm.api_key
+            or litellm.openai_key
+            or os.getenv("OPENAI_API_KEY")
+        )
+
+        response = openai_assistants_api.delete_assistant(
+            api_base=api_base,
+            api_key=api_key,
+            timeout=timeout,
+            max_retries=optional_params.max_retries,
+            organization=organization,
+            assistant_id=assistant_id,
+            client=client,
+        )
+    else:
+        raise litellm.exceptions.BadRequestError(
+            message="LiteLLM doesn't support {} for 'delete_assistant'. Only 'openai' is supported.".format(
+                custom_llm_provider
+            ),
+            model="n/a",
+            llm_provider=custom_llm_provider,
+            response=httpx.Response(
+                status_code=400,
+                content="Unsupported provider",
+                request=httpx.Request(
+                    method="delete_assistant", url="https://github.com/BerriAI/litellm"
+                ),
+            ),
+        )
+    if response is None:
+        raise litellm.exceptions.InternalServerError(
+            message="No response returned from 'delete_assistant'",
+            model="n/a",
+            llm_provider=custom_llm_provider,
+        )
+    return response
+
+
 ### THREADS ###
 
 
diff --git a/litellm/llms/openai.py b/litellm/llms/openai.py
index 65d4b421ef..f5c7aef48d 100644
--- a/litellm/llms/openai.py
+++ b/litellm/llms/openai.py
@@ -2440,6 +2440,38 @@ class OpenAIAssistantsAPI(BaseLLM):
         response = openai_client.beta.assistants.create(**create_assistant_data)
         return response
 
+    def delete_assistant(
+        self,
+        api_key: Optional[str],
+        api_base: Optional[str],
+        timeout: Union[float, httpx.Timeout],
+        max_retries: Optional[int],
+        organization: Optional[str],
+        assistant_id: str,
+        client=None,
+        async_create_assistants=None,
+    ):
+        # if async_create_assistants is not None and async_create_assistants == True:
+        #     return self.async_create_assistants(
+        #         api_key=api_key,
+        #         api_base=api_base,
+        #         timeout=timeout,
+        #         max_retries=max_retries,
+        #         organization=organization,
+        #         client=client,
+        #     )
+        openai_client = self.get_openai_client(
+            api_key=api_key,
+            api_base=api_base,
+            timeout=timeout,
+            max_retries=max_retries,
+            organization=organization,
+            client=client,
+        )
+
+        response = openai_client.beta.assistants.delete(assistant_id=assistant_id)
+        return response
+
     ### MESSAGES ###
 
     async def a_add_message(