diff --git a/litellm/integrations/langsmith.py b/litellm/integrations/langsmith.py
index b727c69e03..1ef90c1822 100644
--- a/litellm/integrations/langsmith.py
+++ b/litellm/integrations/langsmith.py
@@ -351,6 +351,16 @@ class LangsmithLogger(CustomBatchLogger):
                 queue_objects=batch_group.queue_objects,
             )
 
+    def _add_endpoint_to_url(
+        self, url: str, endpoint: str, api_version: str = "/api/v1"
+    ) -> str:
+        if api_version not in url:
+            url = f"{url.rstrip('/')}{api_version}"
+
+        if url.endswith("/"):
+            return f"{url}{endpoint}"
+        return f"{url}/{endpoint}"
+
     async def _log_batch_on_langsmith(
         self,
         credentials: LangsmithCredentialsObject,
@@ -370,7 +380,7 @@ class LangsmithLogger(CustomBatchLogger):
         """
         langsmith_api_base = credentials["LANGSMITH_BASE_URL"]
         langsmith_api_key = credentials["LANGSMITH_API_KEY"]
-        url = f"{langsmith_api_base}/runs/batch"
+        url = self._add_endpoint_to_url(langsmith_api_base, "runs/batch")
         headers = {"x-api-key": langsmith_api_key}
 
         elements_to_log = [queue_object["data"] for queue_object in queue_objects]
diff --git a/litellm/proxy/_new_secret_config.yaml b/litellm/proxy/_new_secret_config.yaml
index 209e86149d..b3969844a7 100644
--- a/litellm/proxy/_new_secret_config.yaml
+++ b/litellm/proxy/_new_secret_config.yaml
@@ -7,7 +7,9 @@ model_list:
       id: "1"
   - model_name: gpt-3.5-turbo-end-user-test
     litellm_params:
-      model: openai/random_sleep
-      api_base: http://0.0.0.0:8090
+      model: gpt-3.5-turbo
       timeout: 2
       num_retries: 0
+
+litellm_settings:
+  callbacks: ["langsmith"]
\ No newline at end of file
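
The new `_add_endpoint_to_url` helper normalizes the Langsmith base URL before joining the endpoint: it appends the `/api/v1` version segment only when the configured base URL lacks it, and it joins with exactly one slash whether or not the base ends in `/`. A minimal standalone sketch of that logic is below; the function body mirrors the diff, but the base URLs used in the assertions are hypothetical examples, not values taken from the codebase.

```python
# Standalone sketch of the URL-joining behavior introduced in this diff.
# Mirrors LangsmithLogger._add_endpoint_to_url; example URLs are hypothetical.

def _add_endpoint_to_url(url: str, endpoint: str, api_version: str = "/api/v1") -> str:
    # Append the API version segment only if the base URL doesn't already contain it.
    if api_version not in url:
        url = f"{url.rstrip('/')}{api_version}"
    # Join with exactly one slash, regardless of a trailing "/" on the base.
    if url.endswith("/"):
        return f"{url}{endpoint}"
    return f"{url}/{endpoint}"

# Bare host: version segment is added, then the endpoint.
assert _add_endpoint_to_url("https://api.smith.langchain.com", "runs/batch") == \
    "https://api.smith.langchain.com/api/v1/runs/batch"
# Base already versioned: no duplicate "/api/v1".
assert _add_endpoint_to_url("https://api.smith.langchain.com/api/v1", "runs/batch") == \
    "https://api.smith.langchain.com/api/v1/runs/batch"
# Trailing slash on the base: still a single slash at the join.
assert _add_endpoint_to_url("https://langsmith.example.com/api/v1/", "runs/batch") == \
    "https://langsmith.example.com/api/v1/runs/batch"
```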