mirror of https://github.com/BerriAI/litellm.git
synced 2025-04-26 19:24:27 +00:00

fix langsmith logging test

This commit is contained in:
parent 21b8fdf8bf
commit e49001c5f2

3 changed files with 9 additions and 27 deletions
Changes to the Langsmith logger integration:

@@ -8,6 +8,7 @@ from datetime import datetime
 from typing import Any, List, Optional, Union

 import dotenv  # type: ignore
+import httpx
 import requests  # type: ignore
 from pydantic import BaseModel  # type: ignore

@@ -59,7 +60,9 @@ class LangsmithLogger(CustomLogger):
         self.langsmith_base_url = os.getenv(
             "LANGSMITH_BASE_URL", "https://api.smith.langchain.com"
         )
-        self.async_httpx_client = AsyncHTTPHandler()
+        self.async_httpx_client = AsyncHTTPHandler(
+            timeout=httpx.Timeout(timeout=600.0, connect=5.0)
+        )

     def _prepare_log_data(self, kwargs, response_obj, start_time, end_time):
         import datetime
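The new constructor argument gives the logger's HTTP client an overall 600-second budget while failing fast (5 seconds) if a connection cannot be established. A minimal sketch of the same timeout semantics on a plain httpx.AsyncClient; AsyncHTTPHandler is litellm's wrapper around httpx, and the endpoint path below is illustrative only:

    import asyncio

    import httpx

    async def post_with_timeout() -> int:
        # 600s total budget for the request, but give up after 5s if the
        # TCP/TLS connection to the LangSmith host cannot be established.
        timeout = httpx.Timeout(timeout=600.0, connect=5.0)
        async with httpx.AsyncClient(timeout=timeout) as client:
            resp = await client.post(
                "https://api.smith.langchain.com/runs",  # path shown for illustration
                json={"name": "example-run"},
            )
            return resp.status_code

    # asyncio.run(post_with_timeout())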
Changes to the model cost and context window map (vertex_ai medlm entries removed):

@@ -1820,26 +1820,6 @@
     "supports_vision": true,
     "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
   },
-  "medlm-medium": {
-    "max_tokens": 8192,
-    "max_input_tokens": 32768,
-    "max_output_tokens": 8192,
-    "input_cost_per_character": 0.0000005,
-    "output_cost_per_character": 0.000001,
-    "litellm_provider": "vertex_ai-language-models",
-    "mode": "chat",
-    "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
-  },
-  "medlm-large": {
-    "max_tokens": 1024,
-    "max_input_tokens": 8192,
-    "max_output_tokens": 1024,
-    "input_cost_per_character": 0.000005,
-    "output_cost_per_character": 0.000015,
-    "litellm_provider": "vertex_ai-language-models",
-    "mode": "chat",
-    "source": "https://cloud.google.com/vertex-ai/generative-ai/docs/learn/models#foundation_models"
-  },
   "vertex_ai/claude-3-sonnet@20240229": {
     "max_tokens": 4096,
     "max_input_tokens": 200000,
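For context, entries in this map encode per-character pricing and context limits that cost tracking relies on; removing the medlm blocks drops that metadata for those models. A rough sketch of the arithmetic such an entry encodes (plain Python, not litellm's actual cost helper; the prompt and response strings are made up):

    # Per-character prices copied from the removed "medlm-medium" entry.
    entry = {
        "input_cost_per_character": 0.0000005,
        "output_cost_per_character": 0.000001,
    }

    def estimate_cost(prompt: str, completion: str, prices: dict) -> float:
        # Cost = input characters * input price + output characters * output price.
        return (
            len(prompt) * prices["input_cost_per_character"]
            + len(completion) * prices["output_cost_per_character"]
        )

    print(estimate_cost("What is a corticosteroid?", "A corticosteroid is a type of steroid hormone.", entry))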
Changes to the Langsmith logging tests:

@@ -20,13 +20,11 @@ verbose_logger.setLevel(logging.DEBUG)
 litellm.set_verbose = True
 import time

-test_langsmith_logger = LangsmithLogger()
-
-
 @pytest.mark.asyncio()
-async def test_langsmith_logging():
+async def test_async_langsmith_logging():
     try:
+        test_langsmith_logger = LangsmithLogger()
         run_id = str(uuid.uuid4())
         litellm.set_verbose = True
         litellm.callbacks = ["langsmith"]
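Constructing LangsmithLogger() inside the test body, rather than at module import time, means the logger is created only when the test actually runs and picks up whatever environment the test session has set. A minimal sketch of the renamed test's shape, assuming the usual litellm test conventions (mock_response to avoid a real provider call; the metadata key and assertion are placeholders, not the repository's actual checks):

    import uuid

    import pytest

    import litellm
    from litellm.integrations.langsmith import LangsmithLogger  # module path assumed

    @pytest.mark.asyncio()
    async def test_async_langsmith_logging():
        # Logger is created per-test instead of at import time.
        test_langsmith_logger = LangsmithLogger()
        litellm.callbacks = ["langsmith"]

        run_id = str(uuid.uuid4())
        response = await litellm.acompletion(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "hi"}],
            mock_response="hello",          # mocked, so no real provider call is made
            metadata={"id": run_id},        # illustrative metadata key
        )
        assert response is not None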
@@ -84,7 +82,7 @@ async def test_langsmith_logging():
 # test_langsmith_logging()


-def test_langsmith_logging_with_metadata():
+def test_async_langsmith_logging_with_metadata():
     try:
         litellm.success_callback = ["langsmith"]
         litellm.set_verbose = True
@@ -104,8 +102,9 @@ def test_langsmith_logging_with_metadata():

 @pytest.mark.parametrize("sync_mode", [False, True])
 @pytest.mark.asyncio
-async def test_langsmith_logging_with_streaming_and_metadata(sync_mode):
+async def test_async_langsmith_logging_with_streaming_and_metadata(sync_mode):
     try:
+        test_langsmith_logger = LangsmithLogger()
         litellm.success_callback = ["langsmith"]
         litellm.set_verbose = True
         run_id = str(uuid.uuid4())
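The sync_mode parametrization exercises both the blocking and async call paths with one test body. A rough sketch of that pattern, assuming mock_response also works together with stream=True (the model name and assertion are placeholders):

    import pytest

    import litellm

    @pytest.mark.parametrize("sync_mode", [False, True])
    @pytest.mark.asyncio
    async def test_streaming_both_paths(sync_mode):
        messages = [{"role": "user", "content": "hi"}]
        if sync_mode:
            # Blocking call path: iterate the stream synchronously.
            stream = litellm.completion(
                model="gpt-3.5-turbo",
                messages=messages,
                mock_response="hello",
                stream=True,
            )
            chunks = [chunk for chunk in stream]
        else:
            # Async call path: iterate the stream with async for.
            stream = await litellm.acompletion(
                model="gpt-3.5-turbo",
                messages=messages,
                mock_response="hello",
                stream=True,
            )
            chunks = [chunk async for chunk in stream]
        assert len(chunks) > 0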