feat(proxy_server.py): return litellm version in response headers

Krrish Dholakia 2024-05-08 16:00:08 -07:00
parent 1a18480225
commit 5f93cae3ff
50 changed files with 260 additions and 140 deletions
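The proxy_server.py change named in the commit subject is not part of the excerpt below (which covers cleanup in the Langsmith logger), but the general pattern is a response-header hook on the proxy's FastAPI app. A minimal sketch, assuming a FastAPI middleware and an x-litellm-version header name (both illustrative assumptions, not code taken from this diff):

# Hedged sketch only: the general shape of attaching the installed LiteLLM
# version to every proxy response via a FastAPI middleware. The header name and
# version lookup are assumptions for illustration, not the actual proxy_server.py code.
from importlib.metadata import PackageNotFoundError, version

from fastapi import FastAPI, Request

app = FastAPI()

try:
    LITELLM_VERSION = version("litellm")  # installed package version, if present
except PackageNotFoundError:
    LITELLM_VERSION = "unknown"


@app.middleware("http")
async def add_litellm_version_header(request: Request, call_next):
    # Run the downstream route handler, then tag the response with the version.
    response = await call_next(request)
    response.headers["x-litellm-version"] = LITELLM_VERSION
    return response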

@@ -1,15 +1,14 @@
 #### What this does ####
 # On success, logs events to Langsmith
-import dotenv, os
-import requests
-import requests
+import dotenv, os # type: ignore
+import requests # type: ignore
 from datetime import datetime
 dotenv.load_dotenv() # Loading env variables using dotenv
 import traceback
 import asyncio
 import types
-from pydantic import BaseModel
+from pydantic import BaseModel # type: ignore
 def is_serializable(value):
@@ -79,8 +78,6 @@ class LangsmithLogger:
         except:
             response_obj = response_obj.dict() # type: ignore
-        print(f"response_obj: {response_obj}")
         data = {
             "name": run_name,
             "run_type": "llm", # this should always be llm, since litellm always logs llm calls. Langsmith allow us to log "chain"
@@ -90,7 +87,6 @@ class LangsmithLogger:
             "start_time": start_time,
             "end_time": end_time,
         }
-        print(f"data: {data}")
         response = requests.post(
             "https://api.smith.langchain.com/runs",