Litellm dev 01 10 2025 p3 (#7682)

* feat(langfuse.py): log the used prompt when prompt management used
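
  A hedged sketch of the idea (not LiteLLM's actual internals): when a Langfuse-managed prompt was used for the request, pass the prompt object along with the generation payload so Langfuse links the trace to that prompt version. `build_generation_params` and its arguments are illustrative names.

  ```python
  from langfuse import Langfuse

  # Reads LANGFUSE_PUBLIC_KEY / LANGFUSE_SECRET_KEY from the environment
  langfuse = Langfuse()

  def build_generation_params(prompt_name, params):
      """Attach the managed prompt (if any) to the Langfuse generation payload."""
      if prompt_name:
          # Fetch the managed prompt from Langfuse prompt management
          prompt_client = langfuse.get_prompt(prompt_name)
          # Including the prompt object links this generation to that prompt version
          params["prompt"] = prompt_client
      return params
  ```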

* test: fix test

* docs(self_serve.md): add doc on restricting personal key creation in the UI

* feat(s3.py): support s3 logging with team alias prefixes (if available)

New preview feature
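
  A minimal sketch of the preview behavior, assuming boto3 and an illustrative bucket name: when the requesting key has a team alias, use it as the object-key prefix so each team's logs land under their own prefix in the bucket.

  ```python
  import json
  import boto3

  s3 = boto3.client("s3")

  def upload_log(payload: dict, request_id: str, team_alias=None):
      # Prefix the object key with the team alias when one is available
      key = f"{team_alias}/{request_id}.json" if team_alias else f"{request_id}.json"
      s3.put_object(
          Bucket="litellm-logs",  # illustrative bucket name
          Key=key,
          Body=json.dumps(payload).encode("utf-8"),
          ContentType="application/json",
      )
  ```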

* fix(main.py): remove old if block - simplify to just await if coroutine returned

fixes lm_studio async embedding error
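
  The simplification, as a standalone sketch (`provider_call` is a stand-in for the provider's embedding function): rather than branching per provider, call it once and await the result only if it is awaitable, which lets sync and async providers (like LM Studio's async embedding path) share the same code path.

  ```python
  import inspect

  async def get_embedding(provider_call, *args, **kwargs):
      response = provider_call(*args, **kwargs)
      if inspect.isawaitable(response):
          # Async providers return a coroutine; await it to get the real response
          response = await response
      return response
  ```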

* fix(langfuse.py): handle get prompt check
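
  A minimal sketch of the guarded lookup implied here (names are assumptions, not LiteLLM's internals): treat a failed or missing prompt fetch as "no managed prompt" rather than letting the request error out.

  ```python
  def safe_get_prompt(langfuse_client, prompt_name):
      """Return the managed prompt if it exists, else None."""
      if not prompt_name:
          return None
      try:
          return langfuse_client.get_prompt(prompt_name)
      except Exception:
          # Prompt management is optional; fall back to the raw request prompt
          return None
  ```
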
Krish Dholakia 2025-01-10 21:56:42 -08:00 committed by GitHub
parent e54d23c919
commit 953c021aa7
11 changed files with 148 additions and 112 deletions


@@ -36,16 +36,15 @@ class OpenAILikeEmbeddingHandler(OpenAILikeBase):
     ) -> EmbeddingResponse:
         response = None
         try:
-            if client is None or isinstance(client, AsyncHTTPHandler):
-                self.async_client = get_async_httpx_client(
+            if client is None or not isinstance(client, AsyncHTTPHandler):
+                async_client = get_async_httpx_client(
                     llm_provider=litellm.LlmProviders.OPENAI,
                     params={"timeout": timeout},
                 )
             else:
-                self.async_client = client
+                async_client = client
             try:
-                response = await self.async_client.post(
+                response = await async_client.post(
                     api_base,
                     headers=headers,
                     data=json.dumps(data),
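
In short: the old condition created a fresh client even when an async client was already passed in, and the `else` branch could stash a non-async handler on `self.async_client`, which would then fail at `await`. Inverting the `isinstance` check reuses the caller's async client, and switching from an instance attribute to a local `async_client` variable avoids mutating shared handler state across concurrent requests.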