Mirror of https://github.com/BerriAI/litellm.git (synced 2025-04-24 18:24:20 +00:00)
Litellm add managed files db (#9930)
* fix(openai.py): ensure openai file object shows up on logs
* fix(managed_files.py): return unified file id as b64 str, so retrieving the file id works as expected
* fix(managed_files.py): apply decoded file id transformation
* fix: add unit test for file id + decode logic
* fix: initial commit for litellm_proxy support with CRUD endpoints
* fix(managed_files.py): support retrieve file operation
* fix(managed_files.py): support DELETE endpoint for files
* fix(managed_files.py): support retrieving file content via the openai retrieve file content api
* fix: fix linting error
* test: update tests
* fix: fix linting error
* feat(managed_files.py): support reading / writing files in the DB
* feat(managed_files.py): delete the file from the DB on delete
* test: update testing
* fix(spend_tracking_utils.py): ensure each file create request is logged correctly
* fix(managed_files.py): fix storing / returning the managed file object from cache
* fix(files/main.py): pass litellm params to the azure route
* test: fix test
* build: add new prisma migration
* build: bump requirements
* test: add more testing
* refactor: cleanup post merge w/ main
* fix: fix code qa errors
This commit is contained in:
parent 93037ea4d3
commit 421e0a3004

19 changed files with 286 additions and 158 deletions
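The core idea in this change is a provider-agnostic "unified" file id: the proxy builds an id of the form `litellm_proxy:{format};unified_id,{uuid}`, base64-encodes it before handing it back to the client, and decodes it again on later requests. A minimal sketch of that round trip, assuming the id layout shown in the diff below (the helper names here are illustrative, not litellm's exact internals):

```python
import base64
import uuid

PREFIX = "litellm_proxy"  # mirrors SpecialEnums.LITELM_MANAGED_FILE_ID_PREFIX

def make_unified_file_id(file_format: str) -> str:
    # Layout taken from the docstring in the diff: litellm_proxy:{format};unified_id,{uuid}
    raw = f"{PREFIX}:{file_format};unified_id,{uuid.uuid4()}"
    # b64-encode so the id is safe to return as an OpenAI-style file id
    return base64.b64encode(raw.encode()).decode().rstrip("=")

def decode_unified_file_id(b64_uid: str) -> str:
    padded = b64_uid + "=" * (-len(b64_uid) % 4)  # restore any stripped padding
    return base64.b64decode(padded).decode()

b64_id = "bGl0ZWxsbV9wcm94eTphcHBsaWNhdGlvbi9wZGY7dW5pZmllZF9pZCxmYzdmMmVhNS0wZjUwLTQ5ZjYtODljMS03ZTZhNTRiMTIxMzg"
assert decode_unified_file_id(b64_id).startswith("litellm_proxy:application/pdf;unified_id,")
```

The unit tests near the end of the diff exercise exactly this b64 id.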
BIN  litellm-proxy-extras/dist/litellm_proxy_extras-0.1.4-py3-none-any.whl (vendored, new file; binary not shown)
BIN  litellm-proxy-extras/dist/litellm_proxy_extras-0.1.4.tar.gz (vendored, new file; binary not shown)
@@ -0,0 +1,18 @@
-- CreateTable
CREATE TABLE "LiteLLM_ManagedFileTable" (
    "id" TEXT NOT NULL,
    "unified_file_id" TEXT NOT NULL,
    "file_object" JSONB NOT NULL,
    "model_mappings" JSONB NOT NULL,
    "created_at" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
    "updated_at" TIMESTAMP(3) NOT NULL,

    CONSTRAINT "LiteLLM_ManagedFileTable_pkey" PRIMARY KEY ("id")
);

-- CreateIndex
CREATE UNIQUE INDEX "LiteLLM_ManagedFileTable_unified_file_id_key" ON "LiteLLM_ManagedFileTable"("unified_file_id");

-- CreateIndex
CREATE INDEX "LiteLLM_ManagedFileTable_unified_file_id_idx" ON "LiteLLM_ManagedFileTable"("unified_file_id");
@@ -354,3 +354,14 @@ enum JobStatus {
  INACTIVE
}

model LiteLLM_ManagedFileTable {
  id              String   @id @default(uuid())
  unified_file_id String   @unique // The base64 encoded unified file ID
  file_object     Json     // Stores the OpenAIFileObject
  model_mappings  Json     // Stores the mapping of model_id -> provider_file_id
  created_at      DateTime @default(now())
  updated_at      DateTime @updatedAt

  @@index([unified_file_id])
}
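For orientation, the managed-files hook later in this diff talks to this table through the generated Prisma Python client. A condensed sketch of those calls, assuming a `prisma_client` wired up the way the proxy does it (the accessor and field names follow the model above):

```python
import json

async def save_managed_file(prisma_client, unified_file_id: str, file_object, model_mappings: dict):
    # Insert a row keyed by the b64 unified file id (unique per the schema above).
    await prisma_client.db.litellm_managedfiletable.create(
        data={
            "unified_file_id": unified_file_id,
            "file_object": file_object.model_dump_json(),
            "model_mappings": json.dumps(model_mappings),
        }
    )

async def load_managed_file(prisma_client, unified_file_id: str):
    # The unique index on unified_file_id keeps this lookup cheap.
    return await prisma_client.db.litellm_managedfiletable.find_first(
        where={"unified_file_id": unified_file_id}
    )
```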
@@ -1,6 +1,6 @@
[tool.poetry]
name = "litellm-proxy-extras"
version = "0.1.3"
version = "0.1.4"
description = "Additional files for the LiteLLM Proxy. Reduces the size of the main litellm package."
authors = ["BerriAI"]
readme = "README.md"
@@ -22,7 +22,7 @@ requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.commitizen]
version = "0.1.3"
version = "0.1.4"
version_files = [
    "pyproject.toml:version",
    "../requirements.txt:litellm-proxy-extras==",
@@ -313,13 +313,20 @@ def get_format_from_file_id(file_id: Optional[str]) -> Optional[str]:
    unified_file_id = litellm_proxy:{};unified_id,{}
    If not a unified file id, returns 'file' as default format
    """
    from litellm.proxy.hooks.managed_files import _PROXY_LiteLLMManagedFiles

    if not file_id:
        return None
    try:
        if file_id.startswith(SpecialEnums.LITELM_MANAGED_FILE_ID_PREFIX.value):
            transformed_file_id = (
                _PROXY_LiteLLMManagedFiles._convert_b64_uid_to_unified_uid(file_id)
            )
            if transformed_file_id.startswith(
                SpecialEnums.LITELM_MANAGED_FILE_ID_PREFIX.value
            ):
                match = re.match(
                    f"{SpecialEnums.LITELM_MANAGED_FILE_ID_PREFIX.value}:(.*?);unified_id",
                    file_id,
                    transformed_file_id,
                )
                if match:
                    return match.group(1)
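A short usage example, based on the ids exercised in the tests at the bottom of this diff; it shows what the snippet above is meant to return (None for empty input, the extracted format otherwise):

```python
from litellm.litellm_core_utils.prompt_templates.common_utils import get_format_from_file_id

plain_id = "litellm_proxy:application/pdf;unified_id,cbbe3534-8bf8-4386-af00-f5f6b7e370bf"
assert get_format_from_file_id(plain_id) == "application/pdf"  # format parsed out of the unified id
assert get_format_from_file_id(None) is None                   # no file id -> nothing to parse
```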
@@ -343,6 +350,7 @@ def update_messages_with_model_file_ids(
            }
        }
    """

    for message in messages:
        if message.get("role") == "user":
            content = message.get("content")
@@ -16,7 +16,7 @@ from pydantic import (
from typing_extensions import Required, TypedDict

from litellm.types.integrations.slack_alerting import AlertType
from litellm.types.llms.openai import AllMessageValues
from litellm.types.llms.openai import AllMessageValues, OpenAIFileObject
from litellm.types.router import RouterErrors, UpdateRouterConfig
from litellm.types.utils import (
    CallTypes,
@@ -144,6 +144,7 @@ class LitellmTableNames(str, enum.Enum):
    USER_TABLE_NAME = "LiteLLM_UserTable"
    KEY_TABLE_NAME = "LiteLLM_VerificationToken"
    PROXY_MODEL_TABLE_NAME = "LiteLLM_ProxyModelTable"
    MANAGED_FILE_TABLE_NAME = "LiteLLM_ManagedFileTable"


class Litellm_EntityType(enum.Enum):
@@ -2795,3 +2796,9 @@ class SpendUpdateQueueItem(TypedDict, total=False):
    entity_type: Litellm_EntityType
    entity_id: str
    response_cost: Optional[float]


class LiteLLM_ManagedFileTable(LiteLLMPydanticObjectBase):
    unified_file_id: str
    file_object: OpenAIFileObject
    model_mappings: Dict[str, str]
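A hedged sketch of how this new pydantic table object is meant to be populated; the field values are illustrative only, and the OpenAIFileObject kwargs mirror the ones constructed later in this diff:

```python
from litellm.proxy._types import LiteLLM_ManagedFileTable
from litellm.types.llms.openai import OpenAIFileObject

row = LiteLLM_ManagedFileTable(
    unified_file_id="bGl0ZWxsbV9wcm94eTo...",  # truncated b64 unified id (illustrative)
    file_object=OpenAIFileObject(
        id="bGl0ZWxsbV9wcm94eTo...",
        object="file",
        purpose="user_data",      # illustrative purpose value
        created_at=1712000000,
        bytes=1024,
        filename="recording.pdf",
        status="uploaded",
    ),
    model_mappings={"my_model_id": "provider_file_id"},
)
cache_value = row.model_dump()  # this dict is what the managed-files hook caches
```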
@@ -1,8 +1,8 @@
# What is this?
## This hook is used to check for LiteLLM managed files in the request body, and replace them with model-specific file id

import asyncio
import base64
import json
import uuid
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Dict, List, Literal, Optional, Union, cast
@@ -11,7 +11,7 @@ from litellm import Router, verbose_logger
from litellm.caching.caching import DualCache
from litellm.integrations.custom_logger import CustomLogger
from litellm.litellm_core_utils.prompt_templates.common_utils import extract_file_data
from litellm.proxy._types import CallTypes, UserAPIKeyAuth
from litellm.proxy._types import CallTypes, LiteLLM_ManagedFileTable, UserAPIKeyAuth
from litellm.types.llms.openai import (
    AllMessageValues,
    ChatCompletionFileObject,
@@ -19,7 +19,7 @@ from litellm.types.llms.openai import (
    OpenAIFileObject,
    OpenAIFilesPurpose,
)
from litellm.types.utils import LLMResponseTypes, SpecialEnums
from litellm.types.utils import SpecialEnums

if TYPE_CHECKING:
    from opentelemetry.trace import Span as _Span
@@ -71,44 +71,73 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger):
        file_id: str,
        file_object: OpenAIFileObject,
        litellm_parent_otel_span: Optional[Span],
        model_mappings: Dict[str, str],
    ) -> None:
        key = f"litellm_proxy/{file_id}"
        verbose_logger.info(
            f"Storing LiteLLM Managed File object with id={file_id} in cache"
        )
        litellm_managed_file_object = LiteLLM_ManagedFileTable(
            unified_file_id=file_id,
            file_object=file_object,
            model_mappings=model_mappings,
        )
        await self.internal_usage_cache.async_set_cache(
            key=key,
            value=file_object,
            key=file_id,
            value=litellm_managed_file_object.model_dump(),
            litellm_parent_otel_span=litellm_parent_otel_span,
        )

        await self.prisma_client.db.litellm_managedfiletable.create(
            data={
                "unified_file_id": file_id,
                "file_object": file_object.model_dump_json(),
                "model_mappings": json.dumps(model_mappings),
            }
        )

    async def get_unified_file_id(
        self, file_id: str, litellm_parent_otel_span: Optional[Span] = None
    ) -> Optional[OpenAIFileObject]:
        key = f"litellm_proxy/{file_id}"
        return await self.internal_usage_cache.async_get_cache(
            key=key,
            litellm_parent_otel_span=litellm_parent_otel_span,
    ) -> Optional[LiteLLM_ManagedFileTable]:
        ## CHECK CACHE
        result = cast(
            Optional[dict],
            await self.internal_usage_cache.async_get_cache(
                key=file_id,
                litellm_parent_otel_span=litellm_parent_otel_span,
            ),
        )

        if result:
            return LiteLLM_ManagedFileTable(**result)

        ## CHECK DB
        db_object = await self.prisma_client.db.litellm_managedfiletable.find_first(
            where={"unified_file_id": file_id}
        )

        if db_object:
            return LiteLLM_ManagedFileTable(**db_object.model_dump())
        return None

    async def delete_unified_file_id(
        self, file_id: str, litellm_parent_otel_span: Optional[Span] = None
    ) -> OpenAIFileObject:
        key = f"litellm_proxy/{file_id}"
        ## get old value
        old_value = await self.internal_usage_cache.async_get_cache(
            key=key,
            litellm_parent_otel_span=litellm_parent_otel_span,
        initial_value = await self.prisma_client.db.litellm_managedfiletable.find_first(
            where={"unified_file_id": file_id}
        )
        if old_value is None or not isinstance(old_value, OpenAIFileObject):
        if initial_value is None:
            raise Exception(f"LiteLLM Managed File object with id={file_id} not found")
        ## delete old value
        await self.internal_usage_cache.async_set_cache(
            key=key,
            key=file_id,
            value=None,
            litellm_parent_otel_span=litellm_parent_otel_span,
        )
        return old_value
        await self.prisma_client.db.litellm_managedfiletable.delete(
            where={"unified_file_id": file_id}
        )
        return initial_value.file_object

    async def async_pre_call_hook(
        self,
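In short, the rewritten read path above is a cache-then-DB read-through. A condensed sketch of the pattern, with `cache` and `db` standing in for litellm's InternalUsageCache and Prisma client:

```python
from typing import Optional

async def read_through(cache, db, file_id: str) -> Optional[dict]:
    cached = await cache.async_get_cache(key=file_id)
    if cached:
        return cached  # cache hit: skip the DB round trip
    row = await db.litellm_managedfiletable.find_first(
        where={"unified_file_id": file_id}
    )
    return row.model_dump() if row else None  # miss: fall back to the table
```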
@@ -133,11 +162,7 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger):
        if call_type == CallTypes.completion.value:
            messages = data.get("messages")
            if messages:
                file_ids = (
                    self.get_file_ids_and_decode_b64_to_unified_uid_from_messages(
                        messages
                    )
                )
                file_ids = self.get_file_ids_from_messages(messages)
                if file_ids:
                    model_file_id_mapping = await self.get_model_file_id_mapping(
                        file_ids, user_api_key_dict.parent_otel_span
@@ -147,9 +172,7 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger):

        return data

    def get_file_ids_and_decode_b64_to_unified_uid_from_messages(
        self, messages: List[AllMessageValues]
    ) -> List[str]:
    def get_file_ids_from_messages(self, messages: List[AllMessageValues]) -> List[str]:
        """
        Gets file ids from messages
        """
@@ -166,16 +189,7 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger):
                            file_object_file_field = file_object["file"]
                            file_id = file_object_file_field.get("file_id")
                            if file_id:
                                file_ids.append(
                                    _PROXY_LiteLLMManagedFiles._convert_b64_uid_to_unified_uid(
                                        file_id
                                    )
                                )
                                file_object_file_field[
                                    "file_id"
                                ] = _PROXY_LiteLLMManagedFiles._convert_b64_uid_to_unified_uid(
                                    file_id
                                )
                                file_ids.append(file_id)
        return file_ids

    @staticmethod
@@ -236,45 +250,82 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger):
            is_base64_unified_file_id = self._is_base64_encoded_unified_file_id(file_id)

            if is_base64_unified_file_id:
                litellm_managed_file_ids.append(is_base64_unified_file_id)
            elif file_id.startswith(SpecialEnums.LITELM_MANAGED_FILE_ID_PREFIX.value):
                litellm_managed_file_ids.append(file_id)

        if litellm_managed_file_ids:
            # Get all cache keys matching the pattern file_id:*
            for file_id in litellm_managed_file_ids:
                # Search for any cache key starting with this file_id
                cached_values = cast(
                    Dict[str, str],
                    await self.internal_usage_cache.async_get_cache(
                        key=file_id, litellm_parent_otel_span=litellm_parent_otel_span
                    ),
                unified_file_object = await self.get_unified_file_id(
                    file_id, litellm_parent_otel_span
                )
                if cached_values:
                    file_id_mapping[file_id] = cached_values
                if unified_file_object:
                    file_id_mapping[file_id] = unified_file_object.model_mappings

        return file_id_mapping

    async def async_post_call_success_hook(
    async def create_file_for_each_model(
        self,
        data: Dict,
        user_api_key_dict: UserAPIKeyAuth,
        response: LLMResponseTypes,
    ) -> Any:
        if isinstance(response, OpenAIFileObject):
            asyncio.create_task(
                self.store_unified_file_id(
                    response.id, response, user_api_key_dict.parent_otel_span
                )
        llm_router: Optional[Router],
        _create_file_request: CreateFileRequest,
        target_model_names_list: List[str],
        litellm_parent_otel_span: Span,
    ) -> List[OpenAIFileObject]:
        if llm_router is None:
            raise Exception("LLM Router not initialized. Ensure models added to proxy.")
        responses = []
        for model in target_model_names_list:
            individual_response = await llm_router.acreate_file(
                model=model, **_create_file_request
            )
            responses.append(individual_response)

        return None
        return responses

    async def acreate_file(
        self,
        create_file_request: CreateFileRequest,
        llm_router: Router,
        target_model_names_list: List[str],
        litellm_parent_otel_span: Span,
    ) -> OpenAIFileObject:
        responses = await self.create_file_for_each_model(
            llm_router=llm_router,
            _create_file_request=create_file_request,
            target_model_names_list=target_model_names_list,
            litellm_parent_otel_span=litellm_parent_otel_span,
        )
        response = await _PROXY_LiteLLMManagedFiles.return_unified_file_id(
            file_objects=responses,
            create_file_request=create_file_request,
            internal_usage_cache=self.internal_usage_cache,
            litellm_parent_otel_span=litellm_parent_otel_span,
        )

        ## STORE MODEL MAPPINGS IN DB
        model_mappings: Dict[str, str] = {}
        for file_object in responses:
            model_id = file_object._hidden_params.get("model_id")
            if model_id is None:
                verbose_logger.warning(
                    f"Skipping file_object: {file_object} because model_id in hidden_params={file_object._hidden_params} is None"
                )
                continue
            file_id = file_object.id
            model_mappings[model_id] = file_id

        await self.store_unified_file_id(
            file_id=response.id,
            file_object=response,
            litellm_parent_otel_span=litellm_parent_otel_span,
            model_mappings=model_mappings,
        )
        return response

    @staticmethod
    async def return_unified_file_id(
        file_objects: List[OpenAIFileObject],
        create_file_request: CreateFileRequest,
        purpose: OpenAIFilesPurpose,
        internal_usage_cache: InternalUsageCache,
        litellm_parent_otel_span: Span,
    ) -> OpenAIFileObject:
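The mapping this produces has the shape unified b64 file id -> {model_id: provider_file_id} (see the unit test for update_messages_with_model_file_ids further down); async_pre_call_hook then uses it to swap in the deployment-specific file id. Illustrative values only:

```python
unified_file_id = "bGl0ZWxsbV9wcm94eTo..."  # truncated b64 unified id
model_file_id_mapping = {
    unified_file_id: {
        "my_model_id": "provider_file_id",  # file id the provider returned for this deployment
    }
}
provider_file_id = model_file_id_mapping[unified_file_id]["my_model_id"]
```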
@@ -297,30 +348,13 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger):
        response = OpenAIFileObject(
            id=base64_unified_file_id,
            object="file",
            purpose=cast(OpenAIFilesPurpose, purpose),
            purpose=create_file_request["purpose"],
            created_at=file_objects[0].created_at,
            bytes=file_objects[0].bytes,
            filename=file_objects[0].filename,
            status="uploaded",
        )

        ## STORE RESPONSE IN DB + CACHE
        stored_values: Dict[str, str] = {}
        for file_object in file_objects:
            model_id = file_object._hidden_params.get("model_id")
            if model_id is None:
                verbose_logger.warning(
                    f"Skipping file_object: {file_object} because model_id in hidden_params={file_object._hidden_params} is None"
                )
                continue
            file_id = file_object.id
            stored_values[model_id] = file_id
            await internal_usage_cache.async_set_cache(
                key=unified_file_id,
                value=stored_values,
                litellm_parent_otel_span=litellm_parent_otel_span,
            )

        return response

    async def afile_retrieve(
@@ -330,7 +364,7 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger):
            file_id, litellm_parent_otel_span
        )
        if stored_file_object:
            return stored_file_object
            return stored_file_object.file_object
        else:
            raise Exception(f"LiteLLM Managed File object with id={file_id} not found")
@@ -376,12 +410,11 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger):
        """
        Get the content of a file from first model that has it
        """
        initial_file_id = file_id
        unified_file_id = self.convert_b64_uid_to_unified_uid(file_id)
        model_file_id_mapping = await self.get_model_file_id_mapping(
            [unified_file_id], litellm_parent_otel_span
            [file_id], litellm_parent_otel_span
        )
        specific_model_file_id_mapping = model_file_id_mapping.get(unified_file_id)
        specific_model_file_id_mapping = model_file_id_mapping.get(file_id)

        if specific_model_file_id_mapping:
            exception_dict = {}
            for model_id, file_id in specific_model_file_id_mapping.items():
@@ -390,9 +423,7 @@ class _PROXY_LiteLLMManagedFiles(CustomLogger):
            except Exception as e:
                exception_dict[model_id] = str(e)
            raise Exception(
                f"LiteLLM Managed File object with id={initial_file_id} not found. Checked model id's: {specific_model_file_id_mapping.keys()}. Errors: {exception_dict}"
                f"LiteLLM Managed File object with id={file_id} not found. Checked model id's: {specific_model_file_id_mapping.keys()}. Errors: {exception_dict}"
            )
        else:
            raise Exception(
                f"LiteLLM Managed File object with id={initial_file_id} not found"
            )
            raise Exception(f"LiteLLM Managed File object with id={file_id} not found")
@@ -128,37 +128,6 @@ async def _deprecated_loadbalanced_create_file(
    return response


async def create_file_for_each_model(
    llm_router: Optional[Router],
    _create_file_request: CreateFileRequest,
    target_model_names_list: List[str],
    purpose: OpenAIFilesPurpose,
    proxy_logging_obj: ProxyLogging,
    user_api_key_dict: UserAPIKeyAuth,
) -> OpenAIFileObject:
    if llm_router is None:
        raise HTTPException(
            status_code=500,
            detail={
                "error": "LLM Router not initialized. Ensure models added to proxy."
            },
        )
    responses = []
    for model in target_model_names_list:
        individual_response = await llm_router.acreate_file(
            model=model, **_create_file_request
        )
        responses.append(individual_response)
    response = await _PROXY_LiteLLMManagedFiles.return_unified_file_id(
        file_objects=responses,
        create_file_request=_create_file_request,
        purpose=purpose,
        internal_usage_cache=proxy_logging_obj.internal_usage_cache,
        litellm_parent_otel_span=user_api_key_dict.parent_otel_span,
    )
    return response


async def route_create_file(
    llm_router: Optional[Router],
    _create_file_request: CreateFileRequest,
@@ -181,13 +150,29 @@ async def route_create_file(
            _create_file_request=_create_file_request,
        )
    elif target_model_names_list:
        response = await create_file_for_each_model(
        managed_files_obj = cast(
            Optional[_PROXY_LiteLLMManagedFiles],
            proxy_logging_obj.get_proxy_hook("managed_files"),
        )
        if managed_files_obj is None:
            raise ProxyException(
                message="Managed files hook not found",
                type="None",
                param="None",
                code=500,
            )
        if llm_router is None:
            raise ProxyException(
                message="LLM Router not found",
                type="None",
                param="None",
                code=500,
            )
        response = await managed_files_obj.acreate_file(
            llm_router=llm_router,
            _create_file_request=_create_file_request,
            create_file_request=_create_file_request,
            target_model_names_list=target_model_names_list,
            purpose=purpose,
            proxy_logging_obj=proxy_logging_obj,
            user_api_key_dict=user_api_key_dict,
            litellm_parent_otel_span=user_api_key_dict.parent_otel_span,
        )
    else:
        # get configs for custom_llm_provider
@@ -354,3 +354,14 @@ enum JobStatus {
  INACTIVE
}

model LiteLLM_ManagedFileTable {
  id              String   @id @default(uuid())
  unified_file_id String   @unique // The base64 encoded unified file ID
  file_object     Json     // Stores the OpenAIFileObject
  model_mappings  Json     // Stores the mapping of model_id -> provider_file_id
  created_at      DateTime @default(now())
  updated_at      DateTime @updatedAt

  @@index([unified_file_id])
}
@@ -113,7 +113,7 @@ def generate_hash_from_response(response_obj: Any) -> str:
def get_spend_logs_id(
    call_type: str, response_obj: dict, kwargs: dict
) -> Optional[str]:
    if call_type == "aretrieve_batch":
    if call_type == "aretrieve_batch" or call_type == "acreate_file":
        # Generate a hash from the response object
        id: Optional[str] = generate_hash_from_response(response_obj)
    else:
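File-create responses don't carry a request id the spend tracker can reuse, so the log id is derived by hashing the response object, the same path "aretrieve_batch" already used. A hedged sketch of that idea; litellm's actual generate_hash_from_response may serialize and hash differently:

```python
import hashlib
import json
from typing import Any

def hash_response_for_spend_log(response_obj: Any) -> str:
    # Deterministic id: identical file-create responses map to the same spend-log row,
    # so repeated log writes for one request collapse instead of duplicating.
    serialized = json.dumps(response_obj, sort_keys=True, default=str)
    return hashlib.sha256(serialized.encode()).hexdigest()

print(hash_response_for_spend_log({"id": "file-abc", "bytes": 1024}))
```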
3  litellm/types/proxy/README.md (new file)
@@ -0,0 +1,3 @@
These are file-specific types for the proxy.

For Types you expect to be used across the proxy, put them in `litellm/proxy/_types.py`
34  poetry.lock (generated)
@@ -1307,13 +1307,13 @@ files = [

[[package]]
name = "httpcore"
version = "1.0.7"
version = "1.0.8"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
    {file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"},
    {file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"},
    {file = "httpcore-1.0.8-py3-none-any.whl", hash = "sha256:5254cf149bcb5f75e9d1b2b9f729ea4a4b883d1ad7379fc632b727cec23674be"},
    {file = "httpcore-1.0.8.tar.gz", hash = "sha256:86e94505ed24ea06514883fd44d2bc02d90e77e7979c8eb71b90f41d364a1bad"},
]

[package.dependencies]
@@ -1363,13 +1363,13 @@ files = [

[[package]]
name = "huggingface-hub"
version = "0.30.1"
version = "0.30.2"
description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
optional = false
python-versions = ">=3.8.0"
files = [
    {file = "huggingface_hub-0.30.1-py3-none-any.whl", hash = "sha256:0f6aa5ec5a4e68e5b9e45d556b4e5ea180c58f5a5ffa734e7f38c9d573028959"},
    {file = "huggingface_hub-0.30.1.tar.gz", hash = "sha256:f379e8b8d0791295602538856638460ae3cf679c7f304201eb80fb98c771950e"},
    {file = "huggingface_hub-0.30.2-py3-none-any.whl", hash = "sha256:68ff05969927058cfa41df4f2155d4bb48f5f54f719dd0390103eefa9b191e28"},
    {file = "huggingface_hub-0.30.2.tar.gz", hash = "sha256:9a7897c5b6fd9dad3168a794a8998d6378210f5b9688d0dfc180b1a228dc2466"},
]

[package.dependencies]
@@ -1680,13 +1680,13 @@ referencing = ">=0.31.0"

[[package]]
name = "litellm-proxy-extras"
version = "0.1.3"
version = "0.1.4"
description = "Additional files for the LiteLLM Proxy. Reduces the size of the main litellm package."
optional = true
python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8"
files = [
    {file = "litellm_proxy_extras-0.1.3-py3-none-any.whl", hash = "sha256:7025e876d866776304a1171612c6676714426ae15ae36840cbf5481df8686283"},
    {file = "litellm_proxy_extras-0.1.3.tar.gz", hash = "sha256:4df7036592f4d434db841a2b19c64c9bc50b9a80de45afc94c409b81698db8c3"},
    {file = "litellm_proxy_extras-0.1.4-py3-none-any.whl", hash = "sha256:fdf9c4acf844cfe63e6f3ab17ca700b526d37e0b865b96a992ec99e28b7e3d59"},
    {file = "litellm_proxy_extras-0.1.4.tar.gz", hash = "sha256:1b88b295f5e9684f9982ebe5d93da55a8f80ba96302281613f4d91477cec45ac"},
]

[[package]]
@@ -2180,13 +2180,13 @@ signedtoken = ["cryptography (>=3.0.0)", "pyjwt (>=2.0.0,<3)"]

[[package]]
name = "openai"
version = "1.70.0"
version = "1.72.0"
description = "The official Python library for the openai API"
optional = false
python-versions = ">=3.8"
files = [
    {file = "openai-1.70.0-py3-none-any.whl", hash = "sha256:f6438d053fd8b2e05fd6bef70871e832d9bbdf55e119d0ac5b92726f1ae6f614"},
    {file = "openai-1.70.0.tar.gz", hash = "sha256:e52a8d54c3efeb08cf58539b5b21a5abef25368b5432965e4de88cdf4e091b2b"},
    {file = "openai-1.72.0-py3-none-any.whl", hash = "sha256:34f5496ba5c8cb06c592831d69e847e2d164526a2fb92afdc3b5cf2891c328c3"},
    {file = "openai-1.72.0.tar.gz", hash = "sha256:f51de971448905cc90ed5175a5b19e92fd94e31f68cde4025762f9f5257150db"},
]

[package.dependencies]
@@ -2201,7 +2201,7 @@ typing-extensions = ">=4.11,<5"

[package.extras]
datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
realtime = ["websockets (>=13,<15)"]
realtime = ["websockets (>=13,<16)"]
voice-helpers = ["numpy (>=2.0.2)", "sounddevice (>=0.5.1)"]

[[package]]
@@ -3763,13 +3763,13 @@ files = [

[[package]]
name = "typing-extensions"
version = "4.13.1"
version = "4.13.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
    {file = "typing_extensions-4.13.1-py3-none-any.whl", hash = "sha256:4b6cf02909eb5495cfbc3f6e8fd49217e6cc7944e145cdda8caa3734777f9e69"},
    {file = "typing_extensions-4.13.1.tar.gz", hash = "sha256:98795af00fb9640edec5b8e31fc647597b4691f099ad75f469a2616be1a76dff"},
    {file = "typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c"},
    {file = "typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef"},
]

[[package]]
@@ -4151,4 +4151,4 @@ proxy = ["PyJWT", "apscheduler", "backoff", "boto3", "cryptography", "fastapi",
[metadata]
lock-version = "2.0"
python-versions = ">=3.8.1,<4.0, !=3.9.7"
content-hash = "ee904a214356eedc35187d8efb0ceeec92ce23f552ff4785324aea3e919b0b46"
content-hash = "2f4fd9493faeb5c30d5afc15b6babfc42c5ba28930888b30c6ebaa264e683ed6"
@@ -55,7 +55,7 @@ websockets = {version = "^13.1.0", optional = true}
boto3 = {version = "1.34.34", optional = true}
redisvl = {version = "^0.4.1", optional = true, markers = "python_version >= '3.9' and python_version < '3.14'"}
mcp = {version = "1.5.0", optional = true, python = ">=3.10"}
litellm-proxy-extras = {version = "0.1.3", optional = true}
litellm-proxy-extras = {version = "0.1.4", optional = true}

[tool.poetry.extras]
proxy = [
@@ -37,7 +37,7 @@ sentry_sdk==2.21.0 # for sentry error handling
detect-secrets==1.5.0 # Enterprise - secret detection / masking in LLM requests
cryptography==43.0.1
tzdata==2025.1 # IANA time zone database
litellm-proxy-extras==0.1.3 # for proxy extras - e.g. prisma migrations
litellm-proxy-extras==0.1.4 # for proxy extras - e.g. prisma migrations

### LITELLM PACKAGE DEPENDENCIES
python-dotenv==1.0.0 # for env
@@ -354,3 +354,14 @@ enum JobStatus {
  INACTIVE
}

model LiteLLM_ManagedFileTable {
  id              String   @id @default(uuid())
  unified_file_id String   @unique // The base64 encoded unified file ID
  file_object     Json     // Stores the OpenAIFileObject
  model_mappings  Json     // Stores the mapping of model_id -> provider_file_id
  created_at      DateTime @default(now())
  updated_at      DateTime @updatedAt

  @@index([unified_file_id])
}
@@ -15,7 +15,7 @@ from litellm.litellm_core_utils.prompt_templates.common_utils import (
)


def test_update_messages_with_model_file_ids():
def test_get_format_from_file_id():
    unified_file_id = (
        "litellm_proxy:application/pdf;unified_id,cbbe3534-8bf8-4386-af00-f5f6b7e370bf"
    )
@@ -23,3 +23,44 @@ def test_update_messages_with_model_file_ids():
    format = get_format_from_file_id(unified_file_id)

    assert format == "application/pdf"


def test_update_messages_with_model_file_ids():
    file_id = "bGl0ZWxsbV9wcm94eTphcHBsaWNhdGlvbi9wZGY7dW5pZmllZF9pZCxmYzdmMmVhNS0wZjUwLTQ5ZjYtODljMS03ZTZhNTRiMTIxMzg"
    model_id = "my_model_id"
    messages = [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this recording?"},
                {
                    "type": "file",
                    "file": {
                        "file_id": file_id,
                    },
                },
            ],
        },
    ]

    model_file_id_mapping = {file_id: {"my_model_id": "provider_file_id"}}

    updated_messages = update_messages_with_model_file_ids(
        messages, model_id, model_file_id_mapping
    )

    assert updated_messages == [
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "What is in this recording?"},
                {
                    "type": "file",
                    "file": {
                        "file_id": "provider_file_id",
                        "format": "application/pdf",
                    },
                },
            ],
        }
    ]
@@ -16,7 +16,7 @@ from litellm.proxy.hooks.managed_files import _PROXY_LiteLLMManagedFiles
from litellm.types.utils import SpecialEnums


def test_get_file_ids_and_decode_b64_to_unified_uid_from_messages():
def test_get_file_ids_from_messages():
    proxy_managed_files = _PROXY_LiteLLMManagedFiles(
        DualCache(), prisma_client=MagicMock()
    )
@@ -34,20 +34,11 @@ def test_get_file_ids_and_decode_b64_to_unified_uid_from_messages():
            ],
        },
    ]
    file_ids = (
        proxy_managed_files.get_file_ids_and_decode_b64_to_unified_uid_from_messages(
            messages
        )
    )
    file_ids = proxy_managed_files.get_file_ids_from_messages(messages)
    assert file_ids == [
        "litellm_proxy:application/pdf;unified_id,fc7f2ea5-0f50-49f6-89c1-7e6a54b12138"
        "bGl0ZWxsbV9wcm94eTphcHBsaWNhdGlvbi9wZGY7dW5pZmllZF9pZCxmYzdmMmVhNS0wZjUwLTQ5ZjYtODljMS03ZTZhNTRiMTIxMzg"
    ]

    ## in place update
    assert messages[0]["content"][1]["file"]["file_id"].startswith(
        SpecialEnums.LITELM_MANAGED_FILE_ID_PREFIX.value
    )


# def test_list_managed_files():
#     proxy_managed_files = _PROXY_LiteLLMManagedFiles(DualCache())
@@ -104,10 +104,20 @@ def test_mock_create_audio_file(mocker: MockerFixture, monkeypatch, llm_router:
    Asserts 'create_file' is called with the correct arguments
    """
    from litellm import Router
    from litellm.proxy.utils import ProxyLogging

    mock_create_file = mocker.patch("litellm.files.main.create_file")

    proxy_logging_obj = ProxyLogging(
        user_api_key_cache=DualCache(default_in_memory_ttl=1)
    )

    proxy_logging_obj._add_proxy_hooks(llm_router)

    monkeypatch.setattr("litellm.proxy.proxy_server.llm_router", llm_router)
    monkeypatch.setattr(
        "litellm.proxy.proxy_server.proxy_logging_obj", proxy_logging_obj
    )

    # Create a simple test file content
    test_file_content = b"test audio content"
@@ -306,6 +316,7 @@ def test_create_file_and_call_chat_completion_e2e(
        mock.stop()


@pytest.mark.skip(reason="function migrated to litellm/proxy/hooks/managed_files.py")
def test_create_file_for_each_model(
    mocker: MockerFixture, monkeypatch, llm_router: Router
):