From 8e42eb624b3427de051a4589c2d26e9d5c520ae7 Mon Sep 17 00:00:00 2001
From: nick-rackauckas
Date: Thu, 13 Jun 2024 15:00:16 -0700
Subject: [PATCH 01/18] Fix file type handling of uppercase extensions

---
 litellm/types/files.py | 16 +++++++---------
 1 file changed, 7 insertions(+), 9 deletions(-)

diff --git a/litellm/types/files.py b/litellm/types/files.py
index 2da08e76c4..577b9b55cc 100644
--- a/litellm/types/files.py
+++ b/litellm/types/files.py
@@ -151,23 +151,16 @@ Util Functions
 """
 
-def get_file_mime_type_from_extension(extension: str) -> str:
-    for file_type, extensions in FILE_EXTENSIONS.items():
-        if extension in extensions:
-            return FILE_MIME_TYPES[file_type]
-    raise ValueError(f"Unknown mime type for extension: {extension}")
-
-
 def get_file_extension_from_mime_type(mime_type: str) -> str:
     for file_type, mime in FILE_MIME_TYPES.items():
-        if mime == mime_type:
+        if mime.lower() == mime_type.lower():
             return FILE_EXTENSIONS[file_type][0]
     raise ValueError(f"Unknown extension for mime type: {mime_type}")
 
 
 def get_file_type_from_extension(extension: str) -> FileType:
     for file_type, extensions in FILE_EXTENSIONS.items():
-        if extension in extensions:
+        if extension.lower() in extensions:
             return file_type
 
     raise ValueError(f"Unknown file type for extension: {extension}")
@@ -181,6 +174,11 @@ def get_file_mime_type_for_file_type(file_type: FileType) -> str:
     return FILE_MIME_TYPES[file_type]
 
 
+def get_file_mime_type_from_extension(extension: str) -> str:
+    file_type = get_file_type_from_extension(extension)
+    return get_file_mime_type_for_file_type(file_type)
+
+
 """
 FileType Type Groupings (Videos, Images, etc)
 """

From 6cffd0b62de2c0c263972c7050813461e8e1f2c1 Mon Sep 17 00:00:00 2001
From: = <=>
Date: Fri, 14 Jun 2024 15:19:11 +0200
Subject: [PATCH 02/18] Add mock_tool_calls

---
 litellm/main.py | 11 ++++++++++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/litellm/main.py b/litellm/main.py
index 31cb8e364f..90feb9aa85 100644
--- a/litellm/main.py
+++ b/litellm/main.py
@@ -41,6 +41,7 @@ from litellm.utils import (
     get_optional_params_embeddings,
     get_optional_params_image_gen,
     supports_httpx_timeout,
+    ChatCompletionMessageToolCall,
 )
 from .llms import (
     anthropic_text,
@@ -398,6 +399,7 @@ def mock_completion(
     messages: List,
     stream: Optional[bool] = False,
     mock_response: Union[str, Exception] = "This is a mock request",
+    mock_tool_calls: Optional[List] = None,
     logging=None,
     **kwargs,
 ):
@@ -465,6 +467,11 @@ def mock_completion(
     model_response["created"] = int(time.time())
     model_response["model"] = model
 
+    if mock_tool_calls:
+        model_response["choices"][0]["message"]["tool_calls"] = [
+            ChatCompletionMessageToolCall(**tool_call) for tool_call in mock_tool_calls
+        ]
+
     setattr(
         model_response,
         "usage",
@@ -578,6 +585,7 @@ def completion(
     args = locals()
     api_base = kwargs.get("api_base", None)
     mock_response = kwargs.get("mock_response", None)
+    mock_tool_calls = kwargs.get("mock_tool_calls", None)
     force_timeout = kwargs.get("force_timeout", 600)  ## deprecated
     logger_fn = kwargs.get("logger_fn", None)
     verbose = kwargs.get("verbose", False)
@@ -896,12 +904,13 @@ def completion(
             litellm_params=litellm_params,
             custom_llm_provider=custom_llm_provider,
         )
-        if mock_response:
+        if mock_response or mock_tool_calls:
             return mock_completion(
                 model,
                 messages,
                 stream=stream,
                 mock_response=mock_response,
+                mock_tool_calls=mock_tool_calls,
                 logging=logging,
                 acompletion=acompletion,
                 mock_delay=kwargs.get("mock_delay", None),
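For reference, a minimal sketch of what the two patches above enable: extension and MIME lookups are now case-insensitive, and mock_tool_calls lets a test fake a tool-calling response without contacting a provider. The tool-call dict is assumed to follow the OpenAI function-call shape that ChatCompletionMessageToolCall accepts; the id and function name are hypothetical.

    import litellm
    from litellm.types.files import get_file_mime_type_from_extension

    # PATCH 01: uppercase extensions now resolve instead of raising ValueError.
    print(get_file_mime_type_from_extension("PNG"))  # image/png

    # PATCH 02: a mocked completion carrying tool calls; no API key needed.
    response = litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "What's the weather in SF?"}],
        mock_response="Let me check the weather.",
        mock_tool_calls=[
            {
                "id": "call_abc123",  # hypothetical id
                "type": "function",
                "function": {
                    "name": "get_current_weather",  # hypothetical tool
                    "arguments": '{"location": "San Francisco, CA"}',
                },
            }
        ],
    )
    print(response.choices[0].message.tool_calls)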
From 464a6e6256f9f532585471dc826dcb2c9468dd13 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Mon, 17 Jun 2024 17:21:43 -0700
Subject: [PATCH 03/18] fix - refactor logfire

---
 litellm/litellm_core_utils/litellm_logging.py | 54 +++----------------
 litellm/proxy/proxy_config.yaml               |  2 +-
 2 files changed, 9 insertions(+), 47 deletions(-)

diff --git a/litellm/litellm_core_utils/litellm_logging.py b/litellm/litellm_core_utils/litellm_logging.py
index f54bf1d2ff..a66d978e62 100644
--- a/litellm/litellm_core_utils/litellm_logging.py
+++ b/litellm/litellm_core_utils/litellm_logging.py
@@ -64,6 +64,7 @@ from ..integrations.litedebugger import LiteDebugger
 from ..integrations.logfire_logger import LogfireLevel, LogfireLogger
 from ..integrations.lunary import LunaryLogger
 from ..integrations.openmeter import OpenMeterLogger
+from ..integrations.opentelemetry import OpenTelemetry, OpenTelemetryConfig
 from ..integrations.prometheus import PrometheusLogger
 from ..integrations.prometheus_services import PrometheusServicesLogger
 from ..integrations.prompt_layer import PromptLayerLogger
@@ -664,32 +665,6 @@ class Logging:
                         end_time=end_time,
                         print_verbose=print_verbose,
                     )
-                if callback == "logfire":
-                    global logfireLogger
-                    verbose_logger.debug("reaches logfire for success logging!")
-                    kwargs = {}
-                    for k, v in self.model_call_details.items():
-                        if (
-                            k != "original_response"
-                        ):  # copy.deepcopy raises errors as this could be a coroutine
-                            kwargs[k] = v
-
-                    # this only logs streaming once, complete_streaming_response exists i.e when stream ends
-                    if self.stream:
-                        if "complete_streaming_response" not in kwargs:
-                            continue
-                        else:
-                            print_verbose("reaches logfire for streaming logging!")
-                            result = kwargs["complete_streaming_response"]
-
-                    logfireLogger.log_event(
-                        kwargs=self.model_call_details,
-                        response_obj=result,
-                        start_time=start_time,
-                        end_time=end_time,
-                        print_verbose=print_verbose,
-                        level=LogfireLevel.INFO.value,
-                    )
 
                 if callback == "lunary":
                     print_verbose("reaches lunary for logging!")
@@ -1594,24 +1569,6 @@ class Logging:
                         print_verbose=print_verbose,
                     )
-                if callback == "logfire":
-                    verbose_logger.debug("reaches logfire for failure logging!")
-                    kwargs = {}
-                    for k, v in self.model_call_details.items():
-                        if (
-                            k != "original_response"
-                        ):  # copy.deepcopy raises errors as this could be a coroutine
-                            kwargs[k] = v
-                    kwargs["exception"] = exception
-
-                    logfireLogger.log_event(
-                        kwargs=kwargs,
-                        response_obj=result,
-                        start_time=start_time,
-                        end_time=end_time,
-                        level=LogfireLevel.ERROR.value,
-                        print_verbose=print_verbose,
-                    )
             except Exception as e:
                 print_verbose(
                     f"LiteLLM.LoggingError: [Non-Blocking] Exception occurred while failure logging with integrations {str(e)}"
@@ -1754,8 +1711,6 @@ def set_callbacks(callback_list, function_id=None):
                 weightsBiasesLogger = WeightsBiasesLogger()
             elif callback == "langsmith":
                 langsmithLogger = LangsmithLogger()
-            elif callback == "logfire":
-                logfireLogger = LogfireLogger()
             elif callback == "aispend":
                 aispendLogger = AISpendLogger()
             elif callback == "berrispend":
@@ -1789,3 +1744,10 @@ def _init_custom_logger_compatible_class(
         return LagoLogger()  # type: ignore
     elif logging_integration == "openmeter":
         return OpenMeterLogger()  # type: ignore
+    elif logging_integration == "logfire":
+        otel_config = OpenTelemetryConfig(
+            exporter="otlp_http",
+            endpoint="https://logfire-api.pydantic.dev/v1/traces",
+            headers=f"Authorization={os.getenv('LOGFIRE_TOKEN')}",
+        )
+        return OpenTelemetry(config=otel_config)  # type: ignore

diff --git a/litellm/proxy/proxy_config.yaml b/litellm/proxy/proxy_config.yaml
index 27d49a33dd..d5190455f1 100644
--- a/litellm/proxy/proxy_config.yaml
+++ b/litellm/proxy/proxy_config.yaml
@@ -24,9 +24,9 @@ general_settings:
 
 litellm_settings:
   success_callback: ["prometheus"]
+  callbacks: ["otel"]
   failure_callback: ["prometheus"]
   store_audit_logs: true
-  turn_off_message_logging: true
   redact_messages_in_exceptions: True
   enforced_params:
     - user
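The refactor above retires the bespoke LogfireLogger and instead builds logfire on the generic OpenTelemetry integration, exporting traces over OTLP/HTTP to Logfire's endpoint with the token passed as an Authorization header. A minimal usage sketch, assuming LOGFIRE_TOKEN is set and that the "logfire" string is registered through litellm.callbacks like the other custom-logger-compatible integrations:

    import os
    import litellm

    os.environ["LOGFIRE_TOKEN"] = "your-logfire-write-token"  # placeholder

    # Assumption: the string is resolved via _init_custom_logger_compatible_class,
    # which returns an OpenTelemetry logger configured for Logfire's endpoint.
    litellm.callbacks = ["logfire"]

    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
        mock_response="hello",  # avoids a real provider call while testing
    )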
From a4b1b9a3aca416789853d9f1b161027baa546830 Mon Sep 17 00:00:00 2001
From: Ishaan Jaff
Date: Mon, 17 Jun 2024 17:31:58 -0700
Subject: [PATCH 04/18] update logging docs

---
 .../docs/observability/athina_integration.md       |  9 +++++++++
 .../docs/observability/greenscale_integration.md   |  9 +++++++++
 .../docs/observability/helicone_integration.md     |  9 +++++++++
 .../docs/observability/langfuse_integration.md     |  2 +-
 .../docs/observability/langsmith_integration.md    | 10 ++++++++++
 .../docs/observability/logfire_integration.md      |  2 +-
 .../docs/observability/lunary_integration.md       |  8 ++++++++
 .../docs/observability/promptlayer_integration.md  |  9 +++++++++
 docs/my-website/docs/observability/sentry.md       |  9 +++++++++
 .../docs/observability/supabase_integration.md     |  8 ++++++++
 .../my-website/docs/observability/wandb_integration.md | 10 ++++++++++
 docs/my-website/sidebars.js                        |  8 ++++----
 12 files changed, 87 insertions(+), 6 deletions(-)

diff --git a/docs/my-website/docs/observability/athina_integration.md b/docs/my-website/docs/observability/athina_integration.md
index 62c8897518..cd1442f35a 100644
--- a/docs/my-website/docs/observability/athina_integration.md
+++ b/docs/my-website/docs/observability/athina_integration.md
@@ -2,6 +2,15 @@ import Image from '@theme/IdealImage';
 
 # Athina
 
+
+:::tip
+
+This is community maintained. Please open an issue if you run into a bug:
+https://github.com/BerriAI/litellm
+
+:::
+
+
 [Athina](https://athina.ai/) is an evaluation framework and production monitoring platform for your LLM-powered app. Athina is designed to enhance the performance and reliability of AI applications through real-time monitoring, granular analytics, and plug-and-play evaluations.
 
diff --git a/docs/my-website/docs/observability/greenscale_integration.md b/docs/my-website/docs/observability/greenscale_integration.md
index 0dd673226e..49eadc6453 100644
--- a/docs/my-website/docs/observability/greenscale_integration.md
+++ b/docs/my-website/docs/observability/greenscale_integration.md
@@ -1,5 +1,14 @@
 # Greenscale - Track LLM Spend and Responsible Usage
 
+
+:::tip
+
+This is community maintained. Please open an issue if you run into a bug:
+https://github.com/BerriAI/litellm
+
+:::
+
+
 [Greenscale](https://greenscale.ai/) is a production monitoring platform for your LLM-powered app that provides you granular key insights into your GenAI spending and responsible usage. Greenscale only captures metadata to minimize the exposure risk of personally identifiable information (PII).
 
 ## Getting Started
 
diff --git a/docs/my-website/docs/observability/helicone_integration.md b/docs/my-website/docs/observability/helicone_integration.md
index de89ba8da8..f7fd330c30 100644
--- a/docs/my-website/docs/observability/helicone_integration.md
+++ b/docs/my-website/docs/observability/helicone_integration.md
@@ -1,4 +1,13 @@
 # Helicone Tutorial
+
+:::tip
+
+This is community maintained. Please open an issue if you run into a bug:
+https://github.com/BerriAI/litellm
+
+:::
+
+
 [Helicone](https://helicone.ai/) is an open source observability platform that proxies your OpenAI traffic and provides you key insights into your spend, latency and usage.
 
 ## Use Helicone to log requests across all LLM Providers (OpenAI, Azure, Anthropic, Cohere, Replicate, PaLM)
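As context for the Helicone doc above, enabling the integration is a one-line callback hook. A minimal sketch, assuming a valid HELICONE_API_KEY:

    import os
    import litellm

    os.environ["HELICONE_API_KEY"] = "your-helicone-key"  # placeholder
    litellm.success_callback = ["helicone"]  # log successful calls to Helicone

    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
    )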
diff --git a/docs/my-website/docs/observability/langfuse_integration.md b/docs/my-website/docs/observability/langfuse_integration.md
index 0fb411dd3f..dbd03620dc 100644
--- a/docs/my-website/docs/observability/langfuse_integration.md
+++ b/docs/my-website/docs/observability/langfuse_integration.md
@@ -1,6 +1,6 @@
 import Image from '@theme/IdealImage';
 
-# Langfuse - Logging LLM Input/Output
+# 🔥 Langfuse - Logging LLM Input/Output
 
 LangFuse is open Source Observability & Analytics for LLM Apps
 Detailed production traces and a granular view on quality, cost and latency
 
diff --git a/docs/my-website/docs/observability/langsmith_integration.md b/docs/my-website/docs/observability/langsmith_integration.md
index b115866d54..c038abd821 100644
--- a/docs/my-website/docs/observability/langsmith_integration.md
+++ b/docs/my-website/docs/observability/langsmith_integration.md
@@ -1,6 +1,16 @@
 import Image from '@theme/IdealImage';
 
 # Langsmith - Logging LLM Input/Output
+
+
+:::tip
+
+This is community maintained. Please open an issue if you run into a bug:
+https://github.com/BerriAI/litellm
+
+:::
+
+
 An all-in-one developer platform for every step of the application lifecycle
 https://smith.langchain.com/
 
diff --git a/docs/my-website/docs/observability/logfire_integration.md b/docs/my-website/docs/observability/logfire_integration.md
index c1f425f425..55c15295aa 100644
--- a/docs/my-website/docs/observability/logfire_integration.md
+++ b/docs/my-website/docs/observability/logfire_integration.md
@@ -1,6 +1,6 @@
 import Image from '@theme/IdealImage';
 
-# Logfire - Logging LLM Input/Output
+# 🔥 Logfire - Logging LLM Input/Output
 
 Logfire is open Source Observability & Analytics for LLM Apps
 Detailed production traces and a granular view on quality, cost and latency
 
diff --git a/docs/my-website/docs/observability/lunary_integration.md b/docs/my-website/docs/observability/lunary_integration.md
index 9b8e90df7b..56e74132f7 100644
--- a/docs/my-website/docs/observability/lunary_integration.md
+++ b/docs/my-website/docs/observability/lunary_integration.md
@@ -1,5 +1,13 @@
 # Lunary - Logging and tracing LLM input/output
 
+:::tip
+
+This is community maintained. Please open an issue if you run into a bug:
+https://github.com/BerriAI/litellm
+
+:::
+
+
 [Lunary](https://lunary.ai/) is an open-source AI developer platform providing observability, prompt management, and evaluation tools for AI developers.
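The Lunary doc above follows the same callback pattern as the other community-maintained integrations in this patch. A minimal sketch, assuming a valid LUNARY_PUBLIC_KEY:

    import os
    import litellm

    os.environ["LUNARY_PUBLIC_KEY"] = "your-lunary-public-key"  # placeholder
    litellm.success_callback = ["lunary"]  # log successes to Lunary
    litellm.failure_callback = ["lunary"]  # log failures too

    litellm.completion(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "hi"}],
    )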