mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-10-25 09:05:37 +00:00
Some checks failed
SqlStore Integration Tests / test-postgres (3.12) (push) Failing after 0s
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 1s
Test External Providers Installed via Module / test-external-providers-from-module (venv) (push) Has been skipped
Integration Tests (Replay) / Integration Tests (, , , client=, ) (push) Failing after 3s
Python Package Build Test / build (3.12) (push) Failing after 2s
SqlStore Integration Tests / test-postgres (3.13) (push) Failing after 4s
Python Package Build Test / build (3.13) (push) Failing after 3s
Test External API and Providers / test-external (venv) (push) Failing after 4s
Vector IO Integration Tests / test-matrix (push) Failing after 6s
Unit Tests / unit-tests (3.12) (push) Failing after 4s
Unit Tests / unit-tests (3.13) (push) Failing after 4s
API Conformance Tests / check-schema-compatibility (push) Successful in 14s
UI Tests / ui-tests (22) (push) Successful in 43s
Pre-commit / pre-commit (push) Successful in 1m35s
# What does this PR do? Clean up telemetry code since the telemetry API has been removed. - moved telemetry files out of providers to core - removed from Api ## Test Plan ❯ OTEL_SERVICE_NAME=llama_stack OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 uv run llama stack run starter ❯ curl http://localhost:8321/v1/chat/completions \ -H "Content-Type: application/json" \ -d '{ "model": "openai/gpt-4o-mini", "messages": [ { "role": "user", "content": "Hello!" } ] }' -> verify traces in Grafana CI
52 lines
1.9 KiB
Python
52 lines
1.9 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.
import asyncio
|
|
|
|
from llama_stack.apis.inference import Message
|
|
from llama_stack.apis.safety import Safety, SafetyViolation, ViolationLevel
|
|
from llama_stack.core.telemetry import tracing
|
|
from llama_stack.log import get_logger
|
|
|
|
# Module-level logger, categorized under the meta-reference agents provider.
log = get_logger(name=__name__, category="agents::meta_reference")
|
|
|
|
|
|
class SafetyException(Exception):  # noqa: N818
    """Raised when a safety shield reports an ERROR-level violation.

    Carries the triggering ``SafetyViolation`` on ``self.violation`` and uses
    the violation's user-facing message as the exception message.
    """

    def __init__(self, violation: SafetyViolation):
        self.violation = violation
        super().__init__(violation.user_message)
|
|
|
|
|
|
class ShieldRunnerMixin:
    """Mixin that runs safety shields via the Safety API.

    Holds the ``Safety`` API handle plus optional lists of input/output shield
    identifiers, and provides ``run_multiple_shields`` to evaluate a set of
    shields concurrently against a message list.
    """

    def __init__(
        self,
        safety_api: Safety,
        input_shields: list[str] | None = None,
        output_shields: list[str] | None = None,
    ):
        # Stored as-is; callers read these attribute names directly.
        self.safety_api = safety_api
        self.input_shields = input_shields
        self.output_shields = output_shields

    async def run_multiple_shields(self, messages: list[Message], identifiers: list[str]) -> None:
        """Run every shield in ``identifiers`` over ``messages`` concurrently.

        Raises ``SafetyException`` on the first ERROR-level violation found
        (in ``identifiers`` order); logs a warning for WARN-level violations.
        """

        async def _invoke(shield_id: str):
            # Wrap each shield call in its own tracing span.
            async with tracing.span(f"run_shield_{shield_id}"):
                return await self.safety_api.run_shield(
                    shield_id=shield_id,
                    messages=messages,
                    params={},
                )

        results = await asyncio.gather(*(_invoke(name) for name in identifiers))
        for name, result in zip(identifiers, results, strict=False):
            violation = result.violation
            if not violation:
                continue
            if violation.violation_level == ViolationLevel.ERROR:
                raise SafetyException(violation)
            elif violation.violation_level == ViolationLevel.WARN:
                log.warning(f"[Warn]{name} raised a warning")
|