feat: make telemetry attributes be dict[str,PrimitiveType] (#1055)

# What does this PR do?
Make attributes in telemetry be only primitive types and avoid arbitrary
nesting.

## Test Plan
```
 LLAMA_STACK_DISABLE_VERSION_CHECK=true llama stack run ~/.llama/distributions/fireworks/fireworks-run.yaml
LLAMA_STACK_BASE_URL=http://localhost:8321 pytest -v tests/client-sdk/agents/test_agents.py  -k "test_builtin_tool_web_search"
# Verified that attributes still show up correctly in jaeger
```
This commit is contained in:
Dinesh Yeduguru 2025-02-11 15:10:17 -08:00 committed by GitHub
parent ab7f802698
commit d8a20e034b
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
4 changed files with 29 additions and 45 deletions

View file

@ -3148,22 +3148,19 @@
"additionalProperties": { "additionalProperties": {
"oneOf": [ "oneOf": [
{ {
"type": "null" "type": "string"
}, },
{ {
"type": "boolean" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{ {
"type": "string" "type": "boolean"
}, },
{ {
"type": "array" "type": "null"
},
{
"type": "object"
} }
] ]
} }
@ -3683,8 +3680,7 @@
"auto", "auto",
"required" "required"
], ],
"description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model.", "description": "Whether tool use is required or automatic. This is a hint to the model which may not be followed. It depends on the Instruction Following capabilities of the model."
"default": "auto"
}, },
"tool_prompt_format": { "tool_prompt_format": {
"type": "string", "type": "string",
@ -6514,22 +6510,19 @@
"additionalProperties": { "additionalProperties": {
"oneOf": [ "oneOf": [
{ {
"type": "null" "type": "string"
}, },
{ {
"type": "boolean" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{ {
"type": "string" "type": "boolean"
}, },
{ {
"type": "array" "type": "null"
},
{
"type": "object"
} }
] ]
} }
@ -6587,22 +6580,19 @@
"additionalProperties": { "additionalProperties": {
"oneOf": [ "oneOf": [
{ {
"type": "null" "type": "string"
}, },
{ {
"type": "boolean" "type": "integer"
}, },
{ {
"type": "number" "type": "number"
}, },
{ {
"type": "string" "type": "boolean"
}, },
{ {
"type": "array" "type": "null"
},
{
"type": "object"
} }
] ]
} }

View file

@ -1956,12 +1956,11 @@ components:
type: object type: object
additionalProperties: additionalProperties:
oneOf: oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string - type: string
- type: array - type: integer
- type: object - type: number
- type: boolean
- type: 'null'
type: type:
type: string type: string
const: metric const: metric
@ -2387,7 +2386,6 @@ components:
Whether tool use is required or automatic. This is a hint to the model Whether tool use is required or automatic. This is a hint to the model
which may not be followed. It depends on the Instruction Following capabilities which may not be followed. It depends on the Instruction Following capabilities
of the model. of the model.
default: auto
tool_prompt_format: tool_prompt_format:
type: string type: string
enum: enum:
@ -4161,12 +4159,11 @@ components:
type: object type: object
additionalProperties: additionalProperties:
oneOf: oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string - type: string
- type: array - type: integer
- type: object - type: number
- type: boolean
- type: 'null'
type: type:
type: string type: string
const: structured_log const: structured_log
@ -4203,12 +4200,11 @@ components:
type: object type: object
additionalProperties: additionalProperties:
oneOf: oneOf:
- type: 'null'
- type: boolean
- type: number
- type: string - type: string
- type: array - type: integer
- type: object - type: number
- type: boolean
- type: 'null'
type: type:
type: string type: string
const: unstructured_log const: unstructured_log

View file

@ -17,6 +17,7 @@ from typing import (
runtime_checkable, runtime_checkable,
) )
from llama_models.llama3.api.datatypes import Primitive
from llama_models.schema_utils import json_schema_type, register_schema, webmethod from llama_models.schema_utils import json_schema_type, register_schema, webmethod
from pydantic import BaseModel, Field from pydantic import BaseModel, Field
from typing_extensions import Annotated from typing_extensions import Annotated
@ -76,7 +77,7 @@ class EventCommon(BaseModel):
trace_id: str trace_id: str
span_id: str span_id: str
timestamp: datetime timestamp: datetime
attributes: Optional[Dict[str, Any]] = Field(default_factory=dict) attributes: Optional[Dict[str, Primitive]] = Field(default_factory=dict)
@json_schema_type @json_schema_type

View file

@ -9,12 +9,13 @@ import inspect
from functools import wraps from functools import wraps
from typing import Any, AsyncGenerator, Callable, Type, TypeVar from typing import Any, AsyncGenerator, Callable, Type, TypeVar
from llama_models.llama3.api.datatypes import Primitive
from pydantic import BaseModel from pydantic import BaseModel
T = TypeVar("T") T = TypeVar("T")
def serialize_value(value: Any) -> Any: def serialize_value(value: Any) -> Primitive:
"""Serialize a single value into JSON-compatible format.""" """Serialize a single value into JSON-compatible format."""
if value is None: if value is None:
return "" return ""
@ -24,10 +25,6 @@ def serialize_value(value: Any) -> Any:
return value._name_ return value._name_
elif isinstance(value, BaseModel): elif isinstance(value, BaseModel):
return value.model_dump_json() return value.model_dump_json()
elif isinstance(value, (list, tuple, set)):
return [serialize_value(item) for item in value]
elif isinstance(value, dict):
return {str(k): serialize_value(v) for k, v in value.items()}
else: else:
return str(value) return str(value)