tracing for APIs

This commit is contained in:
Dinesh Yeduguru 2024-11-26 14:07:48 -08:00
parent c2a4850a79
commit af8a1fe5b3
8 changed files with 126 additions and 63 deletions

View file

@@ -23,6 +23,7 @@ from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, ConfigDict, Field
from typing_extensions import Annotated
from llama_stack.distribution.tracing import trace_protocol, traced
from llama_models.llama3.api.datatypes import * # noqa: F403
from llama_stack.apis.common.deployment_types import * # noqa: F403
from llama_stack.apis.inference import * # noqa: F403
@@ -418,6 +419,7 @@ class AgentStepResponse(BaseModel):
@runtime_checkable
@trace_protocol
class Agents(Protocol):
@webmethod(route="/agents/create")
async def create_agent(
@@ -426,6 +428,7 @@ class Agents(Protocol):
) -> AgentCreateResponse: ...
@webmethod(route="/agents/turn/create")
@traced(input="messages")
async def create_agent_turn(
self,
agent_id: str,

View file

@@ -21,7 +21,7 @@ from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field
from typing_extensions import Annotated
from llama_stack.distribution.tracing import trace_protocol
from llama_stack.distribution.tracing import trace_protocol, traced
from llama_models.llama3.api.datatypes import * # noqa: F403
from llama_stack.apis.models import * # noqa: F403
@@ -227,6 +227,7 @@ class Inference(Protocol):
model_store: ModelStore
@webmethod(route="/inference/completion")
@traced(input="content")
async def completion(
self,
model_id: str,
@@ -238,6 +239,7 @@ class Inference(Protocol):
) -> Union[CompletionResponse, AsyncIterator[CompletionResponseStreamChunk]]: ...
@webmethod(route="/inference/chat-completion")
@traced(input="messages")
async def chat_completion(
self,
model_id: str,
@@ -255,6 +257,7 @@ class Inference(Protocol):
]: ...
@webmethod(route="/inference/embeddings")
@traced(input="contents")
async def embeddings(
self,
model_id: str,

View file

@@ -16,7 +16,7 @@ from pydantic import BaseModel, Field
from llama_models.llama3.api.datatypes import * # noqa: F403
from llama_stack.apis.memory_banks import * # noqa: F403
from llama_stack.distribution.tracing import trace_protocol
from llama_stack.distribution.tracing import trace_protocol, traced
@json_schema_type
@@ -50,6 +50,7 @@ class Memory(Protocol):
# this will just block now until documents are inserted, but it should
# probably return a Job instance which can be polled for completion
@traced(input="documents")
@webmethod(route="/memory/insert")
async def insert_documents(
self,
@@ -59,6 +60,7 @@ class Memory(Protocol):
) -> None: ...
@webmethod(route="/memory/query")
@traced(input="query")
async def query_documents(
self,
bank_id: str,

View file

@@ -20,6 +20,7 @@ from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, Field
from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.distribution.tracing import trace_protocol
@json_schema_type
@@ -129,6 +130,7 @@ class MemoryBankInput(BaseModel):
@runtime_checkable
@trace_protocol
class MemoryBanks(Protocol):
@webmethod(route="/memory-banks/list", method="GET")
async def list_memory_banks(self) -> List[MemoryBank]: ...

View file

@@ -10,6 +10,7 @@ from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel, ConfigDict, Field
from llama_stack.apis.resource import Resource, ResourceType
from llama_stack.distribution.tracing import trace_protocol
class CommonModelFields(BaseModel):
@@ -43,6 +44,7 @@ class ModelInput(CommonModelFields):
@runtime_checkable
@trace_protocol
class Models(Protocol):
@webmethod(route="/models/list", method="GET")
async def list_models(self) -> List[Model]: ...

View file

@@ -10,6 +10,8 @@ from typing import Any, Dict, List, Protocol, runtime_checkable
from llama_models.schema_utils import json_schema_type, webmethod
from pydantic import BaseModel
from llama_stack.distribution.tracing import trace_protocol, traced
from llama_models.llama3.api.datatypes import * # noqa: F403
from llama_stack.apis.shields import * # noqa: F403
@@ -43,10 +45,12 @@ class ShieldStore(Protocol):
@runtime_checkable
@trace_protocol
class Safety(Protocol):
shield_store: ShieldStore
@webmethod(route="/safety/run-shield")
@traced(input="messages")
async def run_shield(
self,
shield_id: str,