# What does this PR do?

- As the title says, cleans up `import *`'s
- Upgrades tests to make them more robust to bad model outputs
- Removes `import *`'s in `llama_stack/apis/*` (skipping `__init__` modules)
  <img width="465" alt="image" src="https://github.com/user-attachments/assets/d8339c13-3b40-4ba5-9c53-0d2329726ee2" />
- Runs `sh run_openapi_generator.sh`; no types are affected

## Test Plan

### Providers Tests

**agents**
```
pytest -v -s llama_stack/providers/tests/agents/test_agents.py -m "together" --safety-shield meta-llama/Llama-Guard-3-8B --inference-model meta-llama/Llama-3.1-405B-Instruct-FP8
```

**inference**
```bash
# meta-reference
torchrun $CONDA_PREFIX/bin/pytest -v -s -k "meta_reference" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py
torchrun $CONDA_PREFIX/bin/pytest -v -s -k "meta_reference" --inference-model="meta-llama/Llama-3.2-11B-Vision-Instruct" ./llama_stack/providers/tests/inference/test_vision_inference.py

# together
pytest -v -s -k "together" --inference-model="meta-llama/Llama-3.1-8B-Instruct" ./llama_stack/providers/tests/inference/test_text_inference.py
pytest -v -s -k "together" --inference-model="meta-llama/Llama-3.2-11B-Vision-Instruct" ./llama_stack/providers/tests/inference/test_vision_inference.py

pytest ./llama_stack/providers/tests/inference/test_prompt_adapter.py
```

**safety**
```
pytest -v -s llama_stack/providers/tests/safety/test_safety.py -m together --safety-shield meta-llama/Llama-Guard-3-8B
```

**memory**
```
pytest -v -s llama_stack/providers/tests/memory/test_memory.py -m "sentence_transformers" --env EMBEDDING_DIMENSION=384
```

**scoring**
```
pytest -v -s -m llm_as_judge_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py --judge-model meta-llama/Llama-3.2-3B-Instruct
pytest -v -s -m basic_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py
pytest -v -s -m braintrust_scoring_together_inference llama_stack/providers/tests/scoring/test_scoring.py
```

**datasetio**
```
pytest -v -s -m localfs llama_stack/providers/tests/datasetio/test_datasetio.py
pytest -v -s -m huggingface llama_stack/providers/tests/datasetio/test_datasetio.py
```

**eval**
```
pytest -v -s -m meta_reference_eval_together_inference llama_stack/providers/tests/eval/test_eval.py
pytest -v -s -m meta_reference_eval_together_inference_huggingface_datasetio llama_stack/providers/tests/eval/test_eval.py
```

### Client-SDK Tests
```
LLAMA_STACK_BASE_URL=http://localhost:5000 pytest -v ./tests/client-sdk
```

### llama-stack-apps
```
PORT=5000
LOCALHOST=localhost

python -m examples.agents.hello $LOCALHOST $PORT
python -m examples.agents.inflation $LOCALHOST $PORT
python -m examples.agents.podcast_transcript $LOCALHOST $PORT
python -m examples.agents.rag_as_attachments $LOCALHOST $PORT
python -m examples.agents.rag_with_memory_bank $LOCALHOST $PORT
python -m examples.safety.llama_guard_demo_mm $LOCALHOST $PORT
python -m examples.agents.e2e_loop_with_custom_tools $LOCALHOST $PORT

# Vision model
python -m examples.interior_design_assistant.app
python -m examples.agent_store.app $LOCALHOST $PORT
```

### CLI
```
which llama
llama model prompt-format -m Llama3.2-11B-Vision-Instruct
llama model list
llama stack list-apis
llama stack list-providers inference
llama stack build --template ollama --image-type conda
```

### Distributions Tests

**ollama**
```
llama stack build --template ollama --image-type conda
ollama run llama3.2:1b-instruct-fp16
llama stack run ./llama_stack/templates/ollama/run.yaml --env INFERENCE_MODEL=meta-llama/Llama-3.2-1B-Instruct
```

**fireworks**
```
llama stack build --template fireworks --image-type conda
llama stack run ./llama_stack/templates/fireworks/run.yaml
```

**together**
```
llama stack build --template together --image-type conda
llama stack run ./llama_stack/templates/together/run.yaml
```

**tgi**
```
llama stack run ./llama_stack/templates/tgi/run.yaml --env TGI_URL=http://0.0.0.0:5009 --env INFERENCE_MODEL=meta-llama/Llama-3.1-8B-Instruct
```

## Sources

Please link relevant resources if necessary.

## Before submitting

- [ ] This PR fixes a typo or improves the docs (you can dismiss the other checks if that's the case).
- [ ] Ran pre-commit to handle lint / formatting issues.
- [ ] Read the [contributor guideline](https://github.com/meta-llama/llama-stack/blob/main/CONTRIBUTING.md), Pull Request section?
- [ ] Updated relevant documentation.
- [ ] Wrote necessary unit or integration tests.
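To illustrate the `import *` cleanup this PR performs, here is a minimal before/after sketch; the exact import list varies per module, and the single name below is just one example taken from the module shown further down:

```python
# Before: a star import hides the module's real dependencies and can
# silently shadow names.
# from llama_stack.apis.inference import *

# After: dependencies are spelled out, so linters and readers can see
# exactly what the module uses.
from llama_stack.apis.inference import ToolResponseMessage
```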
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Optional

from llama_models.llama3.api.datatypes import (
    ToolPromptFormat,
    # Used by the memory_retrieval handler below; previously pulled in
    # implicitly via `import *`.
    interleaved_text_media_as_str,
)
from llama_models.llama3.api.tool_utils import ToolUtils
from termcolor import cprint

from llama_stack.apis.agents import AgentTurnResponseEventType, StepType
from llama_stack.apis.inference import ToolResponseMessage


class LogEvent:
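    """A printable console log line: optional role prefix, content, color, and line ending."""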
    def __init__(
        self,
        role: Optional[str] = None,
        content: str = "",
        end: str = "\n",
        color: str = "white",
    ):
        self.role = role
        self.content = content
        self.color = color
        self.end = "\n" if end is None else end

    def __str__(self):
        if self.role is not None:
            return f"{self.role}> {self.content}"
        else:
            return f"{self.content}"

    def print(self, flush=True):
        cprint(str(self), color=self.color, end=self.end, flush=flush)


EventType = AgentTurnResponseEventType


class EventLogger:
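    """Converts a stream of agent turn-response chunks into printable LogEvents."""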
    async def log(
        self,
        event_generator,
        stream=True,
        tool_prompt_format: ToolPromptFormat = ToolPromptFormat.json,
    ):
        previous_event_type = None
        previous_step_type = None

        async for chunk in event_generator:
            if not hasattr(chunk, "event"):
                # Need to check for a custom tool first, since it does not
                # produce an event but instead a Message.
                if isinstance(chunk, ToolResponseMessage):
                    yield chunk, LogEvent(
                        role="CustomTool", content=chunk.content, color="grey"
                    )
                continue

            event = chunk.event
            event_type = event.payload.event_type
            if event_type in {
                EventType.turn_start.value,
                EventType.turn_complete.value,
            }:
                # Currently not logging any turn-related info
                yield event, None
                continue

            step_type = event.payload.step_type
            # handle safety
            if (
                step_type == StepType.shield_call
                and event_type == EventType.step_complete.value
            ):
                violation = event.payload.step_details.violation
                if not violation:
                    yield event, LogEvent(
                        role=step_type, content="No Violation", color="magenta"
                    )
                else:
                    yield event, LogEvent(
                        role=step_type,
                        content=f"{violation.metadata} {violation.user_message}",
                        color="red",
                    )

            # handle inference
            if step_type == StepType.inference:
                if stream:
                    if event_type == EventType.step_start.value:
                        # TODO: Currently this event is never received
                        yield event, LogEvent(
                            role=step_type, content="", end="", color="yellow"
                        )
                    elif event_type == EventType.step_progress.value:
                        # HACK: if the previous event was not an inference
                        # step_progress, this is the first chunk of the model's
                        # response (i.e. the de facto step_start for inference),
                        # so begin the output line with its role prefix.
                        if (
                            previous_event_type != EventType.step_progress.value
                            and previous_step_type != StepType.inference
                        ):
                            yield event, LogEvent(
                                role=step_type, content="", end="", color="yellow"
                            )

                        if event.payload.tool_call_delta:
                            if isinstance(event.payload.tool_call_delta.content, str):
                                yield event, LogEvent(
                                    role=None,
                                    content=event.payload.tool_call_delta.content,
                                    end="",
                                    color="cyan",
                                )
                        else:
                            yield event, LogEvent(
                                role=None,
                                content=event.payload.text_delta,
                                end="",
                                color="yellow",
                            )
                    else:
                        # step_complete
                        yield event, LogEvent(role=None, content="")

                else:
                    # Not streaming
                    if event_type == EventType.step_complete.value:
                        response = event.payload.step_details.model_response
                        if response.tool_calls:
                            content = ToolUtils.encode_tool_call(
                                response.tool_calls[0], tool_prompt_format
                            )
                        else:
                            content = response.content
                        yield event, LogEvent(
                            role=step_type,
                            content=content,
                            color="yellow",
                        )

            # handle tool_execution
            if (
                step_type == StepType.tool_execution
                # Only print tool calls and responses at the step_complete event
                and event_type == EventType.step_complete.value
            ):
                details = event.payload.step_details
                for t in details.tool_calls:
                    yield event, LogEvent(
                        role=step_type,
                        content=f"Tool:{t.tool_name} Args:{t.arguments}",
                        color="green",
                    )
                for r in details.tool_responses:
                    yield event, LogEvent(
                        role=step_type,
                        content=f"Tool:{r.tool_name} Response:{r.content}",
                        color="green",
                    )

            # handle memory_retrieval
            if (
                step_type == StepType.memory_retrieval
                and event_type == EventType.step_complete.value
            ):
                details = event.payload.step_details
                inserted_context = interleaved_text_media_as_str(
                    details.inserted_context
                )
                content = f"fetched {len(inserted_context)} bytes from {details.memory_bank_ids}"

                yield event, LogEvent(
                    role=step_type,
                    content=content,
                    color="cyan",
                )

            previous_event_type = event_type
            previous_step_type = step_type
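

# Example usage (a minimal sketch): `turn_stream` is assumed to be an async
# stream of agent turn-response chunks, e.g. from a streaming create-turn
# call on an agents client (name hypothetical).
async def print_turn(turn_stream) -> None:
    async for event, log_event in EventLogger().log(turn_stream):
        if log_event is not None:
            log_event.print()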