Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-27 18:50:41 +00:00
* add tools to chat completion request
* use templates for generating system prompts
* Moved ToolPromptFormat and jinja templates to llama_models.llama3.api
* <WIP> memory changes
  - inlined AgenticSystemInstanceConfig so the API feels more ergonomic
  - renamed it to AgentConfig, AgentInstance -> Agent
  - added a MemoryConfig and `memory` parameter
  - added `attachments` to the input and `output_attachments` to the response
  - some naming changes
* InterleavedTextAttachment -> InterleavedTextMedia, introduce memory tool
* flesh out memory banks API
* agentic loop has a RAG implementation
* faiss provider implementation
* memory client works
* re-work tool definitions, fix FastAPI issues, fix tool regressions
* fix agentic_system utils
* basic RAG seems to work
* small bug fixes for inline attachments
* Refactor custom tool execution utilities
* Bug fix: show memory retrieval steps in EventLogger
* No need for api_key for Remote providers
* add special unicode character ↵ to showcase newlines in model prompt templates
* remove api.endpoints imports
* combine datatypes.py and endpoints.py into api.py
* Attachment / add TTL API
* split batch_inference from inference
* minor import fixes
* use a single impl for ChatFormat.decode_assistant_mesage
* use the interleaved_text_media_as_str() utility
* Fix api.datatypes imports
* Add blobfile for tiktoken
* Add ToolPromptFormat to ChatFormat.encode_message so that tools are encoded properly
* templates take an optional --format={json,function_tag}
* RAG updates
* Add `api build` subcommand -- WIP
* fix
* build + run image seems to work
* <WIP> adapters
* bunch more work to make adapters work
* api build works for conda now
* ollama remote adapter works
* Several smaller fixes to make adapters work; also reorganized the pattern of __init__ inside providers so configuration can stay lightweight
* llama distribution -> llama stack + containers (WIP)
* All the new CLI for api + stack work
* Make Fireworks and Together into the Adapter format
* Some quick fixes to the CLI behavior to make it consistent
* Updated README, phew
* Update cli_reference.md
* llama_toolchain/distribution -> llama_toolchain/core
* Add termcolor
* update paths
* Add a log just for consistency
* chmod +x scripts
* Fix api dependencies not getting added to configuration
* missing import lol
* Delete utils.py; move to agentic system
* Support downloading of URLs for attachments for code interpreter
* Simplify and generalize `llama api build`, yay
* Update `llama stack configure` to be very simple also
* Fix stack start
* Allow building an "adhoc" distribution
* Remove `llama api []` subcommands
* Fixes to llama stack commands and update docs
* Update documentation again and add error messages to llama stack start
* llama stack start -> llama stack run
* Change name of build for less confusion
* Add pyopenapi fork to the repository, update RFC assets
* Remove conflicting annotation
* Added a "--raw" option for model template printing

---------

Co-authored-by: Hardik Shah <hjshah@fb.com>
Co-authored-by: Ashwin Bharambe <ashwin@meta.com>
Co-authored-by: Dalton Flanagan <6599399+dltn@users.noreply.github.com>
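Several entries above concern how tool definitions are rendered into the model prompt. As a rough sketch of the `--format={json,function_tag}` choice, assuming `ToolPromptFormat` is the enum the notes say moved into `llama_models.llama3.api` and that its member names match the CLI flag values; the `pick_format` helper is hypothetical, not part of the codebase:

from llama_models.llama3.api.datatypes import ToolPromptFormat  # assumed location, per the notes above

def pick_format(name: str) -> ToolPromptFormat:
    # Mirrors --format={json,function_tag}: `json` has the model emit tool
    # calls as JSON objects, while `function_tag` wraps them in <function=...>
    # tags. Lookup by member name assumes the enum members match flag values.
    return ToolPromptFormat[name]

fmt = pick_format("json")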
65 lines · 2.1 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import List

from llama_models.llama3.api.datatypes import Message, Role, UserMessage
from termcolor import cprint

from llama_toolchain.safety.api import (
    OnViolationAction,
    RunShieldRequest,
    Safety,
    ShieldDefinition,
    ShieldResponse,
)


class SafetyException(Exception):  # noqa: N818
    def __init__(self, response: ShieldResponse):
        self.response = response
        super().__init__(response.violation_return_message)


class ShieldRunnerMixin:
    def __init__(
        self,
        safety_api: Safety,
        input_shields: List[ShieldDefinition] = None,
        output_shields: List[ShieldDefinition] = None,
    ):
        self.safety_api = safety_api
        self.input_shields = input_shields
        self.output_shields = output_shields

    async def run_shields(
        self, messages: List[Message], shields: List[ShieldDefinition]
    ) -> List[ShieldResponse]:
        messages = messages.copy()
        # some shields like llama-guard require the first message to be a user message
        # since this might be a tool call, first role might not be user
        if len(messages) > 0 and messages[0].role != Role.user.value:
            messages[0] = UserMessage(content=messages[0].content)

        res = await self.safety_api.run_shields(
            RunShieldRequest(
                messages=messages,
                shields=shields,
            )
        )

        results = res.responses
        for shield, r in zip(shields, results):
            if r.is_violation:
                if shield.on_violation_action == OnViolationAction.RAISE:
                    raise SafetyException(r)
                elif shield.on_violation_action == OnViolationAction.WARN:
                    cprint(
                        f"[Warn]{shield.__class__.__name__} raised a warning",
                        color="red",
                    )

        return results
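A minimal usage sketch, not part of the file above: it shows one way a class could mix in ShieldRunnerMixin to screen input before inference. It reuses the file's imports; `EchoAgent`, the `shield_type` field, and its "llama_guard" value are illustrative assumptions, since only `on_violation_action` is confirmed by the code above.

class EchoAgent(ShieldRunnerMixin):
    def __init__(self, safety_api: Safety):
        super().__init__(
            safety_api,
            input_shields=[
                # `on_violation_action` matches the field read in run_shields();
                # `shield_type` and its value are assumed for illustration.
                ShieldDefinition(
                    shield_type="llama_guard",
                    on_violation_action=OnViolationAction.RAISE,
                )
            ],
            output_shields=[],
        )

    async def respond(self, text: str) -> str:
        messages = [UserMessage(content=text)]
        try:
            # RAISE-configured shields surface violations as SafetyException;
            # WARN-configured ones only print and still return their response.
            await self.run_shields(messages, self.input_shields)
        except SafetyException as e:
            # str(e) is the shield's violation_return_message (see above).
            return str(e)
        # A real agent would call inference here; this sketch just echoes.
        return text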