mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-07-05 13:40:30 +00:00
* add tools to chat completion request
* use templates for generating system prompts
* Moved ToolPromptFormat and jinja templates to llama_models.llama3.api
* <WIP> memory changes
  - inlined AgenticSystemInstanceConfig so API feels more ergonomic
  - renamed it to AgentConfig, AgentInstance -> Agent
  - added a MemoryConfig and `memory` parameter
  - added `attachments` to input and `output_attachments` to the response
  - some naming changes
* InterleavedTextAttachment -> InterleavedTextMedia, introduce memory tool
* flesh out memory banks API
* agentic loop has a RAG implementation
* faiss provider implementation
* memory client works
* re-work tool definitions, fix FastAPI issues, fix tool regressions
* fix agentic_system utils
* basic RAG seems to work
* small bug fixes for inline attachments
* Refactor custom tool execution utilities
* Bug fix, show memory retrieval steps in EventLogger
* No need for api_key for Remote providers
* add special unicode character ↵ to showcase newlines in model prompt templates
* remove api.endpoints imports
* combine datatypes.py and endpoints.py into api.py
* Attachment / add TTL api
* split batch_inference from inference
* minor import fixes
* use a single impl for ChatFormat.decode_assistant_mesage
* use interleaved_text_media_as_str() utility
* Fix api.datatypes imports
* Add blobfile for tiktoken
* Add ToolPromptFormat to ChatFormat.encode_message so that tools are encoded properly
* templates take optional --format={json,function_tag}
* RAG updates
* Add `api build` subcommand -- WIP
* fix
* build + run image seems to work
* <WIP> adapters
* bunch more work to make adapters work
* api build works for conda now
* ollama remote adapter works
* Several smaller fixes to make adapters work. Also, reorganized the pattern of __init__ inside providers so configuration can stay lightweight
* llama distribution -> llama stack + containers (WIP)
* All the new CLI for api + stack work
* Make Fireworks and Together into the Adapter format
* Some quick fixes to the CLI behavior to make it consistent
* Updated README, phew
* Update cli_reference.md
* llama_toolchain/distribution -> llama_toolchain/core
* Add termcolor
* update paths
* Add a log just for consistency
* chmod +x scripts
* Fix api dependencies not getting added to configuration
* missing import, lol
* Delete utils.py; move to agentic system
* Support downloading of URLs for attachments for code interpreter
* Simplify and generalize `llama api build`, yay
* Update `llama stack configure` to be very simple also
* Fix stack start
* Allow building an "adhoc" distribution
* Remove `llama api []` subcommands
* Fixes to llama stack commands and update docs
* Update documentation again and add error messages to llama stack start
* llama stack start -> llama stack run
* Change name of build for less confusion
* Add pyopenapi fork to the repository, update RFC assets
* Remove conflicting annotation
* Added a "--raw" option for model template printing

---------

Co-authored-by: Hardik Shah <hjshah@fb.com>
Co-authored-by: Ashwin Bharambe <ashwin@meta.com>
Co-authored-by: Dalton Flanagan <6599399+dltn@users.noreply.github.com>
96 lines
3.5 KiB
Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

import asyncio

from llama_models.sku_list import resolve_model

from llama_toolchain.common.model_utils import model_local_dir
from llama_toolchain.safety.api import *  # noqa

from .config import SafetyConfig
from .shields import (
    CodeScannerShield,
    InjectionShield,
    JailbreakShield,
    LlamaGuardShield,
    PromptGuardShield,
    ShieldBase,
    ThirdPartyShield,
)


def resolve_and_get_path(model_name: str) -> str:
    """Resolve a model name to the local directory where it was downloaded."""
    model = resolve_model(model_name)
    assert model is not None, f"Could not resolve model {model_name}"
    model_dir = model_local_dir(model.descriptor())
    return model_dir


class MetaReferenceSafetyImpl(Safety):
    def __init__(self, config: SafetyConfig) -> None:
        self.config = config

    async def initialize(self) -> None:
        # Instantiate the configured shields up front so their models are
        # loaded at startup; instance() acts as a singleton factory, so
        # shield_config_to_shield() below reuses these warmed-up objects.
        shield_cfg = self.config.llama_guard_shield
        if shield_cfg is not None:
            model_dir = resolve_and_get_path(shield_cfg.model)
            _ = LlamaGuardShield.instance(
                model_dir=model_dir,
                excluded_categories=shield_cfg.excluded_categories,
                disable_input_check=shield_cfg.disable_input_check,
                disable_output_check=shield_cfg.disable_output_check,
            )

        shield_cfg = self.config.prompt_guard_shield
        if shield_cfg is not None:
            model_dir = resolve_and_get_path(shield_cfg.model)
            _ = PromptGuardShield.instance(model_dir)

    async def run_shields(
        self,
        request: RunShieldRequest,
    ) -> RunShieldResponse:
        shields = [shield_config_to_shield(c, self.config) for c in request.shields]

        # Run all requested shields concurrently over the same messages.
        responses = await asyncio.gather(
            *[shield.run(request.messages) for shield in shields]
        )

        return RunShieldResponse(responses=responses)


def shield_type_equals(a: ShieldType, b: ShieldType):
    # Treat an enum member and its raw string value as equal.
    return a == b or a == b.value


def shield_config_to_shield(
    sc: ShieldDefinition, safety_config: SafetyConfig
) -> ShieldBase:
    if shield_type_equals(sc.shield_type, BuiltinShield.llama_guard):
        assert (
            safety_config.llama_guard_shield is not None
        ), "Cannot use LlamaGuardShield since it is not present in the config"
        model_dir = resolve_and_get_path(safety_config.llama_guard_shield.model)
        return LlamaGuardShield.instance(model_dir=model_dir)
    elif shield_type_equals(sc.shield_type, BuiltinShield.jailbreak_shield):
        assert (
            safety_config.prompt_guard_shield is not None
        ), "Cannot use JailbreakShield since Prompt Guard is not present in the config"
        model_dir = resolve_and_get_path(safety_config.prompt_guard_shield.model)
        return JailbreakShield.instance(model_dir)
    elif shield_type_equals(sc.shield_type, BuiltinShield.injection_shield):
        assert (
            safety_config.prompt_guard_shield is not None
        ), "Cannot use InjectionShield since Prompt Guard is not present in the config"
        model_dir = resolve_and_get_path(safety_config.prompt_guard_shield.model)
        return InjectionShield.instance(model_dir)
    elif shield_type_equals(sc.shield_type, BuiltinShield.code_scanner_guard):
        return CodeScannerShield.instance()
    elif shield_type_equals(sc.shield_type, BuiltinShield.third_party_shield):
        return ThirdPartyShield.instance()
    else:
        raise ValueError(f"Unknown shield type: {sc.shield_type}")
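
For context, a minimal usage sketch of how this implementation might be driven from async code. This is not part of the file above: the import path for the module, the keyword construction of SafetyConfig, RunShieldRequest, and ShieldDefinition, and the UserMessage location are all assumptions based on the field names this file reads, not confirmed API.

# A hedged sketch, not a definitive example. Assumes this file lives at
# llama_toolchain/safety/meta_reference/safety.py and that the request and
# config types accept the field names used in the implementation above.
import asyncio

from llama_models.llama3.api.datatypes import UserMessage  # assumed location

from llama_toolchain.safety.api import *  # noqa
from llama_toolchain.safety.meta_reference.config import SafetyConfig
from llama_toolchain.safety.meta_reference.safety import MetaReferenceSafetyImpl


async def main() -> None:
    # No local shield models configured, so initialize() skips the
    # LlamaGuard / PromptGuard warm-up paths entirely.
    config = SafetyConfig(llama_guard_shield=None, prompt_guard_shield=None)
    impl = MetaReferenceSafetyImpl(config)
    await impl.initialize()

    # Screen one message with the (model-free) code scanner shield.
    request = RunShieldRequest(
        messages=[UserMessage(content="import os; os.system('rm -rf /')")],
        shields=[ShieldDefinition(shield_type=BuiltinShield.code_scanner_guard)],
    )
    response = await impl.run_shields(request)
    for shield_response in response.responses:
        print(shield_response)


if __name__ == "__main__":
    asyncio.run(main())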