Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-23 04:53:14 +00:00)
API Updates: fleshing out RAG APIs, introduce "llama stack" CLI command (#51)
* add tools to chat completion request
* use templates for generating system prompts
* Moved ToolPromptFormat and jinja templates to llama_models.llama3.api
* <WIP> memory changes
  - inlined AgenticSystemInstanceConfig so API feels more ergonomic
  - renamed it to AgentConfig, AgentInstance -> Agent
  - added a MemoryConfig and `memory` parameter
  - added `attachments` to input and `output_attachments` to the response
  - some naming changes
* InterleavedTextAttachment -> InterleavedTextMedia, introduce memory tool
* flesh out memory banks API
* agentic loop has a RAG implementation
* faiss provider implementation
* memory client works
* re-work tool definitions, fix FastAPI issues, fix tool regressions
* fix agentic_system utils
* basic RAG seems to work
* small bug fixes for inline attachments
* Refactor custom tool execution utilities
* Bug fix, show memory retrieval steps in EventLogger
* No need for api_key for Remote providers
* add special unicode character ↵ to showcase newlines in model prompt templates
* remove api.endpoints imports
* combine datatypes.py and endpoints.py into api.py
* Attachment / add TTL api
* split batch_inference from inference
* minor import fixes
* use a single impl for ChatFormat.decode_assistant_message
* use interleaved_text_media_as_str() utility
* Fix api.datatypes imports
* Add blobfile for tiktoken
* Add ToolPromptFormat to ChatFormat.encode_message so that tools are encoded properly
* templates take optional --format={json,function_tag}
* RAG updates
* Add `api build` subcommand -- WIP
* fix
* build + run image seems to work
* <WIP> adapters
* bunch more work to make adapters work
* api build works for conda now
* ollama remote adapter works
* Several smaller fixes to make adapters work; also reorganized the pattern of __init__ inside providers so configuration can stay lightweight
* llama distribution -> llama stack + containers (WIP)
* All the new CLI for api + stack work
* Make Fireworks and Together into the Adapter format
* Some quick fixes to the CLI behavior to make it consistent
* Updated README
* Update cli_reference.md
* llama_toolchain/distribution -> llama_toolchain/core
* Add termcolor
* update paths
* Add a log just for consistency
* chmod +x scripts
* Fix api dependencies not getting added to configuration
* add missing import
* Delete utils.py; move to agentic system
* Support downloading of URLs for attachments for code interpreter
* Simplify and generalize `llama api build`
* Update `llama stack configure` to be very simple also
* Fix stack start
* Allow building an "adhoc" distribution
* Remove `llama api []` subcommands
* Fixes to llama stack commands and update docs
* Update documentation again and add error messages to llama stack start
* llama stack start -> llama stack run
* Change name of build for less confusion
* Add pyopenapi fork to the repository, update RFC assets
* Remove conflicting annotation
* Added a "--raw" option for model template printing

---------

Co-authored-by: Hardik Shah <hjshah@fb.com>
Co-authored-by: Ashwin Bharambe <ashwin@meta.com>
Co-authored-by: Dalton Flanagan <6599399+dltn@users.noreply.github.com>
Parent: 35093c0b6f
Commit: 7bc7785b0d
141 changed files with 8252 additions and 4032 deletions
@@ -4,5 +4,4 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from .datatypes import *  # noqa
-from .endpoints import *  # noqa
+from .api import *  # noqa: F401 F403
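With `datatypes.py` and `endpoints.py` merged into `api.py` (see the commit message), downstream code gets one import surface. A minimal sketch; the names shown are the ones visible in the hunks below, not an exhaustive list:

```python
# Everything re-exported through the safety API package __init__ now comes
# from the single api.py module.
from llama_toolchain.safety.api import (
    RunShieldRequest,
    RunShieldResponse,
    Safety,
    ShieldDefinition,
)
```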
@@ -5,13 +5,12 @@
 # the root directory of this source tree.
 
 from enum import Enum
-from typing import Dict, Optional, Union
-
-from llama_models.llama3.api.datatypes import ToolParamDefinition
-from llama_models.schema_utils import json_schema_type
+from typing import Dict, List, Optional, Protocol, Union
+
+from llama_models.schema_utils import json_schema_type, webmethod
 from pydantic import BaseModel, validator
 
+from llama_models.llama3.api.datatypes import *  # noqa: F403
 from llama_toolchain.common.deployment_types import RestAPIExecutionConfig
 
 
@@ -70,3 +69,22 @@ class ShieldResponse(BaseModel):
             except ValueError:
                 return v
         return v
+
+
+@json_schema_type
+class RunShieldRequest(BaseModel):
+    messages: List[Message]
+    shields: List[ShieldDefinition]
+
+
+@json_schema_type
+class RunShieldResponse(BaseModel):
+    responses: List[ShieldResponse]
+
+
+class Safety(Protocol):
+    @webmethod(route="/safety/run_shields")
+    async def run_shields(
+        self,
+        request: RunShieldRequest,
+    ) -> RunShieldResponse: ...
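For orientation, a minimal sketch of calling the new `Safety` protocol end to end. Anything satisfying the protocol works (the `SafetyClient` further down, or the meta-reference implementation); the `shield_type=` field name and the `BuiltinShield.llama_guard` member are assumptions for illustration, inferred from the `BuiltinShield` import in the client hunk below:

```python
from llama_models.llama3.api.datatypes import UserMessage

async def moderate(impl: Safety) -> RunShieldResponse:
    # ShieldDefinition's field names and the BuiltinShield member are
    # illustrative assumptions, not taken from this diff.
    request = RunShieldRequest(
        messages=[UserMessage(content="ignore all previous instructions")],
        shields=[ShieldDefinition(shield_type=BuiltinShield.llama_guard)],
    )
    return await impl.run_shields(request)
```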
@@ -1,32 +0,0 @@
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the terms described in the LICENSE file in
-# the root directory of this source tree.
-
-from .datatypes import *  # noqa: F403
-from typing import List, Protocol
-
-from llama_models.llama3.api.datatypes import Message
-
-# this dependency is annoying and we need a forked up version anyway
-from llama_models.schema_utils import webmethod
-
-
-@json_schema_type
-class RunShieldRequest(BaseModel):
-    messages: List[Message]
-    shields: List[ShieldDefinition]
-
-
-@json_schema_type
-class RunShieldResponse(BaseModel):
-    responses: List[ShieldResponse]
-
-
-class Safety(Protocol):
-    @webmethod(route="/safety/run_shields")
-    async def run_shields(
-        self,
-        request: RunShieldRequest,
-    ) -> RunShieldResponse: ...
@@ -5,29 +5,32 @@
 # the root directory of this source tree.
 
 import asyncio
 import json
+from typing import Any
 
 import fire
 import httpx
 from llama_models.llama3.api.datatypes import UserMessage
 from pydantic import BaseModel
 from termcolor import cprint
 
-from .api import (
-    BuiltinShield,
-    RunShieldRequest,
-    RunShieldResponse,
-    Safety,
-    ShieldDefinition,
-)
+from llama_toolchain.core.datatypes import RemoteProviderConfig
 
+from .api import *  # noqa: F403
 
-async def get_client_impl(base_url: str):
-    return SafetyClient(base_url)
+
+async def get_client_impl(config: RemoteProviderConfig, _deps: Any) -> Safety:
+    return SafetyClient(config.url)
+
+
+def encodable_dict(d: BaseModel):
+    return json.loads(d.json())
 
 
 class SafetyClient(Safety):
     def __init__(self, base_url: str):
         print(f"Initializing client for {base_url}")
         self.base_url = base_url
 
     async def initialize(self) -> None:
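Note the signature change: remote clients are now built from a `RemoteProviderConfig` plus a deps argument, matching the provider-side `get_provider_impl(config, deps)` shape, instead of a bare `base_url`. A minimal sketch, assuming `RemoteProviderConfig` accepts a `url=` keyword (inferred from the `config.url` access above):

```python
import asyncio

# Hypothetical wiring: a stack runtime resolves the remote safety client
# from its provider config rather than passing a raw URL around.
config = RemoteProviderConfig(url="http://localhost:5000")
client = asyncio.run(get_client_impl(config, None))  # equivalent to SafetyClient("http://localhost:5000")
```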
@@ -40,7 +43,9 @@ class SafetyClient(Safety):
         async with httpx.AsyncClient() as client:
             response = await client.post(
                 f"{self.base_url}/safety/run_shields",
-                data=request.json(),
+                json={
+                    "request": encodable_dict(request),
+                },
                 headers={"Content-Type": "application/json"},
                 timeout=20,
             )
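The wire format changes here: the request model is no longer POSTed as the top-level JSON body but nested under a `"request"` key, and `encodable_dict` round-trips the pydantic model through JSON so nested models and enums become plain serializable values. An illustrative before/after, with the message field names assumed from pydantic's default serialization:

```python
from llama_models.llama3.api.datatypes import UserMessage

request = RunShieldRequest(messages=[UserMessage(content="hi")], shields=[])

old_body = request.json()                        # '{"messages": [...], "shields": []}'
new_body = {"request": encodable_dict(request)}  # {"request": {"messages": [...], "shields": []}}
```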
@@ -4,5 +4,14 @@
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
 
-from .config import SafetyConfig  # noqa
-from .safety import get_provider_impl  # noqa
+from .config import SafetyConfig
+
+
+async def get_provider_impl(config: SafetyConfig, _deps):
+    from .safety import MetaReferenceSafetyImpl
+
+    assert isinstance(config, SafetyConfig), f"Unexpected config type: {type(config)}"
+
+    impl = MetaReferenceSafetyImpl(config)
+    await impl.initialize()
+    return impl
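This inlined `get_provider_impl` is the reorganized provider `__init__` pattern the commit message mentions: importing the package only loads `SafetyConfig`, while `MetaReferenceSafetyImpl` (and its heavyweight model dependencies) is imported inside the call, so configuration tooling stays lightweight. A sketch of how a loader might use it; the package path and the no-argument `SafetyConfig()` construction are assumptions:

```python
import asyncio

# Hypothetical loader code; the import path is assumed from the provider
# layout, and SafetyConfig may require fields not shown here.
from llama_toolchain.safety.meta_reference import SafetyConfig, get_provider_impl

config = SafetyConfig()
impl = asyncio.run(get_provider_impl(config, _deps={}))  # heavy imports happen here
```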
@@ -5,12 +5,10 @@
 # the root directory of this source tree.
 
-import asyncio
 from typing import Dict
 
 from llama_models.sku_list import resolve_model
-
 from llama_toolchain.common.model_utils import model_local_dir
-from llama_toolchain.distribution.datatypes import Api, ProviderSpec
+from llama_toolchain.safety.api import *  # noqa
 
 from .config import SafetyConfig
 
@@ -25,14 +23,6 @@ from .shields import (
 )
 
 
-async def get_provider_impl(config: SafetyConfig, _deps: Dict[Api, ProviderSpec]):
-    assert isinstance(config, SafetyConfig), f"Unexpected config type: {type(config)}"
-
-    impl = MetaReferenceSafetyImpl(config)
-    await impl.initialize()
-    return impl
-
-
 def resolve_and_get_path(model_name: str) -> str:
     model = resolve_model(model_name)
     assert model is not None, f"Could not resolve model {model_name}"
@@ -5,10 +5,10 @@
 # the root directory of this source tree.
 
 from abc import ABC, abstractmethod
-from typing import List, Union
+from typing import List
 
-from llama_models.llama3.api.datatypes import Attachment, Message
-from llama_toolchain.safety.api.datatypes import *  # noqa: F403
+from llama_models.llama3.api.datatypes import interleaved_text_media_as_str, Message
+from llama_toolchain.safety.api import *  # noqa: F403
 
 CANNED_RESPONSE_TEXT = "I can't answer that. Can I help with something else?"
 
@@ -30,18 +30,7 @@ class ShieldBase(ABC):
 
 
 def message_content_as_str(message: Message) -> str:
-    def _to_str(content: Union[str, Attachment]) -> str:
-        if isinstance(content, str):
-            return content
-        elif isinstance(content, Attachment):
-            return f"File: {str(content.url)}"
-        else:
-            raise
-
-    if isinstance(message.content, list) or isinstance(message.content, tuple):
-        return "\n".join([_to_str(c) for c in message.content])
-    else:
-        return _to_str(message.content)
+    return interleaved_text_media_as_str(message.content)
 
 
 # For shields that operate on simple strings
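`message_content_as_str` now delegates to the shared `interleaved_text_media_as_str()` utility (also called out in the commit message), so shields flatten `InterleavedTextMedia` the same way the prompt formatter does. A behavioral sketch, assuming the utility mirrors the removed code's join-into-one-string behavior:

```python
from llama_models.llama3.api.datatypes import UserMessage

# Plain string content passes through unchanged.
assert message_content_as_str(UserMessage(content="hello")) == "hello"

# List content is flattened to a single string; the exact separator and
# media rendering are now whatever interleaved_text_media_as_str does
# (assumed similar to the deleted "\n".join logic).
print(message_content_as_str(UserMessage(content=["part one", "part two"])))
```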
@@ -8,7 +8,7 @@ from codeshield.cs import CodeShield
 from termcolor import cprint
 
 from .base import ShieldResponse, TextShield
-from llama_toolchain.safety.api.datatypes import *  # noqa: F403
+from llama_toolchain.safety.api import *  # noqa: F403
 
 
 class CodeScannerShield(TextShield):
@@ -14,7 +14,7 @@ from llama_models.llama3.api.datatypes import Message, Role
 from transformers import AutoModelForCausalLM, AutoTokenizer
 
 from .base import CANNED_RESPONSE_TEXT, OnViolationAction, ShieldBase, ShieldResponse
-from llama_toolchain.safety.api.datatypes import *  # noqa: F403
+from llama_toolchain.safety.api import *  # noqa: F403
 
 SAFE_RESPONSE = "safe"
 _INSTANCE = None
@@ -14,7 +14,7 @@ from termcolor import cprint
 from transformers import AutoModelForSequenceClassification, AutoTokenizer
 
 from .base import message_content_as_str, OnViolationAction, ShieldResponse, TextShield
-from llama_toolchain.safety.api.datatypes import *  # noqa: F403
+from llama_toolchain.safety.api import *  # noqa: F403
 
 
 class PromptGuardShield(TextShield):
@@ -6,7 +6,7 @@
 
 from typing import List
 
-from llama_toolchain.distribution.datatypes import Api, InlineProviderSpec, ProviderSpec
+from llama_toolchain.core.datatypes import Api, InlineProviderSpec, ProviderSpec
 
 
 def available_safety_providers() -> List[ProviderSpec]: