Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-29 15:23:51 +00:00)
commit c2b7b462e9 (parent 4a70f3d2ba)

    use agent.inference_api instead of passing host/port again

3 changed files with 15 additions and 21 deletions
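In short: before this change, the LLM-based RAG query generator read a host and port from its own config and constructed a fresh InferenceClient; after it, the agent threads the inference API it already holds down into query generation as a keyword argument. A minimal sketch of the new call path, using only names that appear in the diff below:

    # inside ChatAgent, when retrieving memory context (second file below)
    query = await generate_rag_query(
        memory.query_generator_config,
        messages,
        inference_api=self.inference_api,
    )
    # the LLM-backed generator then calls self.inference_api.chat_completion(...)
    # instead of building InferenceClient(f"http://{host}:{port}") from its config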
File 1 of 3:

@@ -133,8 +133,6 @@ class LLMMemoryQueryGeneratorConfig(BaseModel):
     type: Literal[MemoryQueryGenerator.llm.value] = MemoryQueryGenerator.llm.value
     model: str
     template: str
-    host: str = "localhost"
-    port: int = 5000


 class CustomMemoryQueryGeneratorConfig(BaseModel):
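With host and port gone, an LLM-based query generator is configured purely by model and prompt template; the inference endpoint now comes from the agent itself. A hypothetical config value (the model name and template string are illustrative, not taken from the repo):

    query_generator_config = LLMMemoryQueryGeneratorConfig(
        model="Llama3.1-8B-Instruct",
        template="Turn the last user message into a search query: {{ messages }}",
    )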
@@ -157,7 +155,7 @@ class MemoryToolDefinition(ToolDefinitionCommon):
     # This config defines how a query is generated using the messages
     # for memory bank retrieval.
     query_generator_config: MemoryQueryGeneratorConfig = Field(
-        default=DefaultMemoryQueryGeneratorConfig
+        default=DefaultMemoryQueryGeneratorConfig()
     )
     max_tokens_in_context: int = 4096
     max_chunks: int = 10
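The second hunk also fixes a subtle bug: default=DefaultMemoryQueryGeneratorConfig handed Pydantic the class object itself, so a MemoryToolDefinition built without an explicit value would carry the class rather than a usable config. Passing a constructed instance, as the diff now does, or using default_factory both give a real default; a standalone sketch with illustrative stand-in models (not the repo's classes):

    from pydantic import BaseModel, Field

    class QueryGenConfig(BaseModel):
        # stand-in for DefaultMemoryQueryGeneratorConfig
        sep: str = " "

    class ToolDef(BaseModel):
        # default=QueryGenConfig (the class) would leave the field holding the class object;
        # an instance, or default_factory as here, yields an actual config value
        query_generator_config: QueryGenConfig = Field(default_factory=QueryGenConfig)

    print(ToolDef().query_generator_config.sep)  # -> ' '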
File 2 of 3:

@@ -31,7 +31,7 @@ from llama_toolchain.tools.builtin import (
     SingleMessageBuiltinTool,
 )

-from .context_retriever import generate_rag_query
+from .rag.context_retriever import generate_rag_query
 from .safety import SafetyException, ShieldRunnerMixin


@@ -665,7 +665,9 @@ class ChatAgent(ShieldRunnerMixin):
             # (i.e., no prior turns uploaded an Attachment)
             return None, []

-        query = await generate_rag_query(memory.query_generator_config, messages)
+        query = await generate_rag_query(
+            memory.query_generator_config, messages, inference_api=self.inference_api
+        )
         tasks = [
             self.memory_api.query_documents(
                 bank_id=bank_id,
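Here self.inference_api is whatever inference implementation the ChatAgent was constructed with; the constructor isn't part of this diff, so the snippet below is an assumption inferred from the attributes used in the hunk (self.inference_api, self.memory_api), not the repo's actual signature:

    class ChatAgent:  # simplified; the real class also mixes in ShieldRunnerMixin
        def __init__(self, agent_config, inference_api, memory_api):
            self.agent_config = agent_config
            self.inference_api = inference_api  # reused above for RAG query generation
            self.memory_api = memory_api        # queried for relevant document chunks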
File 3 of 3:

@@ -10,38 +10,37 @@ from jinja2 import Template
 from llama_models.llama3.api import *  # noqa: F403


-from termcolor import cprint
-
 from llama_toolchain.agentic_system.api import (
     DefaultMemoryQueryGeneratorConfig,
     LLMMemoryQueryGeneratorConfig,
     MemoryQueryGenerator,
     MemoryQueryGeneratorConfig,
 )
+from termcolor import cprint  # noqa: F401
 from llama_toolchain.inference.api import *  # noqa: F403
-from llama_toolchain.inference.client import InferenceClient


 async def generate_rag_query(
     generator_config: MemoryQueryGeneratorConfig,
     messages: List[Message],
+    **kwargs,
 ):
     if generator_config.type == MemoryQueryGenerator.default.value:
-        generator = DefaultRAGQueryGenerator(generator_config)
+        generator = DefaultRAGQueryGenerator(generator_config, **kwargs)
     elif generator_config.type == MemoryQueryGenerator.llm.value:
-        generator = LLMRAGQueryGenerator(generator_config)
+        generator = LLMRAGQueryGenerator(generator_config, **kwargs)
     else:
         raise NotImplementedError(
             f"Unsupported memory query generator {generator_config.type}"
         )

     query = await generator.gen(messages)
-    cprint(f"Generated query >>>: {query}", color="green")
+    # cprint(f"Generated query >>>: {query}", color="green")
     return query


 class DefaultRAGQueryGenerator:
-    def __init__(self, config: DefaultMemoryQueryGeneratorConfig):
+    def __init__(self, config: DefaultMemoryQueryGeneratorConfig, **kwargs):
         self.config = config

     async def gen(self, messages: List[Message]) -> InterleavedTextMedia:
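generate_rag_query now forwards any extra keyword arguments to whichever generator the config selects; the default generator simply ignores them, while the LLM-backed one (next hunk) requires inference_api. Hypothetical call sites, assuming llm_config (an LLMMemoryQueryGeneratorConfig) and inference_api are already in scope:

    # default generator: no extra arguments needed
    query = await generate_rag_query(DefaultMemoryQueryGeneratorConfig(), messages)

    # LLM-backed generator: the agent's inference API rides along in **kwargs
    query = await generate_rag_query(llm_config, messages, inference_api=inference_api)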
@@ -52,11 +51,12 @@ class DefaultRAGQueryGenerator:


 class LLMRAGQueryGenerator:
-    def __init__(self, config: LLMMemoryQueryGeneratorConfig):
+    def __init__(self, config: LLMMemoryQueryGeneratorConfig, **kwargs):
         self.config = config
+        assert "inference_api" in kwargs, "LLMRAGQueryGenerator needs inference_api"
+        self.inference_api = kwargs["inference_api"]

     async def gen(self, messages: List[Message]) -> InterleavedTextMedia:
-        # params will have
         """
         Generates a query that will be used for
         retrieving relevant information from the memory bank.
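The dependency on an inference API is enforced with an assert rather than an explicit keyword parameter; constructing the generator directly would look like this (llm_config is a placeholder for an LLMMemoryQueryGeneratorConfig instance):

    generator = LLMRAGQueryGenerator(llm_config, inference_api=self.inference_api)
    # without the kwarg: AssertionError: LLMRAGQueryGenerator needs inference_api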
|
@ -69,15 +69,9 @@ class LLMRAGQueryGenerator:
|
||||||
template = Template(self.config.template)
|
template = Template(self.config.template)
|
||||||
content = template.render(m_dict)
|
content = template.render(m_dict)
|
||||||
|
|
||||||
cprint(f"Rendered Template >>>: {content}", color="yellow")
|
|
||||||
# TODO: How to manage these config params better ?
|
|
||||||
host = self.config.host
|
|
||||||
port = self.config.port
|
|
||||||
client = InferenceClient(f"http://{host}:{port}")
|
|
||||||
|
|
||||||
model = self.config.model
|
model = self.config.model
|
||||||
message = UserMessage(content=content)
|
message = UserMessage(content=content)
|
||||||
response = client.chat_completion(
|
response = self.inference_api.chat_completion(
|
||||||
ChatCompletionRequest(
|
ChatCompletionRequest(
|
||||||
model=model,
|
model=model,
|
||||||
messages=[message],
|
messages=[message],
|