Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-07-29 15:23:51 +00:00
drop classes for functions
parent c2b7b462e9
commit 95a5982524
1 changed file with 41 additions and 50 deletions
@@ -21,65 +21,56 @@ from llama_toolchain.inference.api import *  # noqa: F403
 async def generate_rag_query(
-    generator_config: MemoryQueryGeneratorConfig,
+    config: MemoryQueryGeneratorConfig,
     messages: List[Message],
     **kwargs,
 ):
-    if generator_config.type == MemoryQueryGenerator.default.value:
-        generator = DefaultRAGQueryGenerator(generator_config, **kwargs)
-    elif generator_config.type == MemoryQueryGenerator.llm.value:
-        generator = LLMRAGQueryGenerator(generator_config, **kwargs)
+    """
+    Generates a query that will be used for
+    retrieving relevant information from the memory bank.
+    """
+    if config.type == MemoryQueryGenerator.default.value:
+        query = await default_rag_query_generator(config, messages, **kwargs)
+    elif config.type == MemoryQueryGenerator.llm.value:
+        query = await llm_rag_query_generator(config, messages, **kwargs)
     else:
-        raise NotImplementedError(
-            f"Unsupported memory query generator {generator_config.type}"
-        )
-
-    query = await generator.gen(messages)
+        raise NotImplementedError(f"Unsupported memory query generator {config.type}")
     # cprint(f"Generated query >>>: {query}", color="green")
     return query
 
 
-class DefaultRAGQueryGenerator:
-    def __init__(self, config: DefaultMemoryQueryGeneratorConfig, **kwargs):
-        self.config = config
-
-    async def gen(self, messages: List[Message]) -> InterleavedTextMedia:
-        query = self.config.sep.join(
-            interleaved_text_media_as_str(m.content) for m in messages
-        )
-        return query
+async def default_rag_query_generator(
+    config: DefaultMemoryQueryGeneratorConfig,
+    messages: List[Message],
+    **kwargs,
+):
+    return config.sep.join(interleaved_text_media_as_str(m.content) for m in messages)
 
 
-class LLMRAGQueryGenerator:
-    def __init__(self, config: LLMMemoryQueryGeneratorConfig, **kwargs):
-        self.config = config
-        assert "inference_api" in kwargs, "LLMRAGQueryGenerator needs inference_api"
-        self.inference_api = kwargs["inference_api"]
-
-    async def gen(self, messages: List[Message]) -> InterleavedTextMedia:
-        """
-        Generates a query that will be used for
-        retrieving relevant information from the memory bank.
-        """
-        # get template from user
-        # user template will assume data has the format of
-        # pydantic object representing List[Message]
-        m_dict = {"messages": [m.model_dump() for m in messages]}
-
-        template = Template(self.config.template)
-        content = template.render(m_dict)
-
-        model = self.config.model
-        message = UserMessage(content=content)
-        response = self.inference_api.chat_completion(
-            ChatCompletionRequest(
-                model=model,
-                messages=[message],
-                stream=False,
-            )
-        )
-
-        async for chunk in response:
-            query = chunk.completion_message.content
-
-        return query
+async def llm_rag_query_generator(
+    config: LLMMemoryQueryGeneratorConfig,
+    messages: List[Message],
+    **kwargs,
+):
+    assert "inference_api" in kwargs, "LLMRAGQueryGenerator needs inference_api"
+    inference_api = kwargs["inference_api"]
+
+    m_dict = {"messages": [m.model_dump() for m in messages]}
+
+    template = Template(config.template)
+    content = template.render(m_dict)
+
+    model = config.model
+    message = UserMessage(content=content)
+    response = inference_api.chat_completion(
+        ChatCompletionRequest(
+            model=model,
+            messages=[message],
+            stream=False,
+        )
+    )
+
+    async for chunk in response:
+        query = chunk.completion_message.content
+
+    return query
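For context, the refactor above replaces one-method generator classes with plain async functions that take their config as an argument. The following is a minimal standalone sketch of that same class-to-function dispatch pattern; DefaultConfig, default_query_generator, generate_query, and the "default" literal are illustrative stand-ins chosen for this sketch, not llama_toolchain identifiers, and the code is not taken from the repository.

# Standalone sketch of the class-to-function pattern used in this commit.
# All names below are illustrative stand-ins, not llama_toolchain APIs.
import asyncio
from dataclasses import dataclass
from typing import List


@dataclass
class DefaultConfig:
    # Plays the role of a query-generator config: pure data, no behavior.
    type: str = "default"
    sep: str = " "


async def default_query_generator(config: DefaultConfig, messages: List[str], **kwargs) -> str:
    # A free function replaces a one-method generator class; the config is an
    # argument instead of instance state.
    return config.sep.join(messages)


async def generate_query(config: DefaultConfig, messages: List[str], **kwargs) -> str:
    # Dispatch on config.type, mirroring generate_rag_query in the diff above.
    if config.type == "default":
        return await default_query_generator(config, messages, **kwargs)
    raise NotImplementedError(f"Unsupported query generator {config.type}")


if __name__ == "__main__":
    # Prints "hello | memory bank"
    print(asyncio.run(generate_query(DefaultConfig(sep=" | "), ["hello", "memory bank"])))

Passing the config (and, in the LLM path of the diff, the inference_api keyword argument) directly into each call keeps the generators stateless, which is what lets the commit delete the __init__ boilerplate on both classes.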