Mirror of https://github.com/meta-llama/llama-stack.git, synced 2025-06-28 10:54:19 +00:00.
This is yet another of those large PRs (hopefully we will have fewer and fewer of them as things mature). This one introduces substantial improvements and some simplifications to the stack. Most important bits:

* The Agents reference implementation now supports session / turn persistence. The default implementation uses sqlite, but Redis is also supported.
* We have re-architected the structure of the Stack APIs to allow for more flexible routing. The motivating use cases are:
  - routing model A to ollama and model B to a remote provider like Together
  - routing shield A to a local implementation while shield B goes to a remote provider like Bedrock
  - routing a vector memory bank to Weaviate while routing a key-value memory bank to Redis
* Support for provider-specific parameters passed from clients. A client can pass data using the `x_llamastack_provider_data` parameter, which can be type-checked and provided to the Adapter implementations (see the sketch below).
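As a rough illustration of the last point (the class name, field, and helper below are assumptions made for the sketch, not the actual llama-stack API), adapter-side type-checking of client-supplied provider data could look roughly like this:

# Hypothetical sketch only: validating an `x_llamastack_provider_data` payload
# before it is handed to an adapter. `TogetherProviderData` and
# `parse_provider_data` are illustrative names, not llama-stack APIs.
from typing import Optional

from pydantic import BaseModel, ValidationError


class TogetherProviderData(BaseModel):
    # Assumed field: an API key the client supplies for the remote provider.
    together_api_key: Optional[str] = None


def parse_provider_data(raw: dict) -> Optional[TogetherProviderData]:
    # Return a validated object, or None if the payload does not type-check.
    try:
        return TogetherProviderData(**raw)
    except ValidationError:
        return None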
55 lines · 1.8 KiB · Python
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Optional

from llama_models.datatypes import *  # noqa: F403
from llama_models.sku_list import all_registered_models, resolve_model

from llama_stack.apis.inference import *  # noqa: F401, F403

from pydantic import BaseModel, Field, field_validator


class MetaReferenceImplConfig(BaseModel):
    model: str = Field(
        default="Meta-Llama3.1-8B-Instruct",
        description="Model descriptor from `llama model list`",
    )
    quantization: Optional[QuantizationConfig] = None
    torch_seed: Optional[int] = None
    max_seq_len: int = 4096
    max_batch_size: int = 1

    @field_validator("model")
    @classmethod
    def validate_model(cls, model: str) -> str:
        permitted_models = [
            m.descriptor()
            for m in all_registered_models()
            if m.model_family == ModelFamily.llama3_1
            or m.core_model_id == CoreModelId.llama_guard_3_8b
        ]
        if model not in permitted_models:
            model_list = "\n\t".join(permitted_models)
            raise ValueError(
                f"Unknown model: `{model}`. Choose from [\n\t{model_list}\n]"
            )
        return model

    @property
    def model_parallel_size(self) -> int:
        # HUGE HACK ALERT: this will be fixed when we move inference configuration
        # to ModelsRegistry and we can explicitly ask for `model_parallel_size`
        # as configuration there
        gpu_count = 1
        resolved = resolve_model(self.model)
        assert resolved is not None
        descriptor = resolved.descriptor().lower()
        if "-70b" in descriptor or "-405b" in descriptor:
            gpu_count = 8

        return gpu_count
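For orientation, here is a minimal usage sketch of the config above; the behavior noted in the comments follows directly from the validator and property defined in this file:

# Minimal usage sketch for MetaReferenceImplConfig.
config = MetaReferenceImplConfig(model="Meta-Llama3.1-8B-Instruct", max_seq_len=8192)
print(config.model_parallel_size)  # 1 here; 8 for descriptors containing "-70b" or "-405b"

# A descriptor outside the permitted list fails at construction time: the
# field_validator raises ValueError, which pydantic surfaces as a ValidationError.
try:
    MetaReferenceImplConfig(model="not-a-real-model")
except Exception as exc:
    print(exc)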