Merge branch 'main' into patch-1

commit f5edd07b29
Author: Yuan Tang
Date: 2025-01-17 23:49:05 -05:00 (committed by GitHub)
91 changed files with 995 additions and 632 deletions


@@ -30,6 +30,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
     name = "bedrock"
@@ -70,7 +71,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use AWS Bedrock for running LLM inference and safety",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=default_models,


@@ -28,4 +28,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -81,6 +81,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/bedrock}/registry.db
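
Note on the db_path values in these run.yaml diffs: they use a ${env.VAR:default} placeholder syntax, where the text after the colon is the fallback used when the variable is unset. A minimal sketch of how such placeholders can be expanded (illustrative only; expand_env_placeholders and its regex are assumptions, not llama-stack's actual resolver):

    import os
    import re

    # Matches "${env.NAME:default}" and captures NAME and the default.
    _PLACEHOLDER = re.compile(r"\$\{env\.([A-Z0-9_]+):([^}]*)\}")

    def expand_env_placeholders(value: str) -> str:
        def _sub(match: re.Match) -> str:
            name, default = match.group(1), match.group(2)
            # Fall back to the text after the colon when NAME is unset.
            return os.environ.get(name, default)
        return _PLACEHOLDER.sub(_sub, value)

    # With SQLITE_STORE_DIR unset this prints
    # "~/.llama/distributions/bedrock/registry.db"
    print(expand_env_placeholders(
        "${env.SQLITE_STORE_DIR:~/.llama/distributions/bedrock}/registry.db"
    ))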


@@ -92,7 +92,7 @@ def get_distribution_template() -> DistributionTemplate:
         name="cerebras",
         distro_type="self_hosted",
         description="Use Cerebras for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=default_models,


@@ -2,7 +2,7 @@ version: '2'
 name: experimental-post-training
 distribution_spec:
   description: Experimental template for post training
-  docker_image: null
+  container_image: null
   providers:
     inference:
     - inline::meta-reference


@@ -1,6 +1,6 @@
 version: '2'
 image_name: experimental-post-training
-docker_image: null
+container_image: null
 conda_env: experimental-post-training
 apis:
 - agents


@@ -28,4 +28,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -39,6 +39,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
@@ -98,7 +99,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use Fireworks.AI for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=default_models,


@@ -92,6 +92,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/registry.db


@@ -86,6 +86,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/fireworks}/registry.db


@@ -28,4 +28,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -34,6 +34,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
     name = "hf-endpoint"
@@ -88,7 +89,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use (an external) Hugging Face Inference Endpoint for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=None,
         providers=providers,
         default_models=[inference_model, safety_model],


@@ -91,6 +91,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/registry.db


@@ -86,6 +86,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-endpoint}/registry.db


@@ -28,4 +28,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -34,6 +34,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
@@ -89,7 +90,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use (an external) Hugging Face Inference Endpoint for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=None,
         providers=providers,
         default_models=[inference_model, safety_model],


@@ -91,6 +91,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/registry.db


@@ -86,6 +86,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/hf-serverless}/registry.db


@@ -28,4 +28,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -38,6 +38,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
     name = "meta-reference-gpu"


@@ -93,6 +93,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db


@@ -87,6 +87,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-gpu}/registry.db


@@ -28,4 +28,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -33,6 +33,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
     default_tool_groups = [


@@ -89,6 +89,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/meta-reference-quantized-gpu}/registry.db


@@ -26,4 +26,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -29,6 +29,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
@@ -68,7 +69,7 @@ def get_distribution_template() -> DistributionTemplate:
         name="nvidia",
         distro_type="remote_hosted",
         description="Use NVIDIA NIM for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=default_models,


@@ -83,6 +83,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/nvidia}/registry.db


@@ -90,7 +90,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use (an external) Ollama server for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=[inference_model, safety_model],


@@ -12,6 +12,15 @@ distribution_spec:
     - inline::llama-guard
     agents:
     - inline::meta-reference
+    eval:
+    - inline::meta-reference
+    datasetio:
+    - remote::huggingface
+    - inline::localfs
+    scoring:
+    - inline::basic
+    - inline::llm-as-judge
+    - inline::braintrust
     telemetry:
     - inline::meta-reference
     tool_runtime:
@@ -19,4 +28,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -2,9 +2,12 @@ version: '2'
 image_name: remote-vllm
 apis:
 - agents
+- datasetio
+- eval
 - inference
 - memory
 - safety
+- scoring
 - telemetry
 - tool_runtime
 providers:
@@ -44,6 +47,28 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/agents_store.db
+  eval:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config: {}
+  datasetio:
+  - provider_id: huggingface
+    provider_type: remote::huggingface
+    config: {}
+  - provider_id: localfs
+    provider_type: inline::localfs
+    config: {}
+  scoring:
+  - provider_id: basic
+    provider_type: inline::basic
+    config: {}
+  - provider_id: llm-as-judge
+    provider_type: inline::llm-as-judge
+    config: {}
+  - provider_id: braintrust
+    provider_type: inline::braintrust
+    config:
+      openai_api_key: ${env.OPENAI_API_KEY:}
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -68,6 +93,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db
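
Every provider entry added in these run.yaml diffs follows the same three-field shape: a provider_id, a provider_type namespaced as inline:: or remote::, and a config mapping. A minimal sketch of that shape, assuming pydantic and PyYAML; ProviderEntry is an illustrative stand-in, not the real llama-stack schema:

    from typing import Any, Dict, List

    import yaml
    from pydantic import BaseModel

    class ProviderEntry(BaseModel):
        provider_id: str
        provider_type: str  # e.g. "inline::braintrust" or "remote::model-context-protocol"
        config: Dict[str, Any] = {}

    # Parse one of the scoring entries added above; the ${env...}
    # placeholder is left as a plain string at this stage.
    doc = yaml.safe_load("""
    scoring:
    - provider_id: braintrust
      provider_type: inline::braintrust
      config:
        openai_api_key: ${env.OPENAI_API_KEY:}
    """)
    entries: List[ProviderEntry] = [ProviderEntry(**e) for e in doc["scoring"]]
    print(entries[0].provider_type)  # inline::braintrust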


@@ -2,9 +2,12 @@ version: '2'
 image_name: remote-vllm
 apis:
 - agents
+- datasetio
+- eval
 - inference
 - memory
 - safety
+- scoring
 - telemetry
 - tool_runtime
 providers:
@@ -38,6 +41,28 @@ providers:
         type: sqlite
         namespace: null
         db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/agents_store.db
+  eval:
+  - provider_id: meta-reference
+    provider_type: inline::meta-reference
+    config: {}
+  datasetio:
+  - provider_id: huggingface
+    provider_type: remote::huggingface
+    config: {}
+  - provider_id: localfs
+    provider_type: inline::localfs
+    config: {}
+  scoring:
+  - provider_id: basic
+    provider_type: inline::basic
+    config: {}
+  - provider_id: llm-as-judge
+    provider_type: inline::llm-as-judge
+    config: {}
+  - provider_id: braintrust
+    provider_type: inline::braintrust
+    config:
+      openai_api_key: ${env.OPENAI_API_KEY:}
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
@@ -62,6 +87,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/remote-vllm}/registry.db


@@ -27,12 +27,16 @@ def get_distribution_template() -> DistributionTemplate:
         "memory": ["inline::faiss", "remote::chromadb", "remote::pgvector"],
         "safety": ["inline::llama-guard"],
         "agents": ["inline::meta-reference"],
+        "eval": ["inline::meta-reference"],
+        "datasetio": ["remote::huggingface", "inline::localfs"],
+        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
         "telemetry": ["inline::meta-reference"],
         "tool_runtime": [
             "remote::brave-search",
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
     name = "remote-vllm"


@@ -37,7 +37,7 @@ class RunConfigSettings(BaseModel):
         self,
         name: str,
         providers: Dict[str, List[str]],
-        docker_image: Optional[str] = None,
+        container_image: Optional[str] = None,
     ) -> StackRunConfig:
         provider_registry = get_provider_registry()
@@ -83,7 +83,7 @@ class RunConfigSettings(BaseModel):
         return StackRunConfig(
             image_name=name,
-            docker_image=docker_image,
+            container_image=container_image,
             apis=apis,
             providers=provider_configs,
             metadata_store=SqliteKVStoreConfig.sample_run_config(
@@ -112,7 +112,7 @@ class DistributionTemplate(BaseModel):
     # Optional configuration
     run_config_env_vars: Optional[Dict[str, Tuple[str, str]]] = None
-    docker_image: Optional[str] = None
+    container_image: Optional[str] = None
     default_models: Optional[List[ModelInput]] = None
@@ -121,7 +121,7 @@
             name=self.name,
             distribution_spec=DistributionSpec(
                 description=self.description,
-                docker_image=self.docker_image,
+                container_image=self.container_image,
                 providers=self.providers,
             ),
             image_type="conda",  # default to conda, can be overridden
@@ -169,7 +169,7 @@
         for yaml_pth, settings in self.run_configs.items():
             run_config = settings.run_config(
-                self.name, self.providers, self.docker_image
+                self.name, self.providers, self.container_image
             )
             with open(yaml_output_dir / yaml_pth, "w") as f:
                 yaml.safe_dump(
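
The template.py hunks above complete the docker_image to container_image rename end to end: the field on DistributionTemplate, the run_config() parameter, and the values written into StackRunConfig and DistributionSpec all change together, which is why the generated build.yaml and run.yaml files elsewhere in this commit switch to the container_image key. A hedged sketch of that flow; TemplateStub and StackRunConfigStub are illustrative stand-ins, not llama-stack's real classes:

    from typing import Dict, List, Optional

    from pydantic import BaseModel

    class StackRunConfigStub(BaseModel):
        image_name: str
        container_image: Optional[str] = None

    class TemplateStub(BaseModel):
        name: str
        providers: Dict[str, List[str]]
        container_image: Optional[str] = None  # was docker_image before this commit

        def run_config(self) -> StackRunConfigStub:
            # The template's container_image is forwarded unchanged into the
            # run config, mirroring settings.run_config(..., self.container_image).
            return StackRunConfigStub(
                image_name=self.name, container_image=self.container_image
            )

    print(TemplateStub(name="bedrock", providers={}).run_config())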


@@ -28,4 +28,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -86,6 +86,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db


@@ -85,6 +85,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/tgi}/registry.db


@@ -36,6 +36,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
     name = "tgi"
@@ -92,7 +93,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use (an external) TGI server for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=[inference_model, safety_model],


@@ -28,4 +28,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -92,6 +92,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/registry.db


@@ -86,6 +86,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/together}/registry.db


@@ -39,6 +39,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
     name = "together"
@@ -96,7 +97,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use Together.AI for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=Path(__file__).parent / "doc_template.md",
         providers=providers,
         default_models=default_models,


@@ -28,4 +28,5 @@ distribution_spec:
     - remote::tavily-search
     - inline::code-interpreter
     - inline::memory-runtime
+    - remote::model-context-protocol
 image_type: conda


@@ -89,6 +89,9 @@ providers:
   - provider_id: memory-runtime
     provider_type: inline::memory-runtime
     config: {}
+  - provider_id: model-context-protocol
+    provider_type: remote::model-context-protocol
+    config: {}
 metadata_store:
   type: sqlite
   db_path: ${env.SQLITE_STORE_DIR:~/.llama/distributions/vllm-gpu}/registry.db


@@ -33,6 +33,7 @@ def get_distribution_template() -> DistributionTemplate:
             "remote::tavily-search",
             "inline::code-interpreter",
             "inline::memory-runtime",
+            "remote::model-context-protocol",
         ],
     }
@@ -84,7 +85,7 @@ def get_distribution_template() -> DistributionTemplate:
         name=name,
         distro_type="self_hosted",
         description="Use a built-in vLLM engine for running LLM inference",
-        docker_image=None,
+        container_image=None,
         template_path=None,
         providers=providers,
         default_models=[inference_model],