commit 62839a91d8
Author: Zooey Nguyen (committed by GitHub)
Date: 2025-11-18 23:22:37 +00:00

9 changed files with 510 additions and 229 deletions

@@ -35,6 +35,7 @@ distribution_spec:
     - provider_type: inline::code-scanner
     agents:
     - provider_type: inline::meta-reference
+    - provider_type: inline::dana
     post_training:
     - provider_type: inline::torchtune-cpu
     eval:
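
Note: this build-time entry only declares that the `inline::dana` provider type gets packaged into the starter distribution; the generated run config below is what binds a concrete provider instance (its id and persistence backends) to it.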

@ -1,235 +1,247 @@
version: 2 version: 2
image_name: starter image_name: starter
apis: apis:
- agents - agents
- batches - batches
- datasetio - datasetio
- eval - eval
- files - files
- inference - inference
- post_training - post_training
- safety - safety
- scoring - scoring
- tool_runtime - tool_runtime
- vector_io - vector_io
providers: providers:
inference: inference:
- provider_id: ${env.CEREBRAS_API_KEY:+cerebras} - provider_id: ${env.CEREBRAS_API_KEY:+cerebras}
provider_type: remote::cerebras provider_type: remote::cerebras
config: config:
base_url: https://api.cerebras.ai base_url: https://api.cerebras.ai
api_key: ${env.CEREBRAS_API_KEY:=} api_key: ${env.CEREBRAS_API_KEY:=}
- provider_id: ${env.OLLAMA_URL:+ollama} - provider_id: ${env.OLLAMA_URL:+ollama}
provider_type: remote::ollama provider_type: remote::ollama
config: config:
url: ${env.OLLAMA_URL:=http://localhost:11434} url: ${env.OLLAMA_URL:=http://localhost:11434}
- provider_id: ${env.VLLM_URL:+vllm} - provider_id: ${env.VLLM_URL:+vllm}
provider_type: remote::vllm provider_type: remote::vllm
config: config:
url: ${env.VLLM_URL:=} url: ${env.VLLM_URL:=}
max_tokens: ${env.VLLM_MAX_TOKENS:=4096} max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
api_token: ${env.VLLM_API_TOKEN:=fake} api_token: ${env.VLLM_API_TOKEN:=fake}
tls_verify: ${env.VLLM_TLS_VERIFY:=true} tls_verify: ${env.VLLM_TLS_VERIFY:=true}
- provider_id: ${env.TGI_URL:+tgi} - provider_id: ${env.TGI_URL:+tgi}
provider_type: remote::tgi provider_type: remote::tgi
config: config:
url: ${env.TGI_URL:=} url: ${env.TGI_URL:=}
- provider_id: fireworks - provider_id: fireworks
provider_type: remote::fireworks provider_type: remote::fireworks
config: config:
url: https://api.fireworks.ai/inference/v1 url: https://api.fireworks.ai/inference/v1
api_key: ${env.FIREWORKS_API_KEY:=} api_key: ${env.FIREWORKS_API_KEY:=}
- provider_id: together - provider_id: together
provider_type: remote::together provider_type: remote::together
config: config:
url: https://api.together.xyz/v1 url: https://api.together.xyz/v1
api_key: ${env.TOGETHER_API_KEY:=} api_key: ${env.TOGETHER_API_KEY:=}
- provider_id: bedrock - provider_id: bedrock
provider_type: remote::bedrock provider_type: remote::bedrock
config: config:
api_key: ${env.AWS_BEDROCK_API_KEY:=} api_key: ${env.AWS_BEDROCK_API_KEY:=}
region_name: ${env.AWS_DEFAULT_REGION:=us-east-2} region_name: ${env.AWS_DEFAULT_REGION:=us-east-2}
- provider_id: ${env.NVIDIA_API_KEY:+nvidia} - provider_id: ${env.NVIDIA_API_KEY:+nvidia}
provider_type: remote::nvidia provider_type: remote::nvidia
config: config:
url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com} url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
api_key: ${env.NVIDIA_API_KEY:=} api_key: ${env.NVIDIA_API_KEY:=}
append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True} append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
- provider_id: openai - provider_id: openai
provider_type: remote::openai provider_type: remote::openai
config: config:
api_key: ${env.OPENAI_API_KEY:=} api_key: ${env.OPENAI_API_KEY:=}
base_url: ${env.OPENAI_BASE_URL:=https://api.openai.com/v1} base_url: ${env.OPENAI_BASE_URL:=https://api.openai.com/v1}
- provider_id: anthropic - provider_id: anthropic
provider_type: remote::anthropic provider_type: remote::anthropic
config: config:
api_key: ${env.ANTHROPIC_API_KEY:=} api_key: ${env.ANTHROPIC_API_KEY:=}
- provider_id: gemini - provider_id: gemini
provider_type: remote::gemini provider_type: remote::gemini
config: config:
api_key: ${env.GEMINI_API_KEY:=} api_key: ${env.GEMINI_API_KEY:=}
- provider_id: ${env.VERTEX_AI_PROJECT:+vertexai} - provider_id: ${env.VERTEX_AI_PROJECT:+vertexai}
provider_type: remote::vertexai provider_type: remote::vertexai
config: config:
project: ${env.VERTEX_AI_PROJECT:=} project: ${env.VERTEX_AI_PROJECT:=}
location: ${env.VERTEX_AI_LOCATION:=us-central1} location: ${env.VERTEX_AI_LOCATION:=us-central1}
- provider_id: groq - provider_id: groq
provider_type: remote::groq provider_type: remote::groq
config: config:
url: https://api.groq.com url: https://api.groq.com
api_key: ${env.GROQ_API_KEY:=} api_key: ${env.GROQ_API_KEY:=}
- provider_id: sambanova - provider_id: sambanova
provider_type: remote::sambanova provider_type: remote::sambanova
config: config:
url: https://api.sambanova.ai/v1 url: https://api.sambanova.ai/v1
api_key: ${env.SAMBANOVA_API_KEY:=} api_key: ${env.SAMBANOVA_API_KEY:=}
- provider_id: ${env.AZURE_API_KEY:+azure} - provider_id: ${env.AZURE_API_KEY:+azure}
provider_type: remote::azure provider_type: remote::azure
config: config:
api_key: ${env.AZURE_API_KEY:=} api_key: ${env.AZURE_API_KEY:=}
api_base: ${env.AZURE_API_BASE:=} api_base: ${env.AZURE_API_BASE:=}
api_version: ${env.AZURE_API_VERSION:=} api_version: ${env.AZURE_API_VERSION:=}
api_type: ${env.AZURE_API_TYPE:=} api_type: ${env.AZURE_API_TYPE:=}
- provider_id: sentence-transformers - provider_id: sentence-transformers
provider_type: inline::sentence-transformers provider_type: inline::sentence-transformers
vector_io: vector_io:
- provider_id: faiss - provider_id: faiss
provider_type: inline::faiss provider_type: inline::faiss
config: config:
persistence: persistence:
namespace: vector_io::faiss namespace: vector_io::faiss
backend: kv_default
- provider_id: sqlite-vec
provider_type: inline::sqlite-vec
config:
db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/sqlite_vec.db
persistence:
namespace: vector_io::sqlite_vec
backend: kv_default
- provider_id: ${env.MILVUS_URL:+milvus}
provider_type: inline::milvus
config:
db_path: ${env.MILVUS_DB_PATH:=~/.llama/distributions/starter}/milvus.db
persistence:
namespace: vector_io::milvus
backend: kv_default
- provider_id: ${env.CHROMADB_URL:+chromadb}
provider_type: remote::chromadb
config:
url: ${env.CHROMADB_URL:=}
persistence:
namespace: vector_io::chroma_remote
backend: kv_default
- provider_id: ${env.PGVECTOR_DB:+pgvector}
provider_type: remote::pgvector
config:
host: ${env.PGVECTOR_HOST:=localhost}
port: ${env.PGVECTOR_PORT:=5432}
db: ${env.PGVECTOR_DB:=}
user: ${env.PGVECTOR_USER:=}
password: ${env.PGVECTOR_PASSWORD:=}
persistence:
namespace: vector_io::pgvector
backend: kv_default
- provider_id: ${env.QDRANT_URL:+qdrant}
provider_type: remote::qdrant
config:
api_key: ${env.QDRANT_API_KEY:=}
persistence:
namespace: vector_io::qdrant_remote
backend: kv_default
- provider_id: ${env.WEAVIATE_CLUSTER_URL:+weaviate}
provider_type: remote::weaviate
config:
weaviate_api_key: null
weaviate_cluster_url: ${env.WEAVIATE_CLUSTER_URL:=localhost:8080}
persistence:
namespace: vector_io::weaviate
backend: kv_default
files:
- provider_id: meta-reference-files
provider_type: inline::localfs
config:
storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}
metadata_store:
table_name: files_metadata
backend: sql_default
safety:
- provider_id: llama-guard
provider_type: inline::llama-guard
config:
excluded_categories: []
- provider_id: code-scanner
provider_type: inline::code-scanner
agents:
- provider_id: meta-reference
provider_type: inline::meta-reference
config:
persistence:
agent_state:
namespace: agents
backend: kv_default backend: kv_default
responses: - provider_id: sqlite-vec
table_name: responses provider_type: inline::sqlite-vec
config:
db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/sqlite_vec.db
persistence:
namespace: vector_io::sqlite_vec
backend: kv_default
- provider_id: ${env.MILVUS_URL:+milvus}
provider_type: inline::milvus
config:
db_path: ${env.MILVUS_DB_PATH:=~/.llama/distributions/starter}/milvus.db
persistence:
namespace: vector_io::milvus
backend: kv_default
- provider_id: ${env.CHROMADB_URL:+chromadb}
provider_type: remote::chromadb
config:
url: ${env.CHROMADB_URL:=}
persistence:
namespace: vector_io::chroma_remote
backend: kv_default
- provider_id: ${env.PGVECTOR_DB:+pgvector}
provider_type: remote::pgvector
config:
host: ${env.PGVECTOR_HOST:=localhost}
port: ${env.PGVECTOR_PORT:=5432}
db: ${env.PGVECTOR_DB:=}
user: ${env.PGVECTOR_USER:=}
password: ${env.PGVECTOR_PASSWORD:=}
persistence:
namespace: vector_io::pgvector
backend: kv_default
- provider_id: ${env.QDRANT_URL:+qdrant}
provider_type: remote::qdrant
config:
api_key: ${env.QDRANT_API_KEY:=}
persistence:
namespace: vector_io::qdrant_remote
backend: kv_default
- provider_id: ${env.WEAVIATE_CLUSTER_URL:+weaviate}
provider_type: remote::weaviate
config:
weaviate_api_key: null
weaviate_cluster_url: ${env.WEAVIATE_CLUSTER_URL:=localhost:8080}
persistence:
namespace: vector_io::weaviate
backend: kv_default
files:
- provider_id: meta-reference-files
provider_type: inline::localfs
config:
storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}
metadata_store:
table_name: files_metadata
backend: sql_default backend: sql_default
max_write_queue_size: 10000 safety:
num_writers: 4 - provider_id: llama-guard
provider_type: inline::llama-guard
config:
excluded_categories: []
- provider_id: code-scanner
provider_type: inline::code-scanner
agents:
- provider_id: meta-reference
provider_type: inline::meta-reference
config:
persistence:
agent_state:
namespace: agents
backend: kv_default
responses:
table_name: responses
backend: sql_default
max_write_queue_size: 10000
num_writers: 4
- provider_id: dana
provider_type: inline::dana
config:
persistence:
agent_state:
namespace: agents
backend: kv_default
responses:
table_name: responses
backend: sql_default
max_write_queue_size: 10000
num_writers: 4
post_training: post_training:
- provider_id: torchtune-cpu - provider_id: torchtune-cpu
provider_type: inline::torchtune-cpu provider_type: inline::torchtune-cpu
config: config:
checkpoint_format: meta checkpoint_format: meta
eval: eval:
- provider_id: meta-reference - provider_id: meta-reference
provider_type: inline::meta-reference provider_type: inline::meta-reference
config: config:
kvstore: kvstore:
namespace: eval namespace: eval
backend: kv_default backend: kv_default
datasetio: datasetio:
- provider_id: huggingface - provider_id: huggingface
provider_type: remote::huggingface provider_type: remote::huggingface
config: config:
kvstore: kvstore:
namespace: datasetio::huggingface namespace: datasetio::huggingface
backend: kv_default backend: kv_default
- provider_id: localfs - provider_id: localfs
provider_type: inline::localfs provider_type: inline::localfs
config: config:
kvstore: kvstore:
namespace: datasetio::localfs namespace: datasetio::localfs
backend: kv_default backend: kv_default
scoring: scoring:
- provider_id: basic - provider_id: basic
provider_type: inline::basic provider_type: inline::basic
- provider_id: llm-as-judge - provider_id: llm-as-judge
provider_type: inline::llm-as-judge provider_type: inline::llm-as-judge
- provider_id: braintrust - provider_id: braintrust
provider_type: inline::braintrust provider_type: inline::braintrust
config: config:
openai_api_key: ${env.OPENAI_API_KEY:=} openai_api_key: ${env.OPENAI_API_KEY:=}
tool_runtime: tool_runtime:
- provider_id: brave-search - provider_id: brave-search
provider_type: remote::brave-search provider_type: remote::brave-search
config: config:
api_key: ${env.BRAVE_SEARCH_API_KEY:=} api_key: ${env.BRAVE_SEARCH_API_KEY:=}
max_results: 3 max_results: 3
- provider_id: tavily-search - provider_id: tavily-search
provider_type: remote::tavily-search provider_type: remote::tavily-search
config: config:
api_key: ${env.TAVILY_SEARCH_API_KEY:=} api_key: ${env.TAVILY_SEARCH_API_KEY:=}
max_results: 3 max_results: 3
- provider_id: rag-runtime - provider_id: rag-runtime
provider_type: inline::rag-runtime provider_type: inline::rag-runtime
- provider_id: model-context-protocol - provider_id: model-context-protocol
provider_type: remote::model-context-protocol provider_type: remote::model-context-protocol
batches: batches:
- provider_id: reference - provider_id: reference
provider_type: inline::reference provider_type: inline::reference
config: config:
kvstore: kvstore:
namespace: batches namespace: batches
backend: kv_default backend: kv_default
storage: storage:
backends: backends:
kv_default: kv_default:
@ -256,21 +268,21 @@ storage:
registered_resources: registered_resources:
models: [] models: []
shields: shields:
- shield_id: llama-guard - shield_id: llama-guard
provider_id: ${env.SAFETY_MODEL:+llama-guard} provider_id: ${env.SAFETY_MODEL:+llama-guard}
provider_shield_id: ${env.SAFETY_MODEL:=} provider_shield_id: ${env.SAFETY_MODEL:=}
- shield_id: code-scanner - shield_id: code-scanner
provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner} provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner}
provider_shield_id: ${env.CODE_SCANNER_MODEL:=} provider_shield_id: ${env.CODE_SCANNER_MODEL:=}
vector_dbs: [] vector_dbs: []
datasets: [] datasets: []
scoring_fns: [] scoring_fns: []
benchmarks: [] benchmarks: []
tool_groups: tool_groups:
- toolgroup_id: builtin::websearch - toolgroup_id: builtin::websearch
provider_id: tavily-search provider_id: tavily-search
- toolgroup_id: builtin::rag - toolgroup_id: builtin::rag
provider_id: rag-runtime provider_id: rag-runtime
server: server:
port: 8321 port: 8321
telemetry: telemetry:
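
Reading note on the substitution syntax used throughout this generated file: `${env.X:=default}` fills in `default` when `X` is unset, while `${env.X:+value}` expands to `value` only when `X` is set, which is how the optional providers (ollama, vllm, chromadb, ...) stay disabled until their variable is exported. The new dana entry uses a fixed `provider_id: dana`, so it is always active; a gated variant would look like the following sketch (the `DANA_URL` variable is hypothetical, not part of this commit):

    agents:
    - provider_id: ${env.DANA_URL:+dana}
      provider_type: inline::dana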

@@ -127,7 +127,10 @@ def get_distribution_template(name: str = "starter") -> DistributionTemplate:
             BuildProvider(provider_type="inline::llama-guard"),
             BuildProvider(provider_type="inline::code-scanner"),
         ],
-        "agents": [BuildProvider(provider_type="inline::meta-reference")],
+        "agents": [
+            BuildProvider(provider_type="inline::meta-reference"),
+            BuildProvider(provider_type="inline::dana"),
+        ],
         "post_training": [BuildProvider(provider_type="inline::torchtune-cpu")],
         "eval": [BuildProvider(provider_type="inline::meta-reference")],
         "datasetio": [

@@ -0,0 +1,34 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any

from llama_stack.core.datatypes import AccessRule, Api

from .config import DanaAgentsImplConfig


async def get_provider_impl(
    config: DanaAgentsImplConfig,
    deps: dict[Api, Any],
    policy: list[AccessRule],
    telemetry_enabled: bool = False,
):
    from .agents import DanaAgentsImpl

    impl = DanaAgentsImpl(
        config,
        deps[Api.inference],
        deps[Api.vector_io],
        deps[Api.safety],
        deps[Api.tool_runtime],
        deps[Api.tool_groups],
        deps[Api.conversations],
        policy,
        telemetry_enabled,
    )
    await impl.initialize()
    return impl
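
For orientation, `get_provider_impl` follows the stack's inline-provider factory contract: the resolver imports this module, instantiates the config class, and awaits the factory with the resolved API dependencies. A minimal sketch of that call with stand-in dependency objects (only the imported names come from this commit; the rest is illustrative):

    import asyncio

    from llama_stack.core.datatypes import Api
    from llama_stack.providers.inline.agents.dana import get_provider_impl
    from llama_stack.providers.inline.agents.dana.config import DanaAgentsImplConfig


    async def main() -> None:
        # Build the config from the provider's own sample (same shape as run.yaml above).
        config = DanaAgentsImplConfig.model_validate(
            DanaAgentsImplConfig.sample_run_config(__distro_dir__="/tmp/distro")
        )
        # Stand-in deps: the real resolver injects live API implementations here.
        deps = {
            api: object()
            for api in (Api.inference, Api.vector_io, Api.safety, Api.tool_runtime, Api.tool_groups, Api.conversations)
        }
        impl = await get_provider_impl(config, deps, policy=[])
        print(type(impl).__name__)  # -> DanaAgentsImpl


    asyncio.run(main())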

@@ -0,0 +1,122 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from collections.abc import AsyncIterator

from llama_stack.apis.agents import (
    Agents,
    ListOpenAIResponseInputItem,
    ListOpenAIResponseObject,
    OpenAIDeleteResponseObject,
    OpenAIResponseInput,
    OpenAIResponseInputTool,
    OpenAIResponseObject,
    OpenAIResponseObjectStream,
    Order,
)
from llama_stack.apis.agents.agents import ResponseGuardrail
from llama_stack.apis.agents.openai_responses import OpenAIResponsePrompt, OpenAIResponseText
from llama_stack.apis.conversations import Conversations
from llama_stack.apis.inference import Inference
from llama_stack.apis.safety import Safety
from llama_stack.apis.tools import ToolGroups, ToolRuntime
from llama_stack.apis.vector_io import VectorIO
from llama_stack.core.datatypes import AccessRule
from llama_stack.log import get_logger

from .config import DanaAgentsImplConfig

logger = get_logger(name=__name__, category="agents::dana")


class DanaAgentsImpl(Agents):
    """Stub implementation of the Agents API using the Dana library."""

    def __init__(
        self,
        config: DanaAgentsImplConfig,
        inference_api: Inference,
        vector_io_api: VectorIO,
        safety_api: Safety,
        tool_runtime_api: ToolRuntime,
        tool_groups_api: ToolGroups,
        conversations_api: Conversations,
        policy: list[AccessRule],
        telemetry_enabled: bool = False,
    ):
        self.config = config
        self.inference_api = inference_api
        self.vector_io_api = vector_io_api
        self.safety_api = safety_api
        self.tool_runtime_api = tool_runtime_api
        self.tool_groups_api = tool_groups_api
        self.conversations_api = conversations_api
        self.telemetry_enabled = telemetry_enabled
        self.policy = policy

    async def initialize(self) -> None:
        """Initialize the Dana agents implementation."""
        # TODO: Initialize Dana library here
        logger.info("Dana agents implementation initialized (stub)")

    async def shutdown(self) -> None:
        """Shutdown the Dana agents implementation."""
        # TODO: Cleanup Dana library here
        pass

    # OpenAI responses
    async def get_openai_response(
        self,
        response_id: str,
    ) -> OpenAIResponseObject:
        """Get a model response."""
        raise NotImplementedError("Dana provider is not yet implemented")

    async def create_openai_response(
        self,
        input: str | list[OpenAIResponseInput],
        model: str,
        prompt: OpenAIResponsePrompt | None = None,
        instructions: str | None = None,
        previous_response_id: str | None = None,
        conversation: str | None = None,
        store: bool | None = True,
        stream: bool | None = False,
        temperature: float | None = None,
        text: OpenAIResponseText | None = None,
        tools: list[OpenAIResponseInputTool] | None = None,
        include: list[str] | None = None,
        max_infer_iters: int | None = 10,
        guardrails: list[ResponseGuardrail] | None = None,
    ) -> OpenAIResponseObject | AsyncIterator[OpenAIResponseObjectStream]:
        """Create a model response."""
        raise NotImplementedError("Dana provider is not yet implemented")

    async def list_openai_responses(
        self,
        after: str | None = None,
        limit: int | None = 50,
        model: str | None = None,
        order: Order | None = Order.desc,
    ) -> ListOpenAIResponseObject:
        """List all responses."""
        raise NotImplementedError("Dana provider is not yet implemented")

    async def list_openai_response_input_items(
        self,
        response_id: str,
        after: str | None = None,
        before: str | None = None,
        include: list[str] | None = None,
        limit: int | None = 20,
        order: Order | None = Order.desc,
    ) -> ListOpenAIResponseInputItem:
        """List input items."""
        raise NotImplementedError("Dana provider is not yet implemented")

    async def delete_openai_response(self, response_id: str) -> OpenAIDeleteResponseObject:
        """Delete a response."""
        raise NotImplementedError("Dana provider is not yet implemented")
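
Because every Responses method raises for now, the stub can be smoke-tested without real dependencies; the constructor only stores what it is given, so `None` stand-ins suffice. An illustrative sketch, not part of this commit:

    import asyncio

    import pytest

    from llama_stack.providers.inline.agents.dana.agents import DanaAgentsImpl
    from llama_stack.providers.inline.agents.dana.config import DanaAgentsImplConfig


    def test_stub_methods_raise() -> None:
        config = DanaAgentsImplConfig.model_validate(
            DanaAgentsImplConfig.sample_run_config(__distro_dir__="/tmp/distro")
        )
        # config + six API deps (stubbed with None) + policy, per __init__ above.
        impl = DanaAgentsImpl(config, None, None, None, None, None, None, policy=[])
        with pytest.raises(NotImplementedError):
            asyncio.run(impl.get_openai_response("resp-123"))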

@@ -0,0 +1,38 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from typing import Any

from pydantic import BaseModel

from llama_stack.core.storage.datatypes import KVStoreReference, ResponsesStoreReference


class AgentPersistenceConfig(BaseModel):
    """Nested persistence configuration for agents."""

    agent_state: KVStoreReference
    responses: ResponsesStoreReference


class DanaAgentsImplConfig(BaseModel):
    persistence: AgentPersistenceConfig

    @classmethod
    def sample_run_config(cls, __distro_dir__: str) -> dict[str, Any]:
        return {
            "persistence": {
                "agent_state": KVStoreReference(
                    backend="kv_default",
                    namespace="agents",
                ).model_dump(exclude_none=True),
                "responses": ResponsesStoreReference(
                    backend="sql_default",
                    table_name="responses",
                ).model_dump(exclude_none=True),
            }
        }
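
For reference, the sample config dumps to the same persistence block that lands in the generated run.yaml above; assuming `ResponsesStoreReference` carries the queue defaults seen there, the returned dict is:

    {
        "persistence": {
            "agent_state": {"backend": "kv_default", "namespace": "agents"},
            "responses": {
                "backend": "sql_default",
                "table_name": "responses",
                "max_write_queue_size": 10000,
                "num_writers": 4,
            },
        }
    }

(`exclude_none=True` keeps unset optional fields out of the dump.)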

@@ -38,4 +38,23 @@ def available_providers() -> list[ProviderSpec]:
             ],
             description="Meta's reference implementation of an agent system that can use tools, access vector databases, and perform complex reasoning tasks.",
         ),
+        InlineProviderSpec(
+            api=Api.agents,
+            provider_type="inline::dana",
+            pip_packages=[
+                "dana",
+            ]
+            + kvstore_dependencies(),  # TODO make this dynamic based on the kvstore config
+            module="llama_stack.providers.inline.agents.dana",
+            config_class="llama_stack.providers.inline.agents.dana.DanaAgentsImplConfig",
+            api_dependencies=[
+                Api.inference,
+                Api.safety,
+                Api.vector_io,
+                Api.tool_runtime,
+                Api.tool_groups,
+                Api.conversations,
+            ],
+            description="Dana library implementation of an agent system (stub).",
+        ),
     ]
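
The `module`/`config_class` pair is what makes the provider loadable lazily: resolution boils down to a dotted-path import, roughly like this illustrative sketch (not the stack's actual resolver code):

    import importlib


    def resolve(module_path: str, config_class_path: str):
        # e.g. module_path = "llama_stack.providers.inline.agents.dana"
        module = importlib.import_module(module_path)
        pkg, _, cls_name = config_class_path.rpartition(".")
        config_cls = getattr(importlib.import_module(pkg), cls_name)
        return module.get_provider_impl, config_cls

This is also why the package `__init__.py` above re-exports `DanaAgentsImplConfig`: the registered `config_class` path points at the package, not at `.config`.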

@@ -0,0 +1,6 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

@@ -0,0 +1,46 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

"""
Test suite for the Dana agent provider implementation (stub).

TODO: Add tests when implementation is complete.
"""

from llama_stack.core.datatypes import Api
from llama_stack.core.distribution import get_provider_registry
from llama_stack.providers.inline.agents.dana.agents import DanaAgentsImpl
from llama_stack.providers.inline.agents.dana.config import DanaAgentsImplConfig


def test_dana_provider_in_registry():
    """Test that the Dana provider is registered and can be found in the registry."""
    provider_registry = get_provider_registry()
    agents_providers = provider_registry.get(Api.agents, {})

    # Verify the provider is in the registry
    assert "inline::dana" in agents_providers, "Dana provider should be registered"
    provider_spec = agents_providers["inline::dana"]
    assert provider_spec.provider_type == "inline::dana"
    assert provider_spec.api == Api.agents
    assert provider_spec.module == "llama_stack.providers.inline.agents.dana"
    assert provider_spec.config_class == "llama_stack.providers.inline.agents.dana.DanaAgentsImplConfig"


def test_dana_provider_config():
    """Test that the Dana provider config can be instantiated."""
    config = DanaAgentsImplConfig.sample_run_config(__distro_dir__="test")
    assert isinstance(config, dict)
    assert "persistence" in config
    assert "agent_state" in config["persistence"]
    assert "responses" in config["persistence"]


def test_dana_provider_class_exists():
    """Test that Dana provider class exists."""
    assert DanaAgentsImpl is not None

# TODO: Add actual tests when the provider is implemented