Miscellaneous fixes around telemetry, library client and run yaml autogen

Also add a `venv` image-type for llama stack build
Ashwin Bharambe 2024-12-08 19:11:22 -08:00
parent 224e62290f
commit e951852848
28 changed files with 274 additions and 34 deletions


@@ -16,6 +16,8 @@
     "nltk",
     "numpy",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -45,6 +47,8 @@
     "nltk",
     "numpy",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -75,6 +79,8 @@
     "nltk",
     "numpy",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -103,6 +109,8 @@
     "nltk",
     "numpy",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -133,6 +141,8 @@
     "nltk",
     "numpy",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -164,6 +174,8 @@
     "nltk",
     "numpy",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -194,6 +206,8 @@
     "nltk",
     "numpy",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -226,6 +240,8 @@
     "nltk",
     "numpy",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -262,6 +278,8 @@
     "nltk",
     "numpy",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -292,6 +310,8 @@
     "matplotlib",
     "nltk",
     "numpy",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -323,6 +343,8 @@
     "numpy",
     "ollama",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",
@@ -354,6 +376,8 @@
     "nltk",
     "numpy",
     "openai",
+    "opentelemetry-exporter-otlp-proto-http",
+    "opentelemetry-sdk",
     "pandas",
     "pillow",
     "psycopg2-binary",


@@ -3,3 +3,5 @@
 #
 # This source code is licensed under the terms described in the LICENSE file in
 # the root directory of this source tree.
+#
+# from .distribution.library_client import LlamaStackAsLibraryClient, AsyncLlamaStackAsLibraryClient


@@ -73,7 +73,7 @@ class StackBuild(Subcommand):
             "--image-type",
             type=str,
             help="Image Type to use for the build. This can be either conda or docker. If not specified, will use the image type from the template config.",
-            choices=["conda", "docker"],
+            choices=["conda", "docker", "venv"],
             default="conda",
         )
@@ -124,8 +124,8 @@ class StackBuild(Subcommand):
                 image_type = prompt(
                     "> Enter the image type you want your Llama Stack to be built as (docker or conda): ",
                     validator=Validator.from_callable(
-                        lambda x: x in ["docker", "conda"],
-                        error_message="Invalid image type, please enter conda or docker",
+                        lambda x: x in ["docker", "conda", "venv"],
+                        error_message="Invalid image type, please enter conda or docker or venv",
                     ),
                     default="conda",
                 )
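
For reference, the prompt/validator combination used above comes from `prompt_toolkit`. A self-contained version of the same pattern, assuming `prompt_toolkit` is installed:

```python
# Self-contained sketch of the interactive image-type prompt shown in the diff.
from prompt_toolkit import prompt
from prompt_toolkit.validation import Validator

image_type = prompt(
    "> Enter the image type you want your Llama Stack to be built as (docker, conda, or venv): ",
    validator=Validator.from_callable(
        lambda x: x in ["docker", "conda", "venv"],
        error_message="Invalid image type, please enter conda, docker, or venv",
    ),
    default="conda",
)
print(f"Building a {image_type} image")
```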


@@ -38,6 +38,7 @@ SERVER_DEPENDENCIES = [
 class ImageType(Enum):
     docker = "docker"
     conda = "conda"
+    venv = "venv"


 class ApiInput(BaseModel):
@@ -120,7 +121,7 @@ def build_image(build_config: BuildConfig, build_file_path: Path):
             str(BUILDS_BASE_DIR / ImageType.docker.value),
             " ".join(normal_deps),
         ]
-    else:
+    elif build_config.image_type == ImageType.conda.value:
         script = pkg_resources.resource_filename(
             "llama_stack", "distribution/build_conda_env.sh"
         )
@@ -130,6 +131,16 @@ def build_image(build_config: BuildConfig, build_file_path: Path):
             str(build_file_path),
             " ".join(normal_deps),
         ]
+    elif build_config.image_type == ImageType.venv.value:
+        script = pkg_resources.resource_filename(
+            "llama_stack", "distribution/build_venv.sh"
+        )
+        args = [
+            script,
+            build_config.name,
+            str(build_file_path),
+            " ".join(normal_deps),
+        ]

     if special_deps:
         args.append("#".join(special_deps))
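
The new venv branch hands `build_venv.sh` the same positional contract the conda branch hands `build_conda_env.sh`: build name, build-file path, the space-joined normal deps, plus an optional final argument of special deps joined by `#` (so deps that need their own pip flags get installed one at a time). A hedged sketch of the resulting invocation — the real code resolves the script via `pkg_resources` and uses the stack's own process runner, so names and paths here are illustrative:

```python
# Hypothetical illustration of the argument contract set up above.
import subprocess

script = "/path/to/llama_stack/distribution/build_venv.sh"  # resolved via pkg_resources
args = [script, "my-build", "./my-stack-build.yaml", "numpy pandas scipy"]
special_deps = ["torch --index-url https://download.pytorch.org/whl/cpu"]
if special_deps:
    args.append("#".join(special_deps))  # '#' delimits deps needing separate pip calls

subprocess.run(["bash", *args], check=True)
```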


@@ -0,0 +1,105 @@
+#!/bin/bash
+
+# Copyright (c) Meta Platforms, Inc. and affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the terms described in the LICENSE file in
+# the root directory of this source tree.
+
+# TODO: combine this with build_conda_env.sh since it is almost identical
+# the only difference is that we don't do any conda-specific setup
+
+LLAMA_MODELS_DIR=${LLAMA_MODELS_DIR:-}
+LLAMA_STACK_DIR=${LLAMA_STACK_DIR:-}
+TEST_PYPI_VERSION=${TEST_PYPI_VERSION:-}
+
+if [ -n "$LLAMA_STACK_DIR" ]; then
+  echo "Using llama-stack-dir=$LLAMA_STACK_DIR"
+fi
+if [ -n "$LLAMA_MODELS_DIR" ]; then
+  echo "Using llama-models-dir=$LLAMA_MODELS_DIR"
+fi
+
+if [ "$#" -lt 3 ]; then
+  echo "Usage: $0 <build_name> <build_file_path> <pip_dependencies> [<special_pip_deps>]" >&2
+  echo "Example: $0 mybuild ./my-stack-build.yaml 'numpy pandas scipy'" >&2
+  exit 1
+fi
+
+special_pip_deps="$4"
+
+set -euo pipefail
+
+build_name="$1"
+env_name="llamastack-$build_name"
+build_file_path="$2"
+pip_dependencies="$3"
+
+# Define color codes
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+NC='\033[0m' # No Color
+
+# this is set if we actually create a new environment, in which case we need to clean up
+ENVNAME=""
+
+SCRIPT_DIR=$(dirname "$(readlink -f "$0")")
+source "$SCRIPT_DIR/common.sh"
+
+run() {
+  local env_name="$1"
+  local pip_dependencies="$2"
+  local special_pip_deps="$3"
+
+  if [ -n "$TEST_PYPI_VERSION" ]; then
+    # these packages are damaged in test-pypi, so install them first
+    pip install fastapi libcst
+    pip install --extra-index-url https://test.pypi.org/simple/ \
+      llama-models=="$TEST_PYPI_VERSION" llama-stack=="$TEST_PYPI_VERSION" \
+      $pip_dependencies
+    if [ -n "$special_pip_deps" ]; then
+      IFS='#' read -ra parts <<<"$special_pip_deps"
+      for part in "${parts[@]}"; do
+        echo "$part"
+        pip install $part
+      done
+    fi
+  else
+    # Re-installing llama-stack in the new virtual environment
+    if [ -n "$LLAMA_STACK_DIR" ]; then
+      if [ ! -d "$LLAMA_STACK_DIR" ]; then
+        printf "${RED}Warning: LLAMA_STACK_DIR is set but directory does not exist: %s${NC}\n" "$LLAMA_STACK_DIR" >&2
+        exit 1
+      fi
+      printf "Installing from LLAMA_STACK_DIR: %s\n" "$LLAMA_STACK_DIR"
+      pip install --no-cache-dir -e "$LLAMA_STACK_DIR"
+    else
+      pip install --no-cache-dir llama-stack
+    fi
+
+    if [ -n "$LLAMA_MODELS_DIR" ]; then
+      if [ ! -d "$LLAMA_MODELS_DIR" ]; then
+        printf "${RED}Warning: LLAMA_MODELS_DIR is set but directory does not exist: %s${NC}\n" "$LLAMA_MODELS_DIR" >&2
+        exit 1
+      fi
+      printf "Installing from LLAMA_MODELS_DIR: %s\n" "$LLAMA_MODELS_DIR"
+      pip uninstall -y llama-models
+      pip install --no-cache-dir -e "$LLAMA_MODELS_DIR"
+    fi
+
+    # Install pip dependencies
+    printf "Installing pip dependencies\n"
+    pip install $pip_dependencies
+    if [ -n "$special_pip_deps" ]; then
+      IFS='#' read -ra parts <<<"$special_pip_deps"
+      for part in "${parts[@]}"; do
+        echo "$part"
+        pip install $part
+      done
+    fi
+  fi
+}
+
+run "$env_name" "$pip_dependencies" "$special_pip_deps"


@@ -165,5 +165,5 @@ class BuildConfig(BaseModel):
     )
     image_type: str = Field(
         default="conda",
-        description="Type of package to build (conda | container)",
+        description="Type of package to build (conda | docker | venv)",
     )


@@ -6,6 +6,7 @@

 import asyncio
 import inspect
+import os
 import queue
 import threading
 from concurrent.futures import ThreadPoolExecutor
@@ -32,6 +33,18 @@ from llama_stack.distribution.stack import (
 T = TypeVar("T")


+def is_jupyter():
+    """Check if we're running in a Jupyter notebook"""
+    try:
+        shell = get_ipython().__class__.__name__  # type: ignore
+        if shell == "ZMQInteractiveShell":  # Jupyter notebook or qtconsole
+            return True
+        else:
+            return False
+    except NameError:  # Probably standard Python interpreter
+        return False
+
+
 def stream_across_asyncio_run_boundary(
     async_gen_maker,
     pool_executor: ThreadPoolExecutor,
@@ -102,7 +115,12 @@ class LlamaStackAsLibraryClient(LlamaStackClient):
         self.pool_executor = ThreadPoolExecutor(max_workers=4)

     def initialize(self):
-        asyncio.run(self.async_client.initialize())
+        if is_jupyter():
+            import nest_asyncio
+
+            nest_asyncio.apply()
+
+        return asyncio.run(self.async_client.initialize())

     def get(self, *args, **kwargs):
         if kwargs.get("stream"):
@@ -131,6 +149,10 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
     ):
         super().__init__()

+        # when using the library client, we should not log to console since many
+        # of our logs are intended for server-side usage
+        os.environ["TELEMETRY_SINKS"] = "sqlite"
+
         if config_path_or_template_name.endswith(".yaml"):
             config_path = Path(config_path_or_template_name)
             if not config_path.exists():
@@ -150,13 +172,19 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
             self.impls = await construct_stack(
                 self.config, self.custom_provider_registry
             )
-        except ModuleNotFoundError as e:
+        except ModuleNotFoundError as _e:
             cprint(
                 "Using llama-stack as a library requires installing dependencies depending on the template (providers) you choose.\n",
                 "yellow",
             )
-            print_pip_install_help(self.config.providers)
-            raise e
+            if self.config_path_or_template_name.endswith(".yaml"):
+                print_pip_install_help(self.config.providers)
+            else:
+                cprint(
+                    f"Please run:\n\nllama stack build --template {self.config_path_or_template_name} --image-type venv\n\n",
+                    "yellow",
+                )
+            return False

         console = Console()
         console.print(f"Using config [blue]{self.config_path_or_template_name}[/blue]:")
@@ -171,6 +199,7 @@ class AsyncLlamaStackAsLibraryClient(AsyncLlamaStackClient):
                 endpoint_impls[endpoint.route] = func

         self.endpoint_impls = endpoint_impls
+        return True

     async def get(
         self,
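
The net effect for callers: `initialize()` now returns a boolean instead of raising on missing provider dependencies, and the sync wrapper transparently handles Jupyter's already-running event loop via `nest_asyncio`. A hedged usage sketch — the template name is an example:

```python
# Usage sketch of the updated library client; "ollama" is an example template name.
from llama_stack.distribution.library_client import LlamaStackAsLibraryClient

client = LlamaStackAsLibraryClient("ollama")
if not client.initialize():  # False => deps missing; a build hint was already printed
    raise SystemExit(1)

print(client.models.list())
```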


@@ -17,7 +17,8 @@ from llama_stack_client.types.agent_create_params import AgentConfig

 def main(config_path: str):
     client = LlamaStackAsLibraryClient(config_path)
-    client.initialize()
+    if not client.initialize():
+        return

     models = client.models.list()
     print("\nModels:")


@@ -7,12 +7,13 @@

 from typing import Any, Dict

 from .config import TelemetryConfig, TelemetrySink
-from .telemetry import TelemetryAdapter

-__all__ = ["TelemetryConfig", "TelemetryAdapter", "TelemetrySink"]
+__all__ = ["TelemetryConfig", "TelemetrySink"]


 async def get_provider_impl(config: TelemetryConfig, deps: Dict[str, Any]):
+    from .telemetry import TelemetryAdapter
+
     impl = TelemetryAdapter(config, deps)
     await impl.initialize()
     return impl
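
Moving the `TelemetryAdapter` import inside `get_provider_impl` defers loading the OpenTelemetry stack until the provider is actually instantiated, so importing the provider package just for its config schema stays cheap. A sketch of what that enables — the module path and field defaults are assumptions based on this provider's layout, treat them as illustrative:

```python
# With the lazy import, reading the config schema does not pull in opentelemetry.
# Module path and default-field behavior are assumptions for illustration.
from llama_stack.providers.inline.telemetry.meta_reference import TelemetryConfig

cfg = TelemetryConfig(service_name="my-service")  # other fields use their defaults
print(cfg.sqlite_db_path)
```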


@@ -7,7 +7,7 @@

 from enum import Enum
 from typing import Any, Dict, List

-from pydantic import BaseModel, Field
+from pydantic import BaseModel, Field, field_validator

 from llama_stack.distribution.utils.config_dirs import RUNTIME_BASE_DIR
@@ -36,10 +36,23 @@ class TelemetryConfig(BaseModel):
         description="The path to the SQLite database to use for storing traces",
     )

+    @field_validator("sinks", mode="before")
     @classmethod
-    def sample_run_config(cls, **kwargs) -> Dict[str, Any]:
+    def validate_sinks(cls, v):
+        if isinstance(v, str):
+            return [TelemetrySink(sink.strip()) for sink in v.split(",")]
+        return v
+
+    @classmethod
+    def sample_run_config(
+        cls, __distro_dir__: str = "runtime", db_name: str = "trace_store.db"
+    ) -> Dict[str, Any]:
         return {
             "service_name": "${env.OTEL_SERVICE_NAME:llama-stack}",
-            "sinks": "${env.TELEMETRY_SINKS:['console', 'sqlite']}",
-            "sqlite_db_path": "${env.SQLITE_DB_PATH:${runtime.base_dir}/trace_store.db}",
+            "sinks": "${env.TELEMETRY_SINKS:console,sqlite}",
+            "sqlite_db_path": "${env.SQLITE_DB_PATH:~/.llama/"
+            + __distro_dir__
+            + "/"
+            + db_name
+            + "}",
         }
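
Because the validator runs with `mode="before"`, `sinks` can arrive either as a ready list or as the comma-separated string that `${env.TELEMETRY_SINKS:console,sqlite}` resolves to. A quick sketch, assuming `TelemetrySink` is a string-valued enum with `console` and `sqlite` members:

```python
# Hedged check of the "before" validator; assumes TelemetrySink("console") etc. exist.
cfg = TelemetryConfig(sinks="console, sqlite")  # string from env substitution
assert cfg.sinks == [TelemetrySink("console"), TelemetrySink("sqlite")]

cfg = TelemetryConfig(sinks=[TelemetrySink("sqlite")])  # lists pass through unchanged
```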


@@ -39,7 +39,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/bedrock/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
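
The same three-line telemetry stanza is stamped into every template's run yaml by the autogen; the remaining hunks below differ only in the per-distribution `sqlite_db_path`. The `${env.VAR:default}` placeholders are resolved at server startup. A minimal sketch of the substitution semantics — the stack has its own resolver, this is purely illustrative:

```python
# Illustrative resolver for the ${env.VAR:default} placeholders in these YAMLs.
import os
import re

def resolve(value: str) -> str:
    return re.sub(
        r"\$\{env\.([A-Za-z0-9_]+):?([^}]*)\}",
        lambda m: os.environ.get(m.group(1), m.group(2)),
        value,
    )

print(resolve("${env.SQLITE_DB_PATH:~/.llama/distributions/bedrock/trace_store.db}"))
```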


@@ -38,7 +38,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/cerebras/trace_store.db}
 metadata_store:
   namespace: null
   type: sqlite


@@ -41,7 +41,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/fireworks/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -46,7 +46,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/hf-endpoint/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -41,7 +41,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/hf-endpoint/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -46,7 +46,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/hf-serverless/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -41,7 +41,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/hf-serverless/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -48,7 +48,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/meta-reference-gpu/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -42,7 +42,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/meta-reference-gpu/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -44,7 +44,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/meta-reference-quantized-gpu/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -40,7 +40,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/ollama/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -40,7 +40,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/ollama/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -45,7 +45,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/remote-vllm/trace_store.db}
 metadata_store:
   namespace: null
   type: sqlite


@@ -39,7 +39,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/remote-vllm/trace_store.db}
 metadata_store:
   namespace: null
   type: sqlite


@@ -44,7 +44,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/tgi/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -40,7 +40,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/tgi/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -41,7 +41,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/together/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference


@@ -44,7 +44,10 @@ providers:
   telemetry:
   - provider_id: meta-reference
     provider_type: inline::meta-reference
-    config: {}
+    config:
+      service_name: ${env.OTEL_SERVICE_NAME:llama-stack}
+      sinks: ${env.TELEMETRY_SINKS:console,sqlite}
+      sqlite_db_path: ${env.SQLITE_DB_PATH:~/.llama/distributions/vllm-gpu/trace_store.db}
   eval:
   - provider_id: meta-reference
     provider_type: inline::meta-reference