# What does this PR do?

Llama Stack now supports a new OpenAI-compatible endpoint with Azure OpenAI. The starter distro has been updated to add the new remote inference provider. A few tests have been modified and improved.

## Test Plan

Deploy a model in the Azure portal, then:

```
$ AZURE_API_KEY=... AZURE_API_BASE=... uv run llama stack build --image-type venv --providers inference=remote::azure --run
...
$ LLAMA_STACK_CONFIG=http://localhost:8321 uv run --group test pytest -v -ra --text-model azure/gpt-4.1 tests/integration/inference/test_openai_completion.py
...
```

Results:

```
============================================= test session starts ==============================================
platform darwin -- Python 3.12.8, pytest-8.4.1, pluggy-1.6.0 -- /Users/leseb/Documents/AI/llama-stack/.venv/bin/python3
cachedir: .pytest_cache
metadata: {'Python': '3.12.8', 'Platform': 'macOS-15.6.1-arm64-arm-64bit', 'Packages': {'pytest': '8.4.1', 'pluggy': '1.6.0'}, 'Plugins': {'anyio': '4.9.0', 'html': '4.1.1', 'socket': '0.7.0', 'asyncio': '1.1.0', 'json-report': '1.5.0', 'timeout': '2.4.0', 'metadata': '3.1.1', 'cov': '6.2.1', 'nbval': '0.11.0', 'hydra-core': '1.3.2'}}
rootdir: /Users/leseb/Documents/AI/llama-stack
configfile: pyproject.toml
plugins: anyio-4.9.0, html-4.1.1, socket-0.7.0, asyncio-1.1.0, json-report-1.5.0, timeout-2.4.0, metadata-3.1.1, cov-6.2.1, nbval-0.11.0, hydra-core-1.3.2
asyncio: mode=Mode.AUTO, asyncio_default_fixture_loop_scope=None, asyncio_default_test_loop_scope=function
collected 27 items

tests/integration/inference/test_openai_completion.py::test_openai_completion_non_streaming[txt=azure/gpt-5-mini-inference:completion:sanity] SKIPPED [ 3%]
tests/integration/inference/test_openai_completion.py::test_openai_completion_non_streaming_suffix[txt=azure/gpt-5-mini-inference:completion:suffix] SKIPPED [ 7%]
tests/integration/inference/test_openai_completion.py::test_openai_completion_streaming[txt=azure/gpt-5-mini-inference:completion:sanity] SKIPPED [ 11%]
tests/integration/inference/test_openai_completion.py::test_openai_completion_prompt_logprobs[txt=azure/gpt-5-mini-1] SKIPPED [ 14%]
tests/integration/inference/test_openai_completion.py::test_openai_completion_guided_choice[txt=azure/gpt-5-mini] SKIPPED [ 18%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[openai_client-txt=azure/gpt-5-mini-inference:chat_completion:non_streaming_01] PASSED [ 22%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[openai_client-txt=azure/gpt-5-mini-inference:chat_completion:streaming_01] PASSED [ 25%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming_with_n[openai_client-txt=azure/gpt-5-mini-inference:chat_completion:streaming_01] PASSED [ 29%]
tests/integration/inference/test_openai_completion.py::test_inference_store[openai_client-txt=azure/gpt-5-mini-True] PASSED [ 33%]
tests/integration/inference/test_openai_completion.py::test_inference_store_tool_calls[openai_client-txt=azure/gpt-5-mini-True] PASSED [ 37%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming_with_file[txt=azure/gpt-5-mini] SKIPPED [ 40%]
tests/integration/inference/test_openai_completion.py::test_openai_completion_prompt_logprobs[txt=azure/gpt-5-mini-0] SKIPPED [ 44%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[openai_client-txt=azure/gpt-5-mini-inference:chat_completion:non_streaming_02] PASSED [ 48%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[openai_client-txt=azure/gpt-5-mini-inference:chat_completion:streaming_02] PASSED [ 51%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming_with_n[openai_client-txt=azure/gpt-5-mini-inference:chat_completion:streaming_02] PASSED [ 55%]
tests/integration/inference/test_openai_completion.py::test_inference_store[openai_client-txt=azure/gpt-5-mini-False] PASSED [ 59%]
tests/integration/inference/test_openai_completion.py::test_inference_store_tool_calls[openai_client-txt=azure/gpt-5-mini-False] PASSED [ 62%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=azure/gpt-5-mini-inference:chat_completion:non_streaming_01] PASSED [ 66%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[client_with_models-txt=azure/gpt-5-mini-inference:chat_completion:streaming_01] PASSED [ 70%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming_with_n[client_with_models-txt=azure/gpt-5-mini-inference:chat_completion:streaming_01] PASSED [ 74%]
tests/integration/inference/test_openai_completion.py::test_inference_store[client_with_models-txt=azure/gpt-5-mini-True] PASSED [ 77%]
tests/integration/inference/test_openai_completion.py::test_inference_store_tool_calls[client_with_models-txt=azure/gpt-5-mini-True] PASSED [ 81%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_non_streaming[client_with_models-txt=azure/gpt-5-mini-inference:chat_completion:non_streaming_02] PASSED [ 85%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming[client_with_models-txt=azure/gpt-5-mini-inference:chat_completion:streaming_02] PASSED [ 88%]
tests/integration/inference/test_openai_completion.py::test_openai_chat_completion_streaming_with_n[client_with_models-txt=azure/gpt-5-mini-inference:chat_completion:streaming_02] PASSED [ 92%]
tests/integration/inference/test_openai_completion.py::test_inference_store[client_with_models-txt=azure/gpt-5-mini-False] PASSED [ 96%]
tests/integration/inference/test_openai_completion.py::test_inference_store_tool_calls[client_with_models-txt=azure/gpt-5-mini-False] PASSED [100%]

=========================================== short test summary info ============================================
SKIPPED [3] tests/integration/inference/test_openai_completion.py:63: Model azure/gpt-5-mini hosted by remote::azure doesn't support OpenAI completions.
SKIPPED [3] tests/integration/inference/test_openai_completion.py:118: Model azure/gpt-5-mini hosted by remote::azure doesn't support vllm extra_body parameters.
SKIPPED [1] tests/integration/inference/test_openai_completion.py:124: Model azure/gpt-5-mini hosted by remote::azure doesn't support chat completion calls with base64 encoded files.
================================== 20 passed, 7 skipped, 2 warnings in 51.77s ==================================
```

Signed-off-by: Sébastien Han <seb@redhat.com>
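For a quick manual check outside of pytest, the same endpoint can be exercised with the plain `openai` Python client pointed at the running stack. The sketch below is illustrative and not part of the PR: the `base_url` path and the `azure/gpt-4.1` model id are assumptions taken from the test plan above and may need adjusting for your deployment.

```python
# Minimal sketch (assumption, not part of this PR): call the OpenAI-compatible
# endpoint of a locally running llama-stack server that has the remote::azure
# provider enabled. The base_url path and model id are illustrative.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:8321/v1/openai/v1",  # assumed OpenAI-compatible route of the stack
    api_key="none",  # Azure credentials are handled server-side by the provider
)

response = client.chat.completions.create(
    model="azure/gpt-4.1",  # model id used in the test plan above
    messages=[{"role": "user", "content": "Say hello in one short sentence."}],
)
print(response.choices[0].message.content)
```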
The updated starter distribution config (YAML, 245 lines, 7.7 KiB):
```yaml
version: 2
image_name: starter
apis:
- agents
- batches
- datasetio
- eval
- files
- inference
- post_training
- safety
- scoring
- telemetry
- tool_runtime
- vector_io
providers:
  inference:
  - provider_id: ${env.CEREBRAS_API_KEY:+cerebras}
    provider_type: remote::cerebras
    config:
      base_url: https://api.cerebras.ai
      api_key: ${env.CEREBRAS_API_KEY:=}
  - provider_id: ${env.OLLAMA_URL:+ollama}
    provider_type: remote::ollama
    config:
      url: ${env.OLLAMA_URL:=http://localhost:11434}
  - provider_id: ${env.VLLM_URL:+vllm}
    provider_type: remote::vllm
    config:
      url: ${env.VLLM_URL:=}
      max_tokens: ${env.VLLM_MAX_TOKENS:=4096}
      api_token: ${env.VLLM_API_TOKEN:=fake}
      tls_verify: ${env.VLLM_TLS_VERIFY:=true}
  - provider_id: ${env.TGI_URL:+tgi}
    provider_type: remote::tgi
    config:
      url: ${env.TGI_URL:=}
  - provider_id: fireworks
    provider_type: remote::fireworks
    config:
      url: https://api.fireworks.ai/inference/v1
      api_key: ${env.FIREWORKS_API_KEY:=}
  - provider_id: together
    provider_type: remote::together
    config:
      url: https://api.together.xyz/v1
      api_key: ${env.TOGETHER_API_KEY:=}
  - provider_id: bedrock
    provider_type: remote::bedrock
  - provider_id: ${env.NVIDIA_API_KEY:+nvidia}
    provider_type: remote::nvidia
    config:
      url: ${env.NVIDIA_BASE_URL:=https://integrate.api.nvidia.com}
      api_key: ${env.NVIDIA_API_KEY:=}
      append_api_version: ${env.NVIDIA_APPEND_API_VERSION:=True}
  - provider_id: openai
    provider_type: remote::openai
    config:
      api_key: ${env.OPENAI_API_KEY:=}
      base_url: ${env.OPENAI_BASE_URL:=https://api.openai.com/v1}
  - provider_id: anthropic
    provider_type: remote::anthropic
    config:
      api_key: ${env.ANTHROPIC_API_KEY:=}
  - provider_id: gemini
    provider_type: remote::gemini
    config:
      api_key: ${env.GEMINI_API_KEY:=}
  - provider_id: ${env.VERTEX_AI_PROJECT:+vertexai}
    provider_type: remote::vertexai
    config:
      project: ${env.VERTEX_AI_PROJECT:=}
      location: ${env.VERTEX_AI_LOCATION:=us-central1}
  - provider_id: groq
    provider_type: remote::groq
    config:
      url: https://api.groq.com
      api_key: ${env.GROQ_API_KEY:=}
  - provider_id: sambanova
    provider_type: remote::sambanova
    config:
      url: https://api.sambanova.ai/v1
      api_key: ${env.SAMBANOVA_API_KEY:=}
  - provider_id: ${env.AZURE_API_KEY:+azure}
    provider_type: remote::azure
    config:
      api_key: ${env.AZURE_API_KEY:=}
      api_base: ${env.AZURE_API_BASE:=}
      api_version: ${env.AZURE_API_VERSION:=}
      api_type: ${env.AZURE_API_TYPE:=}
  - provider_id: sentence-transformers
    provider_type: inline::sentence-transformers
  vector_io:
  - provider_id: faiss
    provider_type: inline::faiss
    config:
      kvstore:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/faiss_store.db
  - provider_id: sqlite-vec
    provider_type: inline::sqlite-vec
    config:
      db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/sqlite_vec.db
      kvstore:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/sqlite_vec_registry.db
  - provider_id: ${env.MILVUS_URL:+milvus}
    provider_type: inline::milvus
    config:
      db_path: ${env.MILVUS_DB_PATH:=~/.llama/distributions/starter}/milvus.db
      kvstore:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/milvus_registry.db
  - provider_id: ${env.CHROMADB_URL:+chromadb}
    provider_type: remote::chromadb
    config:
      url: ${env.CHROMADB_URL:=}
      kvstore:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter/}/chroma_remote_registry.db
  - provider_id: ${env.PGVECTOR_DB:+pgvector}
    provider_type: remote::pgvector
    config:
      host: ${env.PGVECTOR_HOST:=localhost}
      port: ${env.PGVECTOR_PORT:=5432}
      db: ${env.PGVECTOR_DB:=}
      user: ${env.PGVECTOR_USER:=}
      password: ${env.PGVECTOR_PASSWORD:=}
      kvstore:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/pgvector_registry.db
  files:
  - provider_id: meta-reference-files
    provider_type: inline::localfs
    config:
      storage_dir: ${env.FILES_STORAGE_DIR:=~/.llama/distributions/starter/files}
      metadata_store:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/files_metadata.db
  safety:
  - provider_id: llama-guard
    provider_type: inline::llama-guard
    config:
      excluded_categories: []
  - provider_id: code-scanner
    provider_type: inline::code-scanner
  agents:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      persistence_store:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/agents_store.db
      responses_store:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/responses_store.db
  telemetry:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      service_name: "${env.OTEL_SERVICE_NAME:=\u200B}"
      sinks: ${env.TELEMETRY_SINKS:=console,sqlite}
      sqlite_db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/trace_store.db
      otel_exporter_otlp_endpoint: ${env.OTEL_EXPORTER_OTLP_ENDPOINT:=}
  post_training:
  - provider_id: torchtune-cpu
    provider_type: inline::torchtune-cpu
    config:
      checkpoint_format: meta
  eval:
  - provider_id: meta-reference
    provider_type: inline::meta-reference
    config:
      kvstore:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/meta_reference_eval.db
  datasetio:
  - provider_id: huggingface
    provider_type: remote::huggingface
    config:
      kvstore:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/huggingface_datasetio.db
  - provider_id: localfs
    provider_type: inline::localfs
    config:
      kvstore:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/localfs_datasetio.db
  scoring:
  - provider_id: basic
    provider_type: inline::basic
  - provider_id: llm-as-judge
    provider_type: inline::llm-as-judge
  - provider_id: braintrust
    provider_type: inline::braintrust
    config:
      openai_api_key: ${env.OPENAI_API_KEY:=}
  tool_runtime:
  - provider_id: brave-search
    provider_type: remote::brave-search
    config:
      api_key: ${env.BRAVE_SEARCH_API_KEY:=}
      max_results: 3
  - provider_id: tavily-search
    provider_type: remote::tavily-search
    config:
      api_key: ${env.TAVILY_SEARCH_API_KEY:=}
      max_results: 3
  - provider_id: rag-runtime
    provider_type: inline::rag-runtime
  - provider_id: model-context-protocol
    provider_type: remote::model-context-protocol
  batches:
  - provider_id: reference
    provider_type: inline::reference
    config:
      kvstore:
        type: sqlite
        db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/batches.db
metadata_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/registry.db
inference_store:
  type: sqlite
  db_path: ${env.SQLITE_STORE_DIR:=~/.llama/distributions/starter}/inference_store.db
models: []
shields:
- shield_id: llama-guard
  provider_id: ${env.SAFETY_MODEL:+llama-guard}
  provider_shield_id: ${env.SAFETY_MODEL:=}
- shield_id: code-scanner
  provider_id: ${env.CODE_SCANNER_MODEL:+code-scanner}
  provider_shield_id: ${env.CODE_SCANNER_MODEL:=}
vector_dbs: []
datasets: []
scoring_fns: []
benchmarks: []
tool_groups:
- toolgroup_id: builtin::websearch
  provider_id: tavily-search
- toolgroup_id: builtin::rag
  provider_id: rag-runtime
server:
  port: 8321
```
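A note on the provider ids of the form `${env.AZURE_API_KEY:+azure}` used throughout the config above: the `:+` form only resolves to a value when the corresponding environment variable is set, so optional providers such as the new Azure entry stay disabled unless `AZURE_API_KEY` (plus `AZURE_API_BASE`, and optionally `AZURE_API_VERSION` / `AZURE_API_TYPE`) is exported before starting the server. One way to sanity-check that the provider was picked up is to list the registered models from the running server; the sketch below is an assumption for illustration, using the `llama-stack-client` Python package and the default port 8321 from the test plan.

```python
# Minimal sketch (assumption, not from this PR): verify that the conditionally
# enabled Azure provider was registered after starting the server with
# AZURE_API_KEY / AZURE_API_BASE exported. Assumes llama-stack-client is
# installed and the server listens on localhost:8321.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# Print each registered model and the provider that serves it; Azure-backed
# models should show provider_id "azure" when the env vars were set.
for model in client.models.list():
    print(model.identifier, model.provider_id)
```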