# What does this PR do?

## Test Plan

    export MODEL=accounts/fireworks/models/llama4-scout-instruct-basic
    LLAMA_STACK_CONFIG=verification pytest -s -v tests/integration/inference --vision-model $MODEL --text-model $MODEL
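A note on prerequisites, as a hedged sketch: the verification config resolves provider credentials from the environment, so something like the following is presumably needed before the pytest invocation above (the exact variable name is an assumption based on the fireworks provider, not stated in this PR):

    # Assumption: the remote fireworks provider reads its credential from FIREWORKS_API_KEY.
    export FIREWORKS_API_KEY=<your-fireworks-key>

    # Then run the test plan exactly as in the PR description:
    export MODEL=accounts/fireworks/models/llama4-scout-instruct-basic
    LLAMA_STACK_CONFIG=verification pytest -s -v tests/integration/inference \
      --vision-model "$MODEL" --text-model "$MODEL"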
version: '2'
distribution_spec:
  description: Distribution for running e2e tests in CI
  providers:
    inference:
    - remote::openai
    - remote::fireworks-openai-compat
    - remote::together-openai-compat
    - remote::groq-openai-compat
    - remote::sambanova-openai-compat
    - remote::cerebras-openai-compat
    - inline::sentence-transformers
    vector_io:
    - inline::sqlite-vec
    - remote::chromadb
    - remote::pgvector
    safety:
    - inline::llama-guard
    agents:
    - inline::meta-reference
    telemetry:
    - inline::meta-reference
    eval:
    - inline::meta-reference
    datasetio:
    - remote::huggingface
    - inline::localfs
    scoring:
    - inline::basic
    - inline::llm-as-judge
    - inline::braintrust
    tool_runtime:
    - remote::brave-search
    - remote::tavily-search
    - inline::code-interpreter
    - inline::rag-runtime
    - remote::model-context-protocol
image_type: conda
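For context on how a spec like this is consumed, a minimal sketch of building and launching the distribution with the llama CLI, assuming a conda environment per `image_type` above (the file path and port are assumptions for illustration, not part of this PR):

    # Build the distribution (conda image) from this spec.
    # Assumption: the spec is saved as build.yaml in the current directory.
    llama stack build --config build.yaml

    # Launch the resulting stack; the run config path is an assumption here,
    # it is printed by the build step on success.
    llama stack run <generated-run.yaml> --port 8321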