# llama-stack-mirror/llama_stack/providers/tests/ci_test_config.yaml
# snapshot: 2025-01-15 16:17:27 -08:00
# CI matrix for the inference test suite: which pytest node IDs to run and
# which provider fixtures / models to parameterize them with.
inference:
  tests:
    - inference/test_vision_inference.py::test_vision_chat_completion_streaming
    - inference/test_vision_inference.py::test_vision_chat_completion_non_streaming
    - inference/test_text_inference.py::test_structured_output
    - inference/test_text_inference.py::test_chat_completion_streaming
    - inference/test_text_inference.py::test_chat_completion_non_streaming
    - inference/test_text_inference.py::test_chat_completion_with_tool_calling
    - inference/test_text_inference.py::test_chat_completion_with_tool_calling_streaming
  fixtures:
    # Each entry is one provider combination the suite runs against.
    provider_fixtures:
      - inference: ollama
      - default_fixture_param_id: fireworks
      - inference: together
      # - inference: tgi
      # - inference: vllm_remote
    inference_models:
      - meta-llama/Llama-3.1-8B-Instruct
      - meta-llama/Llama-3.2-11B-Vision-Instruct
    # ~ (null): this suite does not exercise a safety shield or embedding model.
    safety_shield: ~
    embedding_model: ~
# CI matrix for the agents test suite.
agent:
  tests:
    - agents/test_agents.py::test_agent_turns_with_safety
    - agents/test_agents.py::test_rag_agent
  fixtures:
    # Each entry is one provider combination the suite runs against.
    provider_fixtures:
      - default_fixture_param_id: ollama
      - default_fixture_param_id: together
      - default_fixture_param_id: fireworks
    # ~ (null): no dedicated safety shield / embedding model override here.
    safety_shield: ~
    embedding_model: ~
    inference_models:
      - meta-llama/Llama-3.2-1B-Instruct
# CI matrix for the memory (retrieval) test suite.
memory:
  tests:
    - memory/test_memory.py::test_query_documents
  fixtures:
    # Each entry is one provider combination; the middle entry pairs a
    # sentence_transformers inference provider with a faiss memory provider.
    provider_fixtures:
      - default_fixture_param_id: ollama
      - inference: sentence_transformers
        memory: faiss
      - default_fixture_param_id: chroma
    inference_models:
      - meta-llama/Llama-3.2-1B-Instruct
    # ~ (null): no safety shield / embedding model override for this suite.
    safety_shield: ~
    embedding_model: ~