# What does this PR do?

- providers and their models now live in config.yaml
- better distinguish different cases within a test
- add a `model` key to surface the provider's `model_id`
- include an example command to rerun a single test case (an illustrative sketch follows the test plan below)

## Test Plan

<img width="1173" alt="image" src="https://github.com/user-attachments/assets/b414baf0-c768-451f-8c3b-c2905cf36fac" />
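For reference, a single case can typically be rerun with pytest's `-k` expression filter; the file path and match expression below are illustrative, not taken verbatim from this PR:

```bash
# Hypothetical: rerun one image test case for one Fireworks model only.
pytest tests/verifications/openai_api/test_chat_completion.py \
  -k "test_chat_non_streaming_image and llama-v3p3"
```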
The Fireworks provider configuration:

```yaml
base_url: https://api.fireworks.ai/inference/v1
api_key_var: FIREWORKS_API_KEY
models:
- accounts/fireworks/models/llama-v3p3-70b-instruct
- accounts/fireworks/models/llama4-scout-instruct-basic
- accounts/fireworks/models/llama4-maverick-instruct-basic
model_display_names:
  accounts/fireworks/models/llama-v3p3-70b-instruct: Llama-3.3-70B-Instruct
  accounts/fireworks/models/llama4-scout-instruct-basic: Llama-4-Scout-Instruct
  accounts/fireworks/models/llama4-maverick-instruct-basic: Llama-4-Maverick-Instruct
test_exclusions:
  accounts/fireworks/models/llama-v3p3-70b-instruct:
  - test_chat_non_streaming_image
  - test_chat_streaming_image
```
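As a minimal sketch of how a config like this can drive the verification tests (assuming PyYAML; the file name and helper functions are illustrative, not the project's actual loader):

```python
import yaml

# Load the per-provider config shown above; the path is illustrative.
with open("fireworks.yaml") as f:
    conf = yaml.safe_load(f)

def display_name(model_id: str) -> str:
    # Fall back to the raw provider model_id when no friendly name is mapped.
    return conf.get("model_display_names", {}).get(model_id, model_id)

def is_excluded(model_id: str, test_name: str) -> bool:
    # test_exclusions maps a model_id to the test cases it should skip.
    return test_name in conf.get("test_exclusions", {}).get(model_id, [])

for model in conf["models"]:
    skip = is_excluded(model, "test_chat_streaming_image")
    print(f"{display_name(model)}: streaming image test "
          f"{'excluded' if skip else 'included'}")
```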