Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-08-02 00:34:44 +00:00)

Test new provider name

Commit: 1481a67365
Parent: 377896a4c5
2 changed files with 32 additions and 6 deletions
@@ -41,6 +41,11 @@ on:
         required: true
         default: "meta-reference"
 
+      api_key:
+        description: 'Provider API key'
+        required: false
+        default: "---"
+
 env:
   # Path to model checkpoints within EFS volume
   MODEL_CHECKPOINT_DIR: "/data/llama/Llama3.2-3B-Instruct"
@@ -52,7 +57,13 @@ env:
   MODEL_IDS: "${{ inputs.model_ids || 'Llama3.2-3B-Instruct' }}"
 
   # ID used for each test's provider config
-  PROVIDER_ID: "${{ inputs.provider_id || 'meta-reference' }}"
+  #PROVIDER_ID: "${{ inputs.provider_id || 'meta-reference' }}"
+
+  # Defined dynamically when each test is run below
+  #PROVIDER_CONFIG: ""
+
+  # (Unused) API key that can be manually defined for workflow dispatch
+  API_KEY: "${{ inputs.api_key || '' }}"
 
   # Defines which directories in TESTS_PATH to exclude from the test loop
   EXCLUDED_DIRS: "__pycache__"
@@ -67,7 +78,7 @@ jobs:
       pull-requests: write
     defaults:
       run:
-        shell: bash
+        shell: bash # default shell to run all steps for a given job.
    runs-on: ${{ inputs.runner != '' && inputs.runner || 'llama-stack-gha-runner-gpu' }}
    if: always()
    steps:
@@ -134,6 +145,14 @@ jobs:
      ############################
      #### UPDATE SYSTEM PATH ####
      ############################
+      - name: "[DEBUG] Update path: before"
+        id: path_update_before
+        if: ${{ inputs.debug == 'true' }}
+        run: |
+          echo "System path before update:"
+          echo "PATH=$PATH"
+          echo "GITHUB_PATH=$GITHUB_PATH"
+
      - name: "Update path: execute"
        id: path_update_exec
        run: |
@@ -142,6 +161,14 @@ jobs:
          mkdir -p ${HOME}/.local/bin
          echo "${HOME}/.local/bin" >> "$GITHUB_PATH"
 
+      - name: "[DEBUG] Update path: after"
+        id: path_update_after
+        if: ${{ inputs.debug == 'true' }}
+        run: |
+          echo "System path after update:"
+          echo "PATH=$PATH"
+          echo "GITHUB_PATH=$GITHUB_PATH"
+
      ##################################
      #### DEPENDENCY INSTALLATIONS ####
      ##################################
@@ -202,7 +229,6 @@ jobs:
        working-directory: "${{ github.workspace }}"
        run: |
          pattern=""
-          echo "PROVIDER_ID = ${PROVIDER_ID}"
          for dir in llama_stack/providers/tests/*; do
            if [ -d "$dir" ]; then
              dir_name=$(basename "$dir")
|
@ -210,7 +236,7 @@ jobs:
|
||||||
for file in "$dir"/test_*.py; do
|
for file in "$dir"/test_*.py; do
|
||||||
test_name=$(basename "$file")
|
test_name=$(basename "$file")
|
||||||
new_file="result-${dir_name}-${test_name}.xml"
|
new_file="result-${dir_name}-${test_name}.xml"
|
||||||
if torchrun $(which pytest) -s -v ${TESTS_PATH}/${dir_name}/${test_name} -m "meta-reference and llama_3b" \
|
if torchrun $(which pytest) -s -v ${TESTS_PATH}/${dir_name}/${test_name} -m "meta_reference and llama_3b" \
|
||||||
--junitxml="${{ github.workspace }}/${new_file}"; then
|
--junitxml="${{ github.workspace }}/${new_file}"; then
|
||||||
echo "Test passed: $test_name"
|
echo "Test passed: $test_name"
|
||||||
else
|
else
|
||||||
|
|
|
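The only functional change in this hunk is the marker expression passed to pytest's -m option: "meta-reference" becomes "meta_reference". This is most likely because -m expressions only accept plain identifiers combined with and/or/not, so a hyphenated marker name cannot be selected. A minimal, hypothetical sketch of a test carrying those markers (the test name and body below are illustrative, not taken from the repository):

import pytest

# Hypothetical test module: underscores keep the marker names valid
# identifiers, so `pytest -m "meta_reference and llama_3b"` can select them.
@pytest.mark.meta_reference
@pytest.mark.llama_3b
def test_inference_smoke():
    # Placeholder assertion; the real tests live under llama_stack/providers/tests/.
    assert True

In practice these markers would also be registered in the project's pytest configuration so that strict-marker runs stay warning-free.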
@@ -36,6 +36,8 @@ class MetaReferenceInferenceImpl(Inference, ModelRegistryHelper, ModelsProtocolP
     def __init__(self, config: MetaReferenceInferenceConfig) -> None:
         self.config = config
         model = resolve_model(config.model)
+        if model is None:
+            raise RuntimeError(f"Unknown model: {config.model}, Run `llama model list`")
         ModelRegistryHelper.__init__(
             self,
             [
@@ -45,8 +47,6 @@ class MetaReferenceInferenceImpl(Inference, ModelRegistryHelper, ModelsProtocolP
                 )
             ],
         )
-        if model is None:
-            raise RuntimeError(f"Unknown model: {config.model}, Run `llama model list`")
         self.model = model
         # verify that the checkpoint actually is for this model lol
 
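Taken together, the two hunks in the second changed file move the resolve_model None check ahead of ModelRegistryHelper.__init__, so an unknown model now fails fast before any registry setup. A minimal sketch of the resulting constructor; the model-alias list passed to ModelRegistryHelper.__init__ is elided in the diff and left as a placeholder here:

    def __init__(self, config: MetaReferenceInferenceConfig) -> None:
        self.config = config
        model = resolve_model(config.model)
        # Fail fast: bail out before initializing the registry helper.
        if model is None:
            raise RuntimeError(f"Unknown model: {config.model}, Run `llama model list`")
        ModelRegistryHelper.__init__(
            self,
            [
                ...,  # model alias entries, elided in this diff
            ],
        )
        self.model = model
        # verify that the checkpoint actually is for this model lol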