ci: fix external provider test (#2438)
# What does this PR do?

The test wasn't using the correct virtual environment. Also increase the console width for logs.

Signed-off-by: Sébastien Han <seb@redhat.com>
parent de37a04c3e
commit eb04731750
3 changed files with 8 additions and 5 deletions
.github/workflows/test-external-providers.yml (vendored): 10 changes
```diff
@@ -45,20 +45,22 @@ jobs:
       - name: Build distro from config file
         run: |
-          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml
+          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. llama stack build --config tests/external-provider/llama-stack-provider-ollama/custom-distro.yaml

       - name: Start Llama Stack server in background
         if: ${{ matrix.image-type }} == 'venv'
         env:
           INFERENCE_MODEL: "meta-llama/Llama-3.2-3B-Instruct"
         run: |
-          uv run pip list
-          nohup uv run --active llama stack run tests/external-provider/llama-stack-provider-ollama/run.yaml --image-type ${{ matrix.image-type }} > server.log 2>&1 &
+          # Use the virtual environment created by the build step (name comes from build config)
+          source ci-test/bin/activate
+          uv pip list
+          nohup llama stack run tests/external-provider/llama-stack-provider-ollama/run.yaml --image-type ${{ matrix.image-type }} > server.log 2>&1 &

       - name: Wait for Llama Stack server to be ready
         run: |
           for i in {1..30}; do
-            if ! grep -q "remote::custom_ollama from /home/runner/.llama/providers.d/remote/inference/custom_ollama.yaml" server.log; then
+            if ! grep -q "Successfully loaded external provider remote::custom_ollama" server.log; then
               echo "Waiting for Llama Stack server to load the provider..."
               sleep 1
             else
```
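The readiness loop above is plain shell; for readers who prefer it outside a workflow, here is a minimal Python sketch of the same polling logic. The log path (`server.log`), marker string, and 30-attempt budget come from the workflow; the function name `wait_for_provider` is illustrative, not part of the project.

```python
# Minimal sketch of the workflow's readiness loop, assuming the server appends
# "Successfully loaded external provider remote::custom_ollama" to server.log.
import time
from pathlib import Path

MARKER = "Successfully loaded external provider remote::custom_ollama"

def wait_for_provider(log_path: str = "server.log", attempts: int = 30) -> bool:
    """Poll the server log once per second until the marker line appears."""
    for _ in range(attempts):
        log = Path(log_path)
        if log.exists() and MARKER in log.read_text():
            return True
        print("Waiting for Llama Stack server to load the provider...")
        time.sleep(1)
    return False
```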
```diff
@@ -180,6 +180,7 @@ def get_provider_registry(
                     if provider_type_key in ret[api]:
                         logger.warning(f"Overriding already registered provider {provider_type_key} for {api.name}")
                     ret[api][provider_type_key] = spec
+                    logger.info(f"Successfully loaded external provider {provider_type_key}")
                 except yaml.YAMLError as yaml_err:
                     logger.error(f"Failed to parse YAML file {spec_path}: {yaml_err}")
                     raise yaml_err
```
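Taken out of context, this hunk is an override-then-log registration pattern. The sketch below restates it as a standalone function under stated assumptions: the registry shape (API name → provider type → spec) follows the diff, while `register_external_provider` and the plain `dict` registry are illustrative stand-ins for `get_provider_registry`'s internals.

```python
# Standalone sketch of the registration pattern above; `registry` stands in
# for the `ret` mapping built inside get_provider_registry.
import logging

logger = logging.getLogger(__name__)

def register_external_provider(registry: dict[str, dict], api: str,
                               provider_type_key: str, spec: object) -> None:
    providers = registry.setdefault(api, {})
    if provider_type_key in providers:
        # Last spec wins, but leave an audit trail for duplicate registrations.
        logger.warning(f"Overriding already registered provider {provider_type_key} for {api}")
    providers[provider_type_key] = spec
    # The CI workflow greps server.log for exactly this message, so the wording
    # doubles as a readiness signal.
    logger.info(f"Successfully loaded external provider {provider_type_key}")
```

Note the coupling this introduces: the workflow's grep now matches this info line verbatim, so rewording the message would silently break the CI readiness check.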
```diff
@@ -115,7 +115,7 @@ def parse_environment_config(env_config: str) -> dict[str, int]:

 class CustomRichHandler(RichHandler):
     def __init__(self, *args, **kwargs):
-        kwargs["console"] = Console(width=120)
+        kwargs["console"] = Console(width=150)
         super().__init__(*args, **kwargs)

     def emit(self, record):
```
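For context, here is a runnable sketch of the handler after the change (the `emit` override from the file is elided). It assumes the `rich` package is installed; pinning an explicit `Console` width matters in CI, where output is typically not a TTY and Rich would otherwise fall back to a default width and wrap long log lines.

```python
# Sketch of CustomRichHandler after the change: pin Rich to a 150-column
# console so long log lines wrap predictably in non-TTY CI output.
import logging

from rich.console import Console
from rich.logging import RichHandler

class CustomRichHandler(RichHandler):
    def __init__(self, *args, **kwargs):
        kwargs["console"] = Console(width=150)  # was width=120 before this commit
        super().__init__(*args, **kwargs)

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, handlers=[CustomRichHandler()])
    logging.getLogger(__name__).info("rendered at a fixed 150-column width")
```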