mirror of https://github.com/meta-llama/llama-stack.git
synced 2025-08-15 14:08:00 +00:00

Merge 68d7dc521e into 5b312a80b9
This commit is contained in commit 0407d58e0a.
5 changed files with 68 additions and 68 deletions
@@ -17,8 +17,6 @@ on:
jobs:
  test-external-providers-from-module:
    # This workflow is disabled. See https://github.com/meta-llama/llama-stack/pull/2975#issuecomment-3138702984 for details
    if: false
    runs-on: ubuntu-latest
    strategy:
      matrix:
@@ -32,50 +30,10 @@ jobs:
      - name: Install dependencies
        uses: ./.github/actions/setup-runner

      - name: Install Ramalama
        shell: bash
        run: |
          uv pip install ramalama

      - name: Run Ramalama
        shell: bash
        run: |
          nohup ramalama serve llama3.2:3b-instruct-fp16 > ramalama_server.log 2>&1 &
      - name: Apply image type to config file
        run: |
          yq -i '.image_type = "${{ matrix.image-type }}"' tests/external/ramalama-stack/run.yaml
          cat tests/external/ramalama-stack/run.yaml

      - name: Build distro from config file
        run: |
          USE_COPY_NOT_MOUNT=true LLAMA_STACK_DIR=. uv run llama stack build --config tests/external/ramalama-stack/build.yaml

      - name: Start Llama Stack server in background
        if: ${{ matrix.image-type }} == 'venv'
        env:
          INFERENCE_MODEL: "llama3.2:3b-instruct-fp16"
          LLAMA_STACK_LOG_FILE: "server.log"
        run: |
          # Use the virtual environment created by the build step (name comes from build config)
          source ramalama-stack-test/bin/activate
          uv pip list
          nohup llama stack run tests/external/ramalama-stack/run.yaml --image-type ${{ matrix.image-type }} > server.log 2>&1 &

      - name: Wait for Llama Stack server to be ready
        run: |
          for i in {1..30}; do
            if ! grep -q "successfully connected to Ramalama" server.log; then
              echo "Waiting for Llama Stack server to load the provider..."
              sleep 1
            else
              echo "Provider loaded"
              exit 0
            fi
          done
          echo "Provider failed to load"
          cat server.log
          exit 1

      - name: Upload all logs to artifacts
        if: ${{ always() }}
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
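
(Aside: the wait step above is a generic "poll a log file for a marker string" loop. A minimal standalone sketch of that pattern is shown below; the function name and the thirty-second default timeout are illustrative and not part of the workflow itself.)

#!/usr/bin/env bash
# Poll a log file until a marker string appears, or give up after a timeout.
# Usage: wait_for_marker <logfile> <marker> [timeout_seconds]
wait_for_marker() {
  local logfile="$1" marker="$2" timeout="${3:-30}"
  for ((i = 0; i < timeout; i++)); do
    if grep -q "$marker" "$logfile" 2>/dev/null; then
      echo "Marker found: $marker"
      return 0
    fi
    echo "Waiting for '$marker' in $logfile..."
    sleep 1
  done
  echo "Timed out waiting for '$marker'" >&2
  cat "$logfile" >&2 || true
  return 1
}

# Example, using the marker string from the workflow above:
# wait_for_marker server.log "successfully connected to Ramalama" 30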
@@ -143,8 +143,17 @@ ensure_conda_env_python310() {
  if [ -n "$external_provider_deps" ]; then
    IFS='#' read -ra parts <<<"$external_provider_deps"
    for part in "${parts[@]}"; do
      echo "$part"
      uv pip install "$part"
      echo "Installing external provider: $part"

      # Create a temporary constraint file to exclude llama-stack
      cat > /tmp/constraints.txt << 'EOF'
# Exclude llama-stack to avoid circular dependencies
llama-stack==0.0.0
EOF

      # Install the external provider with constraints to exclude llama-stack
      uv pip install --constraint /tmp/constraints.txt "$part"
      rm -f /tmp/constraints.txt
    done
  fi
else
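
(Aside: the hunk above swaps a plain "uv pip install" for an install driven by a pip-style constraints file; per the comments, the intent is to keep the external provider install from pulling in another copy of llama-stack and creating a circular dependency. A minimal standalone sketch of that pattern follows; "example-provider-package" is a placeholder, not a real package.)

#!/usr/bin/env bash
set -euo pipefail

# Write a throwaway constraints file. Constraints never add packages to an
# install; they only restrict which versions the resolver is allowed to pick.
constraints="$(mktemp)"
cat > "$constraints" << 'EOF'
# Exclude llama-stack to avoid circular dependencies
llama-stack==0.0.0
EOF

# Install the external provider against the constraints file.
uv pip install --constraint "$constraints" "example-provider-package"

rm -f "$constraints"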
@@ -193,7 +202,12 @@ try:
    module = importlib.import_module(f'$package_name.provider')
    spec = module.get_provider_spec()
    if hasattr(spec, 'pip_packages') and spec.pip_packages:
        print('\\n'.join(spec.pip_packages))
        # Filter out llama-stack from pip_packages to avoid circular dependency
        filtered_packages = [pkg for pkg in spec.pip_packages if not pkg.startswith('llama-stack')]
        if filtered_packages:
            print('\\n'.join(filtered_packages))
        else:
            print('No non-llama-stack dependencies found', file=sys.stderr)
except Exception as e:
    print(f'Error getting provider spec for $package_name: {e}', file=sys.stderr)
" | uv pip install -r -
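
(Aside: the hunk above asks the provider package itself for its dependencies: it imports "<package>.provider", calls get_provider_spec(), drops llama-stack entries from spec.pip_packages, and pipes the rest into "uv pip install -r -", which reads a requirements list from stdin. A consolidated sketch of that pipeline is below, assuming the provider module follows this convention; ramalama_stack is used as the example module because it appears in this PR's test config.)

#!/usr/bin/env bash
set -euo pipefail

# Strip any version specifier to get the importable package name,
# e.g. "ramalama_stack==0.3.0a0" -> "ramalama_stack".
part="ramalama_stack==0.3.0a0"
package_name=$(echo "$part" | sed 's/[<>=!].*//')

# Ask the provider module for its declared dependencies, filter out
# llama-stack, and feed the remainder to uv as requirements on stdin.
python3 -c "
import importlib
import sys

try:
    module = importlib.import_module('${package_name}.provider')
    spec = module.get_provider_spec()
    pkgs = getattr(spec, 'pip_packages', None) or []
    filtered = [p for p in pkgs if not p.startswith('llama-stack')]
    if filtered:
        print('\n'.join(filtered))
    else:
        print('No additional dependencies needed', file=sys.stderr)
except Exception as e:
    print(f'Error getting provider spec for ${package_name}: {e}', file=sys.stderr)
" | uv pip install -r -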
@@ -199,10 +199,11 @@ if [ -n "$external_provider_deps" ]; then
    read -ra pip_args <<< "$part"
    quoted_deps=$(printf " %q" "${pip_args[@]}")
    add_to_container <<EOF
RUN uv pip install --no-cache $quoted_deps
EOF
    add_to_container <<EOF
RUN python3 - <<PYTHON | uv pip install --no-cache -r -
# Create constraint file to exclude llama-stack
RUN echo "# Exclude llama-stack to avoid circular dependencies" > /tmp/constraints.txt && \\
    echo "llama-stack==0.0.0" >> /tmp/constraints.txt && \\

RUN python3 - <<PYTHON | uv pip install --no-cache --constraint /tmp/constraints.txt -r -
import importlib
import sys

@@ -212,10 +213,17 @@ try:
    spec = module.get_provider_spec()
    if hasattr(spec, 'pip_packages') and spec.pip_packages:
        if isinstance(spec.pip_packages, (list, tuple)):
            print('\n'.join(spec.pip_packages))
        # Filter out llama-stack from pip_packages to avoid circular dependency
        filtered_packages = [pkg for pkg in spec.pip_packages if not pkg.startswith('llama-stack')]
        if filtered_packages:
            print('\n'.join(filtered_packages))
        else:
            print('No additional dependencies needed', file=sys.stderr)
except Exception as e:
    print(f'Error getting provider spec for {package_name}: {e}', file=sys.stderr)
PYTHON

RUN rm -f /tmp/constraints.txt
EOF
    done
  fi
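
(Aside: the container-build hunks above emit Dockerfile/Containerfile instructions through an add_to_container heredoc helper rather than running commands directly. A minimal sketch of that helper pattern is below; the FROM line and the example dependencies are illustrative, not taken from the build script.)

#!/usr/bin/env bash
set -euo pipefail

CONTAINERFILE="$(mktemp)"

# Append whatever is piped in (usually a heredoc) to the Containerfile.
add_to_container() {
  cat >> "$CONTAINERFILE"
}

add_to_container <<EOF
FROM python:3.12-slim
RUN pip install --no-cache-dir uv
EOF

# An unquoted EOF delimiter lets shell variables expand before the
# instruction text is written out, mirroring the $quoted_deps usage above.
quoted_deps="requests pyyaml"
add_to_container <<EOF
RUN uv pip install --no-cache $quoted_deps
EOF

cat "$CONTAINERFILE"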
@@ -146,7 +146,18 @@ run() {
    IFS='#' read -ra parts <<<"$external_provider_deps"
    for part in "${parts[@]}"; do
      echo "$part"
      uv pip install "$part"

      # Create a temporary constraint file to exclude llama-stack
      cat > /tmp/constraints.txt << 'EOF'
# Exclude llama-stack to avoid circular dependencies
llama-stack==0.0.0
EOF

      # Install the external provider with constraints to exclude llama-stack
      uv pip install --constraint /tmp/constraints.txt "$part"

      # Clean up constraint file
      rm -f /tmp/constraints.txt
    done
  fi
else
@@ -182,21 +193,42 @@ run() {
  if [ -n "$external_provider_deps" ]; then
    IFS='#' read -ra parts <<<"$external_provider_deps"
    for part in "${parts[@]}"; do
      echo "Installing external provider module: $part"
      uv pip install "$part"
      echo "Getting provider spec for module: $part and installing dependencies"
      echo "Installing external provider: $part"

      # Create a temporary constraint file to exclude llama-stack
      cat > /tmp/constraints.txt << 'EOF'
# Exclude llama-stack to avoid circular dependencies
llama-stack==0.0.0
llama-stack-client==0.0.0
EOF

      # Install the external provider with constraints to exclude llama-stack
      uv pip install --constraint /tmp/constraints.txt "$part"

      echo "Getting provider spec for module: $part and installing additional dependencies"
      package_name=$(echo "$part" | sed 's/[<>=!].*//')

      # Get additional dependencies from the provider spec (excluding llama-stack)
      python3 -c "
import importlib
import sys

try:
    module = importlib.import_module(f'$package_name.provider')
    spec = module.get_provider_spec()
    if hasattr(spec, 'pip_packages') and spec.pip_packages:
        print('\\n'.join(spec.pip_packages))
        # Filter out llama-stack from pip_packages to avoid circular dependency
        filtered_packages = [pkg for pkg in spec.pip_packages if not pkg.startswith('llama-stack')]
        if filtered_packages:
            print('\\n'.join(filtered_packages))
        else:
            print('No additional dependencies needed', file=sys.stderr)
except Exception as e:
    print(f'Error getting provider spec for $package_name: {e}', file=sys.stderr)
" | uv pip install -r -
" | uv pip install --constraint /tmp/constraints.txt -r -

      # Clean up constraint file
      rm -f /tmp/constraints.txt
    done
  fi
fi
tests/external/ramalama-stack/run.yaml (vendored, 12 deletions)
@@ -1,12 +0,0 @@
version: 2
image_name: ramalama
apis:
- inference
providers:
  inference:
  - provider_id: ramalama
    provider_type: remote::ramalama
    module: ramalama_stack==0.3.0a0
    config: {}
server:
  port: 8321
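
(Aside: the deleted run.yaml registered a single remote::ramalama inference provider and served the stack on port 8321. A rough sketch of exercising such a config locally is shown below; the /v1/health path is an assumption about the server's health route and is not stated anywhere in this diff.)

#!/usr/bin/env bash
set -euo pipefail

# Start the stack from the run config (path as it appeared before deletion).
nohup llama stack run tests/external/ramalama-stack/run.yaml > server.log 2>&1 &

# Poll the assumed health endpoint on the configured port until it answers.
for i in {1..30}; do
  if curl -sf http://localhost:8321/v1/health > /dev/null; then
    echo "Server is up"
    exit 0
  fi
  sleep 1
done
echo "Server did not become ready" >&2
cat server.log >&2
exit 1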