Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-06-28 19:04:19 +00:00)
fixes

commit 6b251432c3 (parent f44546f10c)
1 changed file with 6 additions and 3 deletions
@@ -35,6 +35,9 @@ jobs:
         with:
           python-version: ${{ matrix.python-version }}
 
+      - name: Setup ollama
+        uses: ./.github/actions/setup-ollama
+
       - name: Setup Chroma
         run: |
           docker run --rm -d --pull always \
@@ -71,13 +74,13 @@ jobs:
 
       - name: Run Vector IO Integration Tests
         env:
-          FIREWORKS_API_KEY: ${{ secrets.FIREWORKS_API_KEY }}
           ENABLE_CHROMADB: true
           CHROMADB_URL: http://localhost:8000
+          OLLAMA_URL: "http://0.0.0.0:11434"
         run: |
-          uv run pytest -sv --stack-config="inference=fireworks,vector_io=${{ matrix.vector-io-provider }}" \
+          uv run pytest -sv --stack-config="inference=ollama,vector_io=${{ matrix.vector-io-provider }}" \
             tests/integration/vector_io \
-            --embedding-model nomic-ai/nomic-embed-text-v1.5
+            --embedding-model all-MiniLM-L6-v2
 
       - name: Check Storage and Memory Available After Tests
         if: ${{ always() }}
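For readability, here is how the two touched steps read after this commit, reconstructed from the hunks above (only the modified steps are shown; surrounding steps and exact indentation come from the diff context and may differ slightly from the actual file):

      - name: Setup ollama
        uses: ./.github/actions/setup-ollama

      - name: Run Vector IO Integration Tests
        env:
          ENABLE_CHROMADB: true
          CHROMADB_URL: http://localhost:8000
          OLLAMA_URL: "http://0.0.0.0:11434"
        run: |
          uv run pytest -sv --stack-config="inference=ollama,vector_io=${{ matrix.vector-io-provider }}" \
            tests/integration/vector_io \
            --embedding-model all-MiniLM-L6-v2

The net effect: the Vector IO integration tests now run inference against a local Ollama server started by the setup-ollama action instead of the Fireworks API, so the FIREWORKS_API_KEY secret is no longer needed, and the embedding model switches from nomic-ai/nomic-embed-text-v1.5 to all-MiniLM-L6-v2.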
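The new step points at a local composite action, ./.github/actions/setup-ollama, whose contents are not part of this diff. For orientation only, a minimal composite action at that path might look like the sketch below; the repository's real action may pull specific models, wait for the server to become healthy, and so on. Nothing here beyond the path is taken from the commit.

name: Setup Ollama
description: Start an Ollama server for integration tests (illustrative sketch, not the repository's actual action)
runs:
  using: composite
  steps:
    - name: Start Ollama container
      shell: bash
      run: |
        # Expose the default API port so OLLAMA_URL=http://0.0.0.0:11434 reaches this container
        docker run -d --name ollama -p 11434:11434 ollama/ollama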