mirror of
https://github.com/meta-llama/llama-stack.git
synced 2025-08-15 14:08:00 +00:00
Some checks failed
Integration Tests (Replay) / discover-tests (push) Successful in 3s
Test External Providers Installed via Module / test-external-providers-from-module (venv) (push) Has been skipped
Integration Auth Tests / test-matrix (oauth2_token) (push) Failing after 9s
Python Package Build Test / build (3.12) (push) Failing after 4s
Vector IO Integration Tests / test-matrix (3.12, inline::milvus) (push) Failing after 12s
Test Llama Stack Build / generate-matrix (push) Successful in 11s
Test Llama Stack Build / build-ubi9-container-distribution (push) Failing after 12s
Vector IO Integration Tests / test-matrix (3.12, inline::faiss) (push) Failing after 14s
SqlStore Integration Tests / test-postgres (3.12) (push) Failing after 22s
Test External API and Providers / test-external (venv) (push) Failing after 14s
Integration Tests (Replay) / Integration Tests (, , , client=, vision=) (push) Failing after 12s
Vector IO Integration Tests / test-matrix (3.12, remote::pgvector) (push) Failing after 15s
SqlStore Integration Tests / test-postgres (3.13) (push) Failing after 22s
Test Llama Stack Build / build-custom-container-distribution (push) Failing after 14s
Unit Tests / unit-tests (3.13) (push) Failing after 14s
Test Llama Stack Build / build-single-provider (push) Failing after 13s
Vector IO Integration Tests / test-matrix (3.12, remote::chromadb) (push) Failing after 18s
Unit Tests / unit-tests (3.12) (push) Failing after 16s
Vector IO Integration Tests / test-matrix (3.12, remote::qdrant) (push) Failing after 18s
Vector IO Integration Tests / test-matrix (3.13, remote::weaviate) (push) Failing after 10s
Vector IO Integration Tests / test-matrix (3.13, inline::faiss) (push) Failing after 11s
Vector IO Integration Tests / test-matrix (3.12, remote::weaviate) (push) Failing after 16s
Vector IO Integration Tests / test-matrix (3.13, remote::qdrant) (push) Failing after 18s
Test Llama Stack Build / build (push) Failing after 12s
Vector IO Integration Tests / test-matrix (3.13, remote::chromadb) (push) Failing after 18s
Vector IO Integration Tests / test-matrix (3.13, remote::pgvector) (push) Failing after 20s
Vector IO Integration Tests / test-matrix (3.13, inline::sqlite-vec) (push) Failing after 16s
Python Package Build Test / build (3.13) (push) Failing after 53s
Vector IO Integration Tests / test-matrix (3.13, inline::milvus) (push) Failing after 59s
Vector IO Integration Tests / test-matrix (3.12, inline::sqlite-vec) (push) Failing after 1m1s
Update ReadTheDocs / update-readthedocs (push) Failing after 1m6s
Pre-commit / pre-commit (push) Successful in 1m53s
A bunch of miscellaneous cleanup focusing on tests, but it ended up speeding up the starter distro substantially. - Pulled llama stack client init for tests into `pytest_sessionstart` so it does not clobber output - Profiling of that showed where we were doing lots of heavy imports for starter, so those were made lazy - starter now starts 20+ seconds faster on my Mac - A few other smallish refactors for `compat_client`
164 lines
5.1 KiB
JSON
{
  "request": {
    "method": "POST",
    "url": "http://localhost:11434/api/tags",
    "headers": {},
    "body": {},
    "endpoint": "/api/tags",
    "model": ""
  },
  "response": {
    "body": {
      "__type__": "ollama._types.ListResponse",
      "__data__": {
        "models": [
          {
            "model": "nomic-embed-text:latest",
            "modified_at": "2025-08-05T14:04:07.946926-07:00",
            "digest": "0a109f422b47e3a30ba2b10eca18548e944e8a23073ee3f3e947efcf3c45e59f",
            "size": 274302450,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "nomic-bert",
              "families": [
                "nomic-bert"
              ],
              "parameter_size": "137M",
              "quantization_level": "F16"
            }
          },
          {
            "model": "llama3.2-vision:11b",
            "modified_at": "2025-07-30T18:45:02.517873-07:00",
            "digest": "6f2f9757ae97e8a3f8ea33d6adb2b11d93d9a35bef277cd2c0b1b5af8e8d0b1e",
            "size": 7816589186,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "mllama",
              "families": [
                "mllama"
              ],
              "parameter_size": "10.7B",
              "quantization_level": "Q4_K_M"
            }
          },
          {
            "model": "llama3.2-vision:latest",
            "modified_at": "2025-07-29T20:18:47.920468-07:00",
            "digest": "6f2f9757ae97e8a3f8ea33d6adb2b11d93d9a35bef277cd2c0b1b5af8e8d0b1e",
            "size": 7816589186,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "mllama",
              "families": [
                "mllama"
              ],
              "parameter_size": "10.7B",
              "quantization_level": "Q4_K_M"
            }
          },
          {
            "model": "llama-guard3:1b",
            "modified_at": "2025-07-25T14:39:44.978630-07:00",
            "digest": "494147e06bf99e10dbe67b63a07ac81c162f18ef3341aa3390007ac828571b3b",
            "size": 1600181919,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "1.5B",
              "quantization_level": "Q8_0"
            }
          },
          {
            "model": "all-minilm:l6-v2",
            "modified_at": "2025-07-24T15:15:11.129290-07:00",
            "digest": "1b226e2802dbb772b5fc32a58f103ca1804ef7501331012de126ab22f67475ef",
            "size": 45960996,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "bert",
              "families": [
                "bert"
              ],
              "parameter_size": "23M",
              "quantization_level": "F16"
            }
          },
          {
            "model": "llama3.2:1b",
            "modified_at": "2025-07-17T22:02:24.953208-07:00",
            "digest": "baf6a787fdffd633537aa2eb51cfd54cb93ff08e28040095462bb63daf552878",
            "size": 1321098329,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "1.2B",
              "quantization_level": "Q8_0"
            }
          },
          {
            "model": "all-minilm:latest",
            "modified_at": "2025-06-03T16:50:10.946583-07:00",
            "digest": "1b226e2802dbb772b5fc32a58f103ca1804ef7501331012de126ab22f67475ef",
            "size": 45960996,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "bert",
              "families": [
                "bert"
              ],
              "parameter_size": "23M",
              "quantization_level": "F16"
            }
          },
          {
            "model": "llama3.2:3b",
            "modified_at": "2025-05-01T11:15:23.797447-07:00",
            "digest": "a80c4f17acd55265feec403c7aef86be0c25983ab279d83f3bcd3abbcb5b8b72",
            "size": 2019393189,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "3.2B",
              "quantization_level": "Q4_K_M"
            }
          },
          {
            "model": "llama3.2:3b-instruct-fp16",
            "modified_at": "2025-04-30T15:33:48.939665-07:00",
            "digest": "195a8c01d91ec3cb1e0aad4624a51f2602c51fa7d96110f8ab5a20c84081804d",
            "size": 6433703586,
            "details": {
              "parent_model": "",
              "format": "gguf",
              "family": "llama",
              "families": [
                "llama"
              ],
              "parameter_size": "3.2B",
              "quantization_level": "F16"
            }
          }
        ]
      }
    },
    "is_streaming": false
  }
}