Merge branch 'main' into fix/milvus-missing-files-api-parameter

raghotham · 2025-07-04 21:08:03 -07:00 · committed by GitHub
commit 6d13591ef4
3 changed files with 25 additions and 20 deletions


@@ -55,7 +55,7 @@
 "\n",
 "MODEL=\"Llama-4-Scout-17B-16E-Instruct\"\n",
 "# get meta url from llama.com\n",
-"!uv run --with llama-stackllama model download --source meta --model-id $MODEL --meta-url <META_URL>\n",
+"!uv run --with llama-stack llama model download --source meta --model-id $MODEL --meta-url <META_URL>\n",
 "\n",
 "model_id = f\"meta-llama/{MODEL}\""
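
The change in this hunk is a single missing space: `llama-stackllama` made `uv` try to resolve a nonexistent package instead of installing `llama-stack` and then invoking its `llama` CLI. For reference, a minimal sketch (not part of the commit) of the corrected cell as plain Python, keeping the notebook's `MODEL` value and reading the Meta URL from an assumed `META_URL` environment variable in place of the literal `<META_URL>` placeholder:

```python
# Sketch only: mirrors the fixed notebook line; META_URL is an assumed
# environment variable standing in for the <META_URL> placeholder.
import os
import subprocess

MODEL = "Llama-4-Scout-17B-16E-Instruct"
meta_url = os.environ.get("META_URL")  # obtain this URL from llama.com
if meta_url is None:
    raise RuntimeError("Set META_URL before downloading the model")

subprocess.run(
    [
        "uv", "run", "--with", "llama-stack",  # install the llama-stack package...
        "llama", "model", "download",          # ...then run the `llama` CLI it provides
        "--source", "meta",
        "--model-id", MODEL,
        "--meta-url", meta_url,
    ],
    check=True,
)

model_id = f"meta-llama/{MODEL}"
```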


@@ -145,12 +145,12 @@
 "    del os.environ[\"UV_SYSTEM_PYTHON\"]\n",
 "\n",
 "# this command installs all the dependencies needed for the llama stack server with the ollama inference provider\n",
-"!uv run --with llama-stack llama stack build --template ollama --image-type venv --image-name myvenv\n",
+"!uv run --with llama-stack llama stack build --template starter --image-type venv\n",
 "\n",
 "def run_llama_stack_server_background():\n",
 "    log_file = open(\"llama_stack_server.log\", \"w\")\n",
 "    process = subprocess.Popen(\n",
-"        f\"uv run --with llama-stack llama stack run ollama --image-type venv --image-name myvenv --env INFERENCE_MODEL=llama3.2:3b\",\n",
+"        f\"uv run --with llama-stack llama stack run starter --image-type venv --env INFERENCE_MODEL=llama3.2:3b\",\n",
 "        shell=True,\n",
 "        stdout=log_file,\n",
 "        stderr=log_file,\n",
@@ -249,18 +249,23 @@
 ],
 "source": [
 "from llama_stack_client import Agent, AgentEventLogger, RAGDocument, LlamaStackClient\n",
+"import os\n",
+"\n",
+"os.environ[\"ENABLE_OLLAMA\"] = \"ollama\"\n",
+"os.environ[\"OLLAMA_INFERENCE_MODEL\"] = \"llama3.2:3b\"\n",
+"os.environ[\"OLLAMA_EMBEDDING_MODEL\"] = \"all-minilm:l6-v2\"\n",
+"os.environ[\"OLLAMA_EMBEDDING_DIMENSION\"] = \"384\"\n",
 "\n",
 "vector_db_id = \"my_demo_vector_db\"\n",
 "client = LlamaStackClient(base_url=\"http://0.0.0.0:8321\")\n",
 "\n",
 "models = client.models.list()\n",
 "\n",
-"# Select the first LLM and first embedding models\n",
-"model_id = next(m for m in models if m.model_type == \"llm\").identifier\n",
-"embedding_model_id = (\n",
-"    em := next(m for m in models if m.model_type == \"embedding\")\n",
-").identifier\n",
-"embedding_dimension = em.metadata[\"embedding_dimension\"]\n",
+"# Select the first LLM and the first embedding model served by ollama\n",
+"model_id = next(m for m in models if m.model_type == \"llm\" and m.provider_id == \"ollama\").identifier\n",
+"embedding_model = next(m for m in models if m.model_type == \"embedding\" and m.provider_id == \"ollama\")\n",
+"embedding_model_id = embedding_model.identifier\n",
+"embedding_dimension = embedding_model.metadata[\"embedding_dimension\"]\n",
 "\n",
 "_ = client.vector_dbs.register(\n",
 "    vector_db_id=vector_db_id,\n",


@@ -18,7 +18,7 @@ The `llamastack/distribution-starter` distribution is a comprehensive, multi-pro
 The starter distribution consists of the following provider configurations:
 
 | API | Provider(s) |
-|-----|-------------|
+|-----|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
 | agents | `inline::meta-reference` |
 | datasetio | `remote::huggingface`, `inline::localfs` |
 | eval | `inline::meta-reference` |
@@ -28,7 +28,7 @@ The starter distribution consists of the following provider configurations:
 | scoring | `inline::basic`, `inline::llm-as-judge`, `inline::braintrust` |
 | telemetry | `inline::meta-reference` |
 | tool_runtime | `remote::brave-search`, `remote::tavily-search`, `inline::rag-runtime`, `remote::model-context-protocol` |
-| vector_io | `inline::faiss`, `inline::sqlite-vec`, `remote::chromadb`, `remote::pgvector` |
+| vector_io | `inline::faiss`, `inline::sqlite-vec`, `inline::milvus`, `remote::chromadb`, `remote::pgvector` |
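
With `inline::milvus` added to the `vector_io` row, a quick hedged check against a running starter stack can confirm the provider is actually wired up; this assumes the client exposes `providers.list()` returning entries with `api`, `provider_id`, and `provider_type` fields:

```python
# Sketch only: list vector_io providers on a running starter stack and
# look for the new milvus entry alongside faiss, sqlite-vec, etc.
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://0.0.0.0:8321")
for p in client.providers.list():  # assumed endpoint/shape
    if p.api == "vector_io":
        print(p.provider_id, p.provider_type)
```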
## Inference Providers