Mirror of https://github.com/meta-llama/llama-stack.git (synced 2025-07-07 22:34:37 +00:00)
docs: update docs to use "starter" instead of "ollama" (#2629)
parent dc7df60d42
commit c025cab3a3

2 changed files with 14 additions and 9 deletions
@@ -55,7 +55,7 @@
 "\n",
 "MODEL=\"Llama-4-Scout-17B-16E-Instruct\"\n",
 "# get meta url from llama.com\n",
-"!uv run --with llama-stackllama model download --source meta --model-id $MODEL --meta-url <META_URL>\n",
+"!uv run --with llama-stack llama model download --source meta --model-id $MODEL --meta-url <META_URL>\n",
 "\n",
 "model_id = f\"meta-llama/{MODEL}\""
 ]
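The only change in this hunk is a missing space in the download command (llama-stackllama becomes llama-stack llama). Unescaped from the notebook JSON, the corrected cell reads roughly as the sketch below; <META_URL> is the placeholder the notebook asks you to obtain from llama.com and stays unfilled, and the leading ! runs the line through IPython's shell escape.

MODEL = "Llama-4-Scout-17B-16E-Instruct"

# get the meta url from llama.com, then download the checkpoint;
# note the space between the llama-stack package and the llama CLI entry point
!uv run --with llama-stack llama model download --source meta --model-id $MODEL --meta-url <META_URL>

model_id = f"meta-llama/{MODEL}"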
@@ -145,12 +145,12 @@
 "    del os.environ[\"UV_SYSTEM_PYTHON\"]\n",
 "\n",
 "# this command installs all the dependencies needed for the llama stack server with the ollama inference provider\n",
-"!uv run --with llama-stack llama stack build --template ollama --image-type venv --image-name myvenv\n",
+"!uv run --with llama-stack llama stack build --template starter --image-type venv\n",
 "\n",
 "def run_llama_stack_server_background():\n",
 "    log_file = open(\"llama_stack_server.log\", \"w\")\n",
 "    process = subprocess.Popen(\n",
-"        f\"uv run --with llama-stack llama stack run ollama --image-type venv --image-name myvenv --env INFERENCE_MODEL=llama3.2:3b\",\n",
+"        f\"uv run --with llama-stack llama stack run starter --image-type venv --env INFERENCE_MODEL=llama3.2:3b\",\n",
 "        shell=True,\n",
 "        stdout=log_file,\n",
 "        stderr=log_file,\n",
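Together these two edits move the notebook from the provider-specific ollama template to the starter distribution and drop the explicit --image-name myvenv. Unescaped, the updated launch helper looks roughly like the sketch below; the import of subprocess and the trailing return are assumptions about cells outside the hunk, and the notebook wraps the command in an f-string even though nothing is interpolated in the visible part.

import subprocess

# build the starter distribution into a venv (previously: --template ollama)
# !uv run --with llama-stack llama stack build --template starter --image-type venv

def run_llama_stack_server_background():
    # run the starter distribution and point it at the local Ollama model
    log_file = open("llama_stack_server.log", "w")
    process = subprocess.Popen(
        "uv run --with llama-stack llama stack run starter --image-type venv "
        "--env INFERENCE_MODEL=llama3.2:3b",
        shell=True,
        stdout=log_file,
        stderr=log_file,
    )
    return process  # assumption: the rest of the function sits outside the hunk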
@@ -249,18 +249,23 @@
 ],
 "source": [
 "from llama_stack_client import Agent, AgentEventLogger, RAGDocument, LlamaStackClient\n",
+"import os\n",
+"\n",
+"os.environ[\"ENABLE_OLLAMA\"] = \"ollama\"\n",
+"os.environ[\"OLLAMA_INFERENCE_MODEL\"] = \"llama3.2:3b\"\n",
+"os.environ[\"OLLAMA_EMBEDDING_MODEL\"] = \"all-minilm:l6-v2\"\n",
+"os.environ[\"OLLAMA_EMBEDDING_DIMENSION\"] = \"384\"\n",
 "\n",
 "vector_db_id = \"my_demo_vector_db\"\n",
 "client = LlamaStackClient(base_url=\"http://0.0.0.0:8321\")\n",
 "\n",
 "models = client.models.list()\n",
 "\n",
-"# Select the first LLM and first embedding models\n",
-"model_id = next(m for m in models if m.model_type == \"llm\").identifier\n",
-"embedding_model_id = (\n",
-"    em := next(m for m in models if m.model_type == \"embedding\")\n",
-").identifier\n",
-"embedding_dimension = em.metadata[\"embedding_dimension\"]\n",
+"# Select the first ollama and first ollama's embedding model\n",
+"model_id = next(m for m in models if m.model_type == \"llm\" and m.provider_id == \"ollama\").identifier\n",
+"embedding_model = next(m for m in models if m.model_type == \"embedding\" and m.provider_id == \"ollama\")\n",
+"embedding_model_id = embedding_model.identifier\n",
+"embedding_dimension = embedding_model.metadata[\"embedding_dimension\"]\n",
 "\n",
 "_ = client.vector_dbs.register(\n",
 "    vector_db_id=vector_db_id,\n",
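The last hunk exports the Ollama-related environment variables before the client is created and pins model selection to provider_id == "ollama" instead of taking the first model of each type, presumably because the starter distribution can expose models from several providers. Assembled into plain Python it reads roughly as below; the client.vector_dbs.register(...) call that follows is truncated by the hunk, so it is left out of the sketch.

import os

from llama_stack_client import Agent, AgentEventLogger, RAGDocument, LlamaStackClient

# enable the ollama provider in the starter distribution and name the
# inference / embedding models it should expose
os.environ["ENABLE_OLLAMA"] = "ollama"
os.environ["OLLAMA_INFERENCE_MODEL"] = "llama3.2:3b"
os.environ["OLLAMA_EMBEDDING_MODEL"] = "all-minilm:l6-v2"
os.environ["OLLAMA_EMBEDDING_DIMENSION"] = "384"

vector_db_id = "my_demo_vector_db"
client = LlamaStackClient(base_url="http://0.0.0.0:8321")

models = client.models.list()

# select the first Ollama-served LLM and the first Ollama embedding model
model_id = next(m for m in models if m.model_type == "llm" and m.provider_id == "ollama").identifier
embedding_model = next(m for m in models if m.model_type == "embedding" and m.provider_id == "ollama")
embedding_model_id = embedding_model.identifier
embedding_dimension = embedding_model.metadata["embedding_dimension"]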