# What does this PR do?

The Model Context Protocol (MCP) allows remote tools to be connected to Agents. The current Ollama provider does not support it. This PR adds the code changes necessary to make the integration between the Ollama backend and MCP work. This PR is an extension of #816 for Ollama.

## Test Plan

1. Run the llama-stack server:

```
llama stack build --template ollama --image-type conda
llama stack run ./templates/ollama/run.yaml \
  --port $LLAMA_STACK_PORT \
  --env INFERENCE_MODEL=$INFERENCE_MODEL \
  --env OLLAMA_URL=http://localhost:11434
```

2. Run the sample client agent with an MCP tool (an optional sketch for verifying the toolgroup registration follows this description):

```
from llama_stack_client.lib.agents.agent import Agent
from llama_stack_client.lib.agents.event_logger import EventLogger
from llama_stack_client.types.agent_create_params import AgentConfig
from llama_stack_client.types.shared_params.url import URL
from llama_stack_client import LlamaStackClient
from termcolor import cprint

## Start the local MCP server
# git clone https://github.com/modelcontextprotocol/python-sdk
# Follow instructions to get the env ready
# cd examples/servers/simple-tool
# uv run mcp-simple-tool --transport sse --port 8000

# Connect to the llama stack server
base_url = "http://localhost:8321"
model_id = "meta-llama/Llama-3.2-3B-Instruct"
client = LlamaStackClient(base_url=base_url)

# Register MCP tools
client.toolgroups.register(
    toolgroup_id="mcp::filesystem",
    provider_id="model-context-protocol",
    mcp_endpoint=URL(uri="http://localhost:8000/sse"),
)

# Define an agent with the MCP toolgroup
agent_config = AgentConfig(
    model=model_id,
    instructions="You are a helpful assistant",
    toolgroups=["mcp::filesystem"],
    input_shields=[],
    output_shields=[],
    enable_session_persistence=False,
)
agent = Agent(client, agent_config)
user_prompts = [
    "Fetch content from https://www.google.com and print the response",
]

# Run a session with the agent
session_id = agent.create_session("test-session")
for prompt in user_prompts:
    cprint(f"User> {prompt}", "green")
    response = agent.create_turn(
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
        session_id=session_id,
    )
    for log in EventLogger().log(response):
        log.print()
```

# Documentation

The file docs/source/distributions/self_hosted_distro/ollama.md is updated to indicate that the MCP tool runtime is available.

Signed-off-by: Shreyanand <shanand@redhat.com>
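As an optional check on the toolgroup registration in step 2, the client can list what the server exposes after `client.toolgroups.register(...)` returns. This is a minimal sketch, not part of the original test plan; it assumes the `llama-stack-client` resources expose `list()` methods and that toolgroup/tool objects carry `identifier` and `provider_id` fields, which may differ across client versions.

```
from llama_stack_client import LlamaStackClient

client = LlamaStackClient(base_url="http://localhost:8321")

# The registered MCP toolgroup ("mcp::filesystem") should show up here.
# Field names (identifier, provider_id) are assumptions, not verified.
for tg in client.toolgroups.list():
    print("toolgroup:", tg.identifier, "provider:", tg.provider_id)

# Tools advertised by the MCP server, filtered by toolgroup; the
# toolgroup_id filter argument is likewise an assumption.
for tool in client.tools.list(toolgroup_id="mcp::filesystem"):
    print("tool:", tool.identifier)
```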
The updated Ollama distribution template (153 lines · 5.3 KiB · Python), which now includes `remote::model-context-protocol` among its `tool_runtime` providers:
```
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the terms described in the LICENSE file in
# the root directory of this source tree.

from pathlib import Path

from llama_stack.apis.models.models import ModelType
from llama_stack.distribution.datatypes import (
    ModelInput,
    Provider,
    ShieldInput,
    ToolGroupInput,
)
from llama_stack.providers.inline.vector_io.sqlite_vec.config import SQLiteVectorIOConfig
from llama_stack.providers.remote.inference.ollama import OllamaImplConfig
from llama_stack.templates.template import DistributionTemplate, RunConfigSettings


def get_distribution_template() -> DistributionTemplate:
    providers = {
        "inference": ["remote::ollama"],
        "vector_io": ["inline::sqlite-vec", "remote::chromadb", "remote::pgvector"],
        "safety": ["inline::llama-guard"],
        "agents": ["inline::meta-reference"],
        "telemetry": ["inline::meta-reference"],
        "eval": ["inline::meta-reference"],
        "datasetio": ["remote::huggingface", "inline::localfs"],
        "scoring": ["inline::basic", "inline::llm-as-judge", "inline::braintrust"],
        "tool_runtime": [
            "remote::brave-search",
            "remote::tavily-search",
            "inline::code-interpreter",
            "inline::rag-runtime",
            "remote::model-context-protocol",
        ],
    }
    name = "ollama"
    inference_provider = Provider(
        provider_id="ollama",
        provider_type="remote::ollama",
        config=OllamaImplConfig.sample_run_config(),
    )
    vector_io_provider_sqlite = Provider(
        provider_id="sqlite-vec",
        provider_type="inline::sqlite-vec",
        config=SQLiteVectorIOConfig.sample_run_config(f"distributions/{name}"),
    )

    inference_model = ModelInput(
        model_id="${env.INFERENCE_MODEL}",
        provider_id="ollama",
    )
    safety_model = ModelInput(
        model_id="${env.SAFETY_MODEL}",
        provider_id="ollama",
    )
    embedding_model = ModelInput(
        model_id="all-MiniLM-L6-v2",
        provider_id="ollama",
        provider_model_id="all-minilm:latest",
        model_type=ModelType.embedding,
        metadata={
            "embedding_dimension": 384,
        },
    )
    default_tool_groups = [
        ToolGroupInput(
            toolgroup_id="builtin::websearch",
            provider_id="tavily-search",
        ),
        ToolGroupInput(
            toolgroup_id="builtin::rag",
            provider_id="rag-runtime",
        ),
        ToolGroupInput(
            toolgroup_id="builtin::code_interpreter",
            provider_id="code-interpreter",
        ),
    ]

    return DistributionTemplate(
        name=name,
        distro_type="self_hosted",
        description="Use (an external) Ollama server for running LLM inference",
        container_image=None,
        template_path=Path(__file__).parent / "doc_template.md",
        providers=providers,
        default_models=[inference_model, safety_model],
        run_configs={
            "run.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": [inference_provider],
                    "vector_io": [vector_io_provider_sqlite],
                },
                default_models=[inference_model],
                default_tool_groups=default_tool_groups,
            ),
            "run-with-safety.yaml": RunConfigSettings(
                provider_overrides={
                    "inference": [inference_provider],
                    "vector_io": [vector_io_provider_sqlite],
                    "safety": [
                        Provider(
                            provider_id="llama-guard",
                            provider_type="inline::llama-guard",
                            config={},
                        ),
                        Provider(
                            provider_id="code-scanner",
                            provider_type="inline::code-scanner",
                            config={},
                        ),
                    ],
                },
                default_models=[
                    inference_model,
                    safety_model,
                    embedding_model,
                ],
                default_shields=[
                    ShieldInput(
                        shield_id="${env.SAFETY_MODEL}",
                        provider_id="llama-guard",
                    ),
                    ShieldInput(
                        shield_id="CodeScanner",
                        provider_id="code-scanner",
                    ),
                ],
                default_tool_groups=default_tool_groups,
            ),
        },
        run_config_env_vars={
            "LLAMA_STACK_PORT": (
                "5001",
                "Port for the Llama Stack distribution server",
            ),
            "OLLAMA_URL": (
                "http://127.0.0.1:11434",
                "URL of the Ollama server",
            ),
            "INFERENCE_MODEL": (
                "meta-llama/Llama-3.2-3B-Instruct",
                "Inference model loaded into the Ollama server",
            ),
            "SAFETY_MODEL": (
                "meta-llama/Llama-Guard-3-1B",
                "Safety model loaded into the Ollama server",
            ),
        },
    )
```
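For reference, a quick way to confirm that the template above wires the MCP tool runtime into the Ollama distribution is to load it and inspect its provider map. This is a minimal sketch: the import path is assumed from the repository layout, and it relies on `DistributionTemplate` keeping the `providers` dict it was constructed with.

```
# Sketch only: module path and attribute access are assumptions based on
# the template code above, not a verified part of this PR.
from llama_stack.templates.ollama.ollama import get_distribution_template

template = get_distribution_template()
tool_runtime = template.providers["tool_runtime"]
assert "remote::model-context-protocol" in tool_runtime
print(tool_runtime)
```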