docs mcp docs update

This commit is contained in:
Ishaan Jaff 2025-03-21 17:21:40 -07:00
parent b006e325cc
commit a8cf71973d
4 changed files with 29 additions and 18 deletions

View file

@@ -21,13 +21,22 @@ Use Model Context Protocol with LiteLLM
LiteLLM acts as an MCP bridge to utilize MCP tools with all LiteLLM supported models. LiteLLM offers the following features for using MCP LiteLLM acts as an MCP bridge to utilize MCP tools with all LiteLLM supported models. LiteLLM offers the following features for using MCP
- **List** Available MCP Tools: OpenAI clients can view all available MCP tools - **List** Available MCP Tools: OpenAI clients can view all available MCP tools
- `litellm.experimental_mcp_client.load_mcp_tools` to list all available MCP tools
- **Call** MCP Tools: OpenAI clients can call MCP tools - **Call** MCP Tools: OpenAI clients can call MCP tools
- `litellm.experimental_mcp_client.call_openai_tool` to call an OpenAI tool on an MCP server
## Usage ## Usage
### 1. List Available MCP Tools ### 1. List Available MCP Tools
In this example we'll use `litellm.experimental_mcp_client.load_mcp_tools` to list all available MCP tools on any MCP server. This method can be used in two ways:
- `format="mcp"` - (default) Return MCP tools
- Returns: `mcp.types.Tool`
- `format="openai"` - Return MCP tools converted to OpenAI API compatible tools. Allows using with OpenAI endpoints.
- Returns: `openai.types.chat.ChatCompletionToolParam`
<Tabs> <Tabs>
<TabItem value="sdk" label="LiteLLM Python SDK"> <TabItem value="sdk" label="LiteLLM Python SDK">
@@ -36,12 +45,8 @@ LiteLLM acts as a MCP bridge to utilize MCP tools with all LiteLLM supported mod
from mcp import ClientSession, StdioServerParameters from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client from mcp.client.stdio import stdio_client
import os import os
from litellm.mcp_client.tools import (
load_mcp_tools,
transform_openai_tool_to_mcp_tool,
call_openai_tool,
)
import litellm import litellm
from litellm import experimental_mcp_client
server_params = StdioServerParameters( server_params = StdioServerParameters(
@@ -56,12 +61,10 @@ async with stdio_client(server_params) as (read, write):
await session.initialize() await session.initialize()
# Get tools # Get tools
tools = await load_mcp_tools(session=session, format="openai") tools = await experimental_mcp_client.load_mcp_tools(session=session, format="openai")
print("MCP TOOLS: ", tools) print("MCP TOOLS: ", tools)
# Create and run the agent
messages = [{"role": "user", "content": "what's (3 + 5)"}] messages = [{"role": "user", "content": "what's (3 + 5)"}]
print(os.getenv("OPENAI_API_KEY"))
llm_response = await litellm.acompletion( llm_response = await litellm.acompletion(
model="gpt-4o", model="gpt-4o",
api_key=os.getenv("OPENAI_API_KEY"), api_key=os.getenv("OPENAI_API_KEY"),

View file

@@ -0,0 +1,3 @@
from .tools import call_openai_tool, load_mcp_tools
__all__ = ["load_mcp_tools", "call_openai_tool"]

View file

@@ -53,7 +53,7 @@ async def load_mcp_tools(
format: The format to convert the tools to format: The format to convert the tools to
By default, the tools are returned in MCP format. By default, the tools are returned in MCP format.
If format is set to "openai", the tools are converted to OpenAI tools. If format is set to "openai", the tools are converted to OpenAI API compatible tools.
""" """
tools = await session.list_tools() tools = await session.list_tools()
if format == "openai": if format == "openai":
@@ -80,7 +80,15 @@ async def call_openai_tool(
session: ClientSession, session: ClientSession,
openai_tool: ChatCompletionToolParam, openai_tool: ChatCompletionToolParam,
) -> CallToolResult: ) -> CallToolResult:
"""Call an OpenAI tool using MCP client.""" """
Call an OpenAI tool using MCP client.
Args:
session: The MCP session to use
openai_tool: The OpenAI tool to call. You can get this from the `choices[0].message.tool_calls[0]` of the response from the OpenAI API.
Returns:
The result of the MCP tool call.
"""
mcp_tool = transform_openai_tool_to_mcp_tool( mcp_tool = transform_openai_tool_to_mcp_tool(
openai_tool=openai_tool, openai_tool=openai_tool,
) )

View file

@@ -10,11 +10,7 @@ sys.path.insert(
from mcp import ClientSession, StdioServerParameters from mcp import ClientSession, StdioServerParameters
from mcp.client.stdio import stdio_client from mcp.client.stdio import stdio_client
import os import os
from litellm.mcp_client.tools import ( from litellm import experimental_mcp_client
load_mcp_tools,
transform_openai_tool_to_mcp_tool,
call_openai_tool,
)
import litellm import litellm
import pytest import pytest
import json import json
@@ -34,12 +30,13 @@ async def test_mcp_agent():
await session.initialize() await session.initialize()
# Get tools # Get tools
tools = await load_mcp_tools(session=session, format="openai") tools = await experimental_mcp_client.load_mcp_tools(
session=session, format="openai"
)
print("MCP TOOLS: ", tools) print("MCP TOOLS: ", tools)
# Create and run the agent # Create and run the agent
messages = [{"role": "user", "content": "what's (3 + 5)"}] messages = [{"role": "user", "content": "what's (3 + 5)"}]
print(os.getenv("OPENAI_API_KEY"))
llm_response = await litellm.acompletion( llm_response = await litellm.acompletion(
model="gpt-4o", model="gpt-4o",
api_key=os.getenv("OPENAI_API_KEY"), api_key=os.getenv("OPENAI_API_KEY"),
@@ -59,7 +56,7 @@ async def test_mcp_agent():
openai_tool = llm_response["choices"][0]["message"]["tool_calls"][0] openai_tool = llm_response["choices"][0]["message"]["tool_calls"][0]
# Call the tool using MCP client # Call the tool using MCP client
call_result = await call_openai_tool( call_result = await experimental_mcp_client.call_openai_tool(
session=session, session=session,
openai_tool=openai_tool, openai_tool=openai_tool,
) )