它虽然叫 Model Context Protocol(模型上下文协议),但它实际上跟模型毫无关联。它只是在 Function Calling 之上封装的一层标准化协议。(大致来说如此;此外它还可以提供资源(Resources)和提示词模板(Prompts)。)
MCP 主要由两个核心组件构成:
MCP Server:提供 Function Call(工具调用),负责提供工具的具体描述和逻辑执行。
MCP Client:向 Server 获取对应的工具内容,并把得到的函数描述放到自己 LLM 的 Function Calling 当中。
注:这里的格式可能是 OpenAI 的 Function Calling 格式,也可能是 Cursor 的 XML 格式。
核心优势:
首先需要找一个 Client,既能连接 MCP 服务,又能连接 LLM:
MCP Server 平台:
必须了解 MCP 的生命周期和交互逻辑,才能更清楚地理解整体架构。以下是 Weather 服务的完整对话日志,通过这段日志能深刻体会到 MCP 只是在 Function Calling 之上做的一层协议封装。
Weather 服务的完整 JSON 交互日志(已做格式化处理):
// 1. 初始化连接请求
输入:
{
"method": "initialize",
"params": {
"protocolVersion": "2025-03-26",
"capabilities": {},
"clientInfo": { "name": "Cline", "version": "3.18.1" }
},
"jsonrpc": "2.0",
"id": 0
}
输出:
{
"jsonrpc": "2.0",
"id": 0,
"result": {
"protocolVersion": "2024-11-05",
"capabilities": {
"experimental": {},
"prompts": { "listChanged": false },
"resources": { "subscribe": false, "listChanged": false },
"tools": { "listChanged": false }
},
"serverInfo": { "name": "weather", "version": "1.6.0" }
}
}
// 2. 确认初始化并请求工具列表
输入: { "method": "notifications/initialized", "jsonrpc": "2.0" }
输入: { "method": "tools/list", "jsonrpc": "2.0", "id": 1 }
输出:
{
"jsonrpc": "2.0",
"id": 1,
"result": {
"tools": [
{
"name": "get_alerts",
"description": "Get weather alerts for a US state.\n\nArgs:\n state: Two-letter US state code (e.g. CA, NY)\n",
"inputSchema": {
"properties": {
"state": { "title": "State", "type": "string" }
},
"required": ["state"],
"title": "get_alertsArguments",
"type": "object"
}
},
{
"name": "get_forecast",
"description": "Get weather forecast for a location.\n\nArgs:\n latitude: Latitude of the location\n longitude: Longitude of the location\n",
"inputSchema": {
"properties": {
"latitude": { "title": "Latitude", "type": "number" },
"longitude": { "title": "Longitude", "type": "number" }
},
"required": ["latitude", "longitude"],
"title": "get_forecastArguments",
"type": "object"
}
}
]
}
}
// 3. 请求资源与模板列表
输入: { "method": "resources/list", "jsonrpc": "2.0", "id": 2 }
输出: { "jsonrpc": "2.0", "id": 2, "result": { "resources": [] } }
输入: { "method": "resources/templates/list", "jsonrpc": "2.0", "id": 3 }
输出: { "jsonrpc": "2.0", "id": 3, "result": { "resourceTemplates": [] } }
// 4. 执行具体的工具调用 (get_forecast)
输入:
{
"method": "tools/call",
"params": {
"name": "get_forecast",
"arguments": { "latitude": 40.7128, "longitude": -74.006 }
},
"jsonrpc": "2.0",
"id": 4
}
输出:
{
"jsonrpc": "2.0",
"id": 4,
"result": {
"content": [
{
"type": "text",
"text": "\nOvernight:\nTemperature: 75°F\nWind: 5 mph SW\nForecast: A slight chance of rain showers... \n\n---\n\nSunday:\nTemperature: 88°F\nWind: 5 to 8 mph W\nForecast: A slight chance of rain showers before 8am...\n"
}
],
"isError": false
}
}
// 5. 执行具体的工具调用 (get_alerts)
输入:
{
"method": "tools/call",
"params": {
"name": "get_alerts",
"arguments": { "state": "NY" }
},
"jsonrpc": "2.0",
"id": 5
}
输出:
{
"jsonrpc": "2.0",
"id": 5,
"result": {
"content": [
{
"type": "text",
"text": "No active alerts for this state."
}
],
"isError": false
}
}
核心语法是使用 @mcp.tool() 这个装饰器函数,对当前函数做注解。在函数定义内部的开头必须写一段详细的文档字符串(Docstring),说明函数的功能、参数类型及含义。这段文档会被直接发给 Client 并转交 LLM。
from typing import Any
import httpx
from mcp.server.fastmcp import FastMCP
# Initialize the FastMCP server. The original author stresses that
# log_level="ERROR" must be set — presumably to keep non-error log output
# off the stdio channel used for JSON-RPC framing; TODO confirm.
mcp = FastMCP("weather", log_level="ERROR")
# Constants
NWS_API_BASE = "https://api.weather.gov"  # US National Weather Service API
USER_AGENT = "weather-app/1.0"  # client identification sent with each request
async def make_nws_request(url: str) -> dict[str, Any] | None:
    """Issue a GET request against the NWS API.

    Returns the parsed JSON body, or None on any request/HTTP/parse error
    (best-effort: callers treat None as "data unavailable").
    """
    request_headers = {
        "User-Agent": USER_AGENT,
        "Accept": "application/geo+json",
    }
    async with httpx.AsyncClient() as http:
        try:
            resp = await http.get(url, headers=request_headers, timeout=30.0)
            resp.raise_for_status()
            return resp.json()
        except Exception:
            # Swallow all failures deliberately; the tool layer reports them.
            return None
def format_alert(feature: dict) -> str:
    """Render a single NWS alert feature as a human-readable text block."""
    properties = feature["properties"]
    # Assemble line by line; leading/trailing "" reproduce the surrounding newlines.
    rendered = [
        "",
        f"事件: {properties.get('event', '未知')}",
        f"区域: {properties.get('areaDesc', '未知')}",
        f"严重程度: {properties.get('severity', '未知')}",
        f"描述: {properties.get('description', '暂无描述')}",
        f"指令: {properties.get('instruction', '暂无具体指令')}",
        "",
    ]
    return "\n".join(rendered)
@mcp.tool()
async def get_alerts(state: str) -> str:
    """获取美国各州的实时天气警报。
    参数:
        state: 两个字母组成的美国州代码 (例如:CA, NY)
    """
    # NOTE: the docstring above is sent verbatim to the client/LLM as the
    # tool description, so it is kept in its original wording.
    data = await make_nws_request(f"{NWS_API_BASE}/alerts/active/area/{state}")
    if not data or "features" not in data:
        return "无法获取警报信息或未发现警报。"
    features = data["features"]
    if not features:
        return "该州目前没有活跃的警报。"
    return "\n---\n".join(format_alert(feature) for feature in features)
@mcp.tool()
async def get_forecast(latitude: float, longitude: float) -> str:
    """获取指定经纬度位置的天气预报。
    参数:
        latitude: 地点纬度
        longitude: 地点经度
    """
    # NOTE: the docstring above is the tool description shown to the LLM;
    # it is kept in its original wording on purpose.
    # Step 1: resolve the coordinates to a gridpoint forecast URL.
    points_data = await make_nws_request(f"{NWS_API_BASE}/points/{latitude},{longitude}")
    if not points_data:
        return "无法获取该位置的预报数据。"
    # Step 2: fetch the actual forecast from the URL the API handed back.
    forecast_data = await make_nws_request(points_data["properties"]["forecast"])
    if not forecast_data:
        return "无法获取详细天气预报。"
    # Only the first five forecast periods are rendered.
    sections = [
        f"""
时间段: {period['name']}
温度: {period['temperature']}°{period['temperatureUnit']}
风力风向: {period['windSpeed']} {period['windDirection']}
预报详情: {period['detailedForecast']}
"""
        for period in forecast_data["properties"]["periods"][:5]
    ]
    return "\n---\n".join(sections)
if __name__ == "__main__":
    # Entry point when executed directly: serve over stdio transport.
    mcp.run(transport='stdio')
要在 Claude Desktop 或 Cline 客户端中识别并运行您的 MCP Server,需要在它们的配置文件(如 claude_desktop_config.json)中添加正确配置。请确保使用 Python 解释器的绝对路径。
{
"mcpServers": {
"my-search-tools": {
"command": "C:/Users/你的用户名/AppData/Local/Programs/Python/Python311/python.exe",
"args": [
"C:/绝对路径/指向你的/mcp_server.py"
],
"env": {
"SERPER_API_KEY": "你的_SERPER_API_KEY_填在这里"
}
}
}
}
注:如果是虚拟环境,command 需替换为虚拟环境中 Python 的绝对路径;务必确保引用的 Python 脚本路径(mcp_server.py)也是绝对路径。
为了彻底弄懂如何写一个完整的 Client 和 Server,以下展示一个搜索工具的双端完整源码。
该服务端利用 Serper.dev API 提供了网页搜索、新闻搜索和天气查询三个标准的 MCP 工具:
#!/usr/bin/env python3
"""
MCP服务器实现 - 搜索工具
提供Google搜索功能,通过Serper.dev API实现
"""
import os
from typing import Any, Dict, List
import httpx
from mcp.server import FastMCP
from mcp.types import TextContent
from dotenv import load_dotenv
# Load environment variables (e.g. SERPER_API_KEY) from a local .env file.
load_dotenv()
# Create the MCP server instance; the name is what connecting clients see.
mcp = FastMCP("Search Tools Server")
class SearchTool:
    """Thin async wrapper around the Serper.dev web-search API."""

    def __init__(self):
        # API key is read from the environment and may be absent; search()
        # reports that as an error dict rather than raising.
        self.api_key = os.getenv("SERPER_API_KEY")
        self.base_url = "https://google.serper.dev/search"

    async def search(self, query: str, num_results: int = 10) -> Dict[str, Any]:
        """Run one search query.

        Returns the parsed JSON response on success, or a dict of the form
        {"error": <message>} on any failure.
        """
        if not self.api_key:
            return {"error": "未配置 SERPER_API_KEY"}
        request_headers = {
            "X-API-KEY": self.api_key,
            "Content-Type": "application/json",
        }
        request_body = {
            "q": query,
            "num": num_results,
            "gl": "cn",    # geolocation: China
            "hl": "zh-cn", # interface language: Chinese
        }
        try:
            async with httpx.AsyncClient() as http:
                resp = await http.post(
                    self.base_url,
                    headers=request_headers,
                    json=request_body,
                    timeout=30.0,
                )
                resp.raise_for_status()
                return resp.json()
        except httpx.HTTPError as exc:
            return {"error": f"HTTP 错误: {str(exc)}"}
        except Exception as exc:
            return {"error": f"搜索失败: {str(exc)}"}
# Module-level singleton shared by all tool functions below.
search_tool = SearchTool()
@mcp.tool()
async def web_search(query: str, num_results: int = 10) -> List[TextContent]:
    """
    执行网络搜索
    参数:
        query: 搜索查询词
        num_results: 返回结果数量,默认 10 条
    返回:
        格式化后的搜索结果列表
    """
    # NOTE: docstring kept verbatim — it is the tool description sent to the LLM.
    result = await search_tool.search(query, num_results)
    if "error" in result:
        return [TextContent(type="text", text=f"搜索失败: {result['error']}")]

    parts: List[str] = []

    # Header: echo the query plus search metadata when the API provides it.
    if "searchParameters" in result:
        header = f"搜索查询: {result['searchParameters'].get('q', query)}\n"
        header += f"搜索时间: {result.get('searchMetadata', {}).get('createdAt', 'N/A')}\n\n"
        parts.append(header)

    # Regular ("organic") web results, capped at num_results.
    if "organic" in result:
        parts.append("=== 网页搜索结果 ===\n")
        for index, item in enumerate(result["organic"][:num_results], 1):
            entry = f"{index}. **{item.get('title', '无标题')}**\n"
            entry += f" 链接: {item.get('link', '#')}\n"
            entry += f" 摘要: {item.get('snippet', '无描述')}\n\n"
            parts.append(entry)

    # Knowledge-graph panel, when present.
    if "knowledgeGraph" in result:
        kg = result["knowledgeGraph"]
        parts.append("=== 知识图谱信息 ===\n")
        parts.append(f"标题: {kg.get('title', '')}\n")
        parts.append(f"类型: {kg.get('type', '')}\n")
        parts.append(f"描述: {kg.get('description', '')}\n\n")

    return [TextContent(type="text", text="".join(parts))]
@mcp.tool()
async def news_search(query: str, num_results: int = 10) -> List[TextContent]:
    """
    执行新闻搜索
    参数:
        query: 新闻查询词
        num_results: 返回结果数量,默认 10 条
    返回:
        新闻搜索结果列表
    """
    # NOTE: docstring kept verbatim — it is the tool description sent to the LLM.
    if not search_tool.api_key:
        return [TextContent(type="text", text="搜索失败: 未配置 SERPER_API_KEY")]
    try:
        # Same Serper.dev credentials/locale as SearchTool, but the /news endpoint.
        async with httpx.AsyncClient() as http:
            resp = await http.post(
                "https://google.serper.dev/news",
                headers={
                    "X-API-KEY": search_tool.api_key,
                    "Content-Type": "application/json",
                },
                json={"q": query, "num": num_results, "gl": "cn", "hl": "zh-cn"},
                timeout=30.0,
            )
            resp.raise_for_status()
            result = resp.json()
        # Build the formatted news listing.
        parts = [f"新闻搜索关键词: {query}\n\n"]
        if "news" in result:
            parts.append("=== 最新新闻结果 ===\n")
            for index, item in enumerate(result["news"][:num_results], 1):
                entry = f"{index}. **{item.get('title', '无标题')}**\n"
                entry += f" 来源: {item.get('source', '未知来源')} | 发布时间: {item.get('date', '未知时间')}\n"
                entry += f" 链接: {item.get('link', '#')}\n"
                entry += f" 摘要: {item.get('snippet', '无描述')}\n\n"
                parts.append(entry)
        return [TextContent(type="text", text="".join(parts))]
    except Exception as exc:
        return [TextContent(type="text", text=f"新闻搜索失败: {str(exc)}")]
@mcp.tool()
async def weather_search(city: str) -> List[TextContent]:
    """
    通过搜索引擎查询指定城市的天气信息
    参数:
        city: 城市名称
    返回:
        汇总后的天气查询结果
    """
    # NOTE: docstring kept verbatim — it is the tool description sent to the LLM.
    # Approximate a weather lookup by web-searching for today's forecast.
    result = await search_tool.search(f"{city} 天气预报 今天", 3)
    if "error" in result:
        return [TextContent(type="text", text=f"天气查询失败: {result['error']}")]

    parts = [f"🌤️ {city} 实时天气检索信息\n\n"]
    if "organic" in result:
        for index, item in enumerate(result["organic"][:3], 1):
            title = item.get("title", "")
            snippet = item.get("snippet", "")
            # Keep only hits that actually look weather-related.
            if "天气" in title or "温度" in snippet:
                entry = f"{index}. {title}\n"
                entry += f" 实时数据: {snippet}\n"
                entry += f" 详情来源: {item.get('link', '')}\n\n"
                parts.append(entry)
    if len(parts) == 1:  # header only — no useful weather content found
        parts.append("未找到详细天气描述,请尝试访问专业天气网站。")
    return [TextContent(type="text", text="".join(parts))]
def main():
    """Entry point: print the registered-tool banner, then start the MCP server."""
    banner = (
        "正在启动 MCP 搜索工具服务器...",
        "已注册工具集:",
        "- web_search: 全网深度搜索",
        "- news_search: 实时新闻检索",
        "- weather_search: 城市天气查询",
        "按 Ctrl+C 即可安全停止服务器",
    )
    for line in banner:
        print(line)
    # Hand control to the FastMCP run loop (blocks until interrupted).
    mcp.run()
if __name__ == "__main__":
    # Run only when executed as a script, not on import.
    main()
客户端演示了如何利用大模型(阿里云百炼)连接上述 Server,动态获取工具列表并自主决定调用:
#!/usr/bin/env python3
"""
LLM 客户端实现 - 集成阿里云百炼与 MCP
演示 LLM 如何通过标准协议发现并调用外部工具
"""
import asyncio
import json
import os
import httpx
from typing import Any, Dict, Optional
from openai import OpenAI
from mcp.client.session import ClientSession
from mcp.client.stdio import StdioServerParameters, stdio_client
from dotenv import load_dotenv
# Load configuration (e.g. DASHSCOPE_API_KEY) from a local .env file.
load_dotenv()
class LLMWithMCP:
    """Bridge between an LLM (Alibaba DashScope, OpenAI-compatible API) and
    the tools exposed by an MCP server.

    Tools are discovered at runtime via the MCP handshake and converted to
    OpenAI function-calling format before being handed to the model.
    """

    def __init__(self):
        # DashScope credentials; fail fast if the env var is missing.
        self.dashscope_api_key = os.getenv("DASHSCOPE_API_KEY")
        if not self.dashscope_api_key:
            raise ValueError("错误:未发现 DASHSCOPE_API_KEY 环境变量")
        # OpenAI SDK client pointed at DashScope's OpenAI-compatible endpoint.
        self.client = OpenAI(
            api_key=self.dashscope_api_key,
            base_url="https://dashscope.aliyuncs.com/compatible-mode/v1"
        )
        self.model_name = "qwen-plus"  # model used for every completion call
        # NOTE(review): mcp_session is declared but never assigned in this
        # class — each operation opens a fresh stdio session instead.
        self.mcp_session: Optional[ClientSession] = None
        # Tool definitions are filled in by initialize_mcp(), not hard-coded.
        self.tools = []           # tools in OpenAI function-calling format
        self.mcp_tools_info = []  # cached structured info about MCP tools

    async def initialize_mcp(self):
        """Spawn the MCP server over stdio and sync its tool schemas.

        Returns True on success, False on any failure (errors are printed,
        not raised).
        """
        try:
            # How to launch the MCP server process (communicates via stdio).
            server_params = StdioServerParameters(
                command="python",
                args=["mcp_server.py"],
                env=None
            )
            # Open the stdio channel and establish an MCP session.
            async with stdio_client(server_params) as (read, write):
                async with ClientSession(read, write) as session:
                    await session.initialize()
                    # Ask the server for every tool it exposes.
                    tools_result = await session.list_tools()
                    print(f"🔧 MCP 发现成功:共识别出 {len(tools_result.tools)} 个可用工具:")
                    openai_tools = []
                    mcp_tools_info = []
                    for tool in tools_result.tools:
                        print(f" 📦 工具名: {tool.name} | 功能说明: {tool.description}")
                        # Convert MCP tool metadata to OpenAI function-calling format.
                        openai_tool = {
                            "type": "function",
                            "function": {
                                "name": tool.name,
                                "description": tool.description
                            }
                        }
                        # Carry over the parameter definitions (inputSchema).
                        if hasattr(tool, 'inputSchema') and tool.inputSchema:
                            openai_tool["function"]["parameters"] = tool.inputSchema
                            print(f" 📋 参数定义已加载: {list(tool.inputSchema.get('properties', {}).keys())}")
                        else:
                            # Fallback schema when the server supplies none.
                            openai_tool["function"]["parameters"] = {
                                "type": "object",
                                "properties": {
                                    "query": {"type": "string", "description": "查询字符串"}
                                },
                                "required": ["query"]
                            }
                        openai_tools.append(openai_tool)
                        mcp_tools_info.append({
                            "name": tool.name,
                            "description": tool.description,
                            "schema": tool.inputSchema if hasattr(tool, 'inputSchema') else None
                        })
                    self.tools = openai_tools
                    self.mcp_tools_info = mcp_tools_info
                    print(f"✅ 成功将 {len(self.tools)} 个工具同步至模型上下文")
                    return True
        except Exception as e:
            print(f"❌ MCP 初始化失败: {str(e)}")
            return False

    def build_system_prompt(self) -> str:
        """Build the system prompt dynamically from the discovered tools."""
        if not self.mcp_tools_info:
            return "你是一个智能助手。"
        tools_desc = []
        for i, tool in enumerate(self.mcp_tools_info, 1):
            tools_desc.append(f"{i}. {tool['name']}: {tool['description']}")
        tools_text = "\n".join(tools_desc)
        return f"""你是一个具备实时外部工具调用能力的智能助手。
当前可用工具集如下:
{tools_text}
原则:
1. 当用户询问最新新闻、网页搜索或特定天气时,必须调用相应工具。
2. 优先使用工具返回的实时数据,而非你的旧有知识库。
3. 如果工具返回错误,请诚实告知用户。"""

    async def call_mcp_tool(self, tool_name: str, arguments: Dict[str, Any]) -> str:
        """Perform the actual remote MCP tool invocation; returns its text result."""
        try:
            print(f"🚀 正在触发外部工具: {tool_name} | 参数内容: {arguments}")
            # For demo simplicity a fresh connection is opened per call
            # (reuse the session in production).
            server_params = StdioServerParameters(command="python", args=["mcp_server.py"])
            async with stdio_client(server_params) as (read, write):
                async with ClientSession(read, write) as session:
                    await session.initialize()
                    result = await session.call_tool(tool_name, arguments)
                    if result.content:
                        return result.content[0].text if result.content[0].type == "text" else str(result.content[0])
                    return "工具返回了空结果。"
        except Exception as e:
            print(f"❌ 远程调用异常: {str(e)}")
            return f"调用外部工具时发生错误: {str(e)}"

    async def chat_with_tools(self, user_message: str) -> str:
        """One chat round: the LLM decides on its own whether to call MCP tools."""
        try:
            messages = [
                {"role": "system", "content": self.build_system_prompt()},
                {"role": "user", "content": user_message}
            ]
            # Step 1: let the LLM analyze intent and decide on tool calls.
            response = self.client.chat.completions.create(
                model=self.model_name,
                messages=messages,
                tools=self.tools,
                temperature=0.7
            )
            assistant_message = response.choices[0].message
            # Step 2: did the model request any tool calls?
            if assistant_message.tool_calls:
                messages.append(assistant_message)
                # Step 3: execute every requested tool in order.
                for tool_call in assistant_message.tool_calls:
                    function_name = tool_call.function.name
                    function_args = json.loads(tool_call.function.arguments)
                    # Perform the MCP protocol call.
                    tool_result = await self.call_mcp_tool(function_name, function_args)
                    # Feed the execution result back to the model.
                    messages.append({
                        "role": "tool",
                        "tool_call_id": tool_call.id,
                        "content": tool_result
                    })
                # Step 4: the model summarizes the fresh data into a final answer.
                final_response = self.client.chat.completions.create(
                    model=self.model_name,
                    messages=messages,
                    temperature=0.7
                )
                return final_response.choices[0].message.content
            else:
                # No tool needed — return the plain reply directly.
                return assistant_message.content
        except Exception as e:
            return f"对话流程中断: {str(e)}"
async def main():
    """Demo driver: connect to the MCP server, then run two sample questions."""
    print("=== LLM + MCP 工具链实战演示 ===")
    client = LLMWithMCP()
    try:
        # Handshake: discover and sync the server's tools first.
        if not await client.initialize_mcp():
            print("由于无法连接 MCP 服务器,程序退出。")
            return
        # Scripted test conversation.
        sample_questions = (
            "请问目前关于 DeepSeek 的最新新闻有哪些?",
            "上海现在的天气如何?",
        )
        for question in sample_questions:
            print(f"\n问: {question}")
            answer = await client.chat_with_tools(question)
            print(f"答: {answer}")
    finally:
        print("\n演示结束。")
if __name__ == "__main__":
    # Drive the async demo with a fresh event loop.
    asyncio.run(main())
🔥 核心原理解读:
通过这套双端代码可以清晰看出:Client 代码不需要自己去硬编码定义每个搜索参数。它只需要执行 session.list_tools(),MCP Server 就会把 web_search、news_search 的完整描述发过来。然后 Client 将其转化为 OpenAI 的 Function 格式塞给 LLM。
这就是 MCP 标准化协议最大的威力:彻底解耦大模型客户端与底层工具逻辑!