因为之前也在网络安全公司待过,对网络安全,渗透测试有一定了解,并且近期看到有篇国外的博客讲了他怎么用智能体黑了两家大公司的经历,于是今天我也让 AI 给我写了一个能做渗透测试的智能体,不知道能不能用,代码如下:
核心架构设计:
python
# pentest_agent.py
# 自主渗透测试智能体 - LangChain + LangGraph 实现
import os
import json
import asyncio
from typing import TypedDict, List, Dict, Any, Optional, Literal, Annotated
from datetime import datetime
from dataclasses import dataclass, field
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage, ToolMessage
from langchain_core.tools import tool, BaseTool
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langgraph.graph import StateGraph, END
from langgraph.prebuilt import ToolNode
from langgraph.checkpoint.memory import MemorySaver
# ==================== 数据模型定义 ====================
class PentestState(TypedDict):
    """Mutable state threaded through the LangGraph pentest workflow.

    Every graph node receives this dict, mutates it in place, and returns it.
    """
    messages: Annotated[List[Any], "对话历史"]  # running LLM conversation history
    target: str  # target domain/IP under test
    current_phase: Literal["recon", "scanning", "exploitation", "post_exploit", "reporting", "finished"]
    findings: List[Dict]  # vulnerabilities discovered so far
    scan_results: Dict  # raw tool output cache, keyed by tool name
    task_queue: List[Dict]  # pending tool invocations queued by the planner
    executed_tasks: List[str]  # human-readable log of executed tool calls
    risk_level: Literal["low", "medium", "high", "critical"]  # worst severity seen
    permissions: Dict  # authorization scope supplied by the caller
    reasoning: str  # latest planner reasoning text
@dataclass
class Vulnerability:
    """Structured record of a single security finding."""
    name: str
    severity: Literal["info", "low", "medium", "high", "critical"]
    description: str
    evidence: str  # raw excerpt of tool output backing the finding
    remediation: str  # suggested fix for the report
    cwe_id: Optional[str] = None  # e.g. "CWE-89" when classifiable
    cvss_score: Optional[float] = None  # 0.0-10.0 when scored
# ==================== 安全工具定义 ====================
class SecurityTools:
    """Penetration-testing tool collection (demo stubs).

    Every tool currently returns canned demo output so the agent loop can be
    exercised end-to-end; real implementations would shell out to scanners
    such as subfinder, nmap, nikto, and sqlmap.
    NOTE(review): the tool docstrings double as the descriptions the LLM sees
    when picking tools, so keep them accurate.
    """
    @staticmethod
    @tool
    async def dns_enumeration(domain: str) -> str:
        """
        Perform DNS enumeration to discover subdomains and DNS records.
        :param domain: target domain, e.g. example.com
        """
        # A real implementation would invoke subfinder, amass, etc.
        return f"""
DNS枚举结果 for {domain}:
- 发现子域名: 12个
- 发现A记录: api.{domain}, admin.{domain}, dev.{domain}
- 发现CNAME: www -> cdn.example.com
- DNSSEC: 未启用
- SPF记录: 存在,但配置宽松
"""
    @staticmethod
    @tool
    async def port_scan(target: str, ports: str = "top1000") -> str:
        """
        Run a port scan against the target.
        :param target: target IP or domain
        :param ports: port range, e.g. "80,443" or "top1000"
        """
        return f"""
端口扫描结果 for {target}:
- 开放端口: 22(SSH), 80(HTTP), 443(HTTPS), 3306(MySQL), 8080(Tomcat)
- 服务识别:
* 80: Apache httpd 2.4.41
* 443: nginx/1.18.0
* 3306: MySQL 5.7.33
* 8080: Apache Tomcat/9.0.31
"""
    @staticmethod
    @tool
    async def web_scan(url: str) -> str:
        """
        Run a web-application security scan.
        :param url: target URL
        """
        return f"""
Web扫描结果 for {url}:
- 技术栈: PHP 7.4, Apache, jQuery 1.12.4(存在已知漏洞)
- 发现端点: /admin, /api/v1, /upload, /.git/HEAD(暴露)
- 安全头缺失: X-Frame-Options, CSP, X-Content-Type-Options
- 发现表单: 登录页 /login.php (可能存在暴力破解)
- Cookie: 未设置 HttpOnly 和 Secure 标志
"""
    @staticmethod
    @tool
    async def vulnerability_scan(service: str, version: str) -> str:
        """
        Vulnerability scan: look up known CVEs for a specific service version.
        :param service: service name, e.g. "apache"
        :param version: version string, e.g. "2.4.41"
        """
        # Tiny embedded CVE "database" for demo purposes only; a real tool
        # would query NVD or a local vulners feed.
        cve_db = {
            "apache": {
                "2.4.41": ["CVE-2020-11984", "CVE-2020-11993"],
                "2.4.49": ["CVE-2021-41773", "CVE-2021-42013"]
            },
            "tomcat": {
                "9.0.31": ["CVE-2020-9484", "CVE-2020-13935"]
            }
        }
        # Case-insensitive service lookup; unknown service/version -> empty list.
        cves = cve_db.get(service.lower(), {}).get(version, [])
        if cves:
            return f"发现 {len(cves)} 个已知CVE: {', '.join(cves)}"
        return "未发现该版本的已知CVE"
    @staticmethod
    @tool
    async def directory_bruteforce(url: str, wordlist: str = "common") -> str:
        """
        Brute-force directories on the target web server.
        :param url: base URL
        :param wordlist: wordlist type to use
        """
        return f"""
目录爆破结果 for {url}:
- 发现敏感目录: /backup/, /config/, /logs/, /phpmyadmin/
- 发现备份文件: /backup/website_20240101.zip
- 发现配置文件: /.env.bak, /config.php.bak
- 响应码分析: 200(15个), 403(23个), 301(8个)
"""
    @staticmethod
    @tool
    async def sql_injection_test(endpoint: str, method: str = "GET") -> str:
        """
        Test an endpoint for SQL injection.
        :param endpoint: endpoint to test
        :param method: HTTP method
        """
        return f"""
SQL注入测试 for {endpoint}:
- 参数 id: 存在整数型注入 (延时确认: 5秒)
- 参数 search: 可能存在盲注
- 数据库类型: MySQL 5.7 (通过错误信息确认)
- 注入类型: UNION-based, Time-based blind
- 建议: 使用参数化查询,WAF规则需加强
"""
    @staticmethod
    @tool
    def generate_report(findings: str, format_type: str = "markdown") -> str:
        """
        Generate the penetration-test report.
        :param findings: JSON string of discovered vulnerabilities
        :param format_type: report format, markdown/pdf/html
        """
        # Note: this is the only synchronous tool in the set.
        return f"""
报告生成完成:
- 格式: {format_type}
- 统计: 高危 2个, 中危 5个, 低危 8个
- 文件: /reports/pentest_report_{datetime.now().strftime('%Y%m%d')}.{format_type}
- 包含: 执行摘要、技术细节、修复建议、复现步骤
"""
# ==================== 智能体核心逻辑 ====================
class PentestAgent:
    """Autonomous penetration-testing agent.

    Wraps an LLM planner, a tool executor, and a result analyzer in a
    LangGraph loop (planner -> executor -> analyzer -> planner) that runs
    until the planner moves the engagement into the "finished" phase.
    """

    def __init__(self, api_key: Optional[str] = None):
        """Initialize the LLM, the tool registry, and the compiled workflow.

        :param api_key: OpenAI API key; falls back to the OPENAI_API_KEY env var.
        """
        self.llm = ChatOpenAI(
            model="gpt-4-turbo-preview",
            temperature=0.1,  # near-deterministic planning decisions
            api_key=api_key or os.getenv("OPENAI_API_KEY")
        )
        # Tool registry: the planner refers to tools by registered name.
        self.tools = [
            SecurityTools.dns_enumeration,
            SecurityTools.port_scan,
            SecurityTools.web_scan,
            SecurityTools.vulnerability_scan,
            SecurityTools.directory_bruteforce,
            SecurityTools.sql_injection_test,
            SecurityTools.generate_report
        ]
        # (fix) loop variable renamed: `tool` shadowed the imported @tool decorator.
        self.tool_map = {t.name: t for t in self.tools}
        # Build the graph and compile with in-memory checkpointing.
        self.workflow = self._build_graph()
        self.app = self.workflow.compile(checkpointer=MemorySaver())

    def _build_graph(self) -> StateGraph:
        """Assemble the planner/executor/analyzer LangGraph workflow."""

        def planning_node(state: PentestState) -> PentestState:
            """Planning node: ask the LLM for the next best action."""
            # (fix) The system prompt must be a ("system", template) tuple so
            # {phase}/{findings_count}/{executed} are actually substituted;
            # a SystemMessage object is passed through verbatim by
            # ChatPromptTemplate and the placeholders would reach the LLM raw.
            prompt = ChatPromptTemplate.from_messages([
                ("system", """你是高级渗透测试专家AI。基于当前状态,分析已收集的信息,决定下一步最佳行动。
当前阶段: {phase}
已发现: {findings_count} 个安全问题
已执行任务: {executed}
决策规则:
1. recon阶段: 优先执行DNS枚举、WHOIS查询
2. scanning阶段: 基于recon结果选择端口扫描、Web扫描
3. exploitation阶段: 对发现的漏洞进行深入验证
4. 每个阶段完成后,评估是否进入下一阶段或需要补充信息
输出JSON格式:
{{
"reasoning": "详细推理过程",
"next_action": "工具名称或'phase_transition'",
"action_params": {{参数}},
"should_transition": false,
"new_phase": "阶段名称(如果需要转换)"
}}
"""),
                MessagesPlaceholder(variable_name="messages")
            ])
            chain = prompt | self.llm
            response = chain.invoke({
                "phase": state["current_phase"],
                "findings_count": len(state["findings"]),
                "executed": state["executed_tasks"],
                "messages": state["messages"]
            })
            # Parse the model's JSON decision; on malformed output fall back
            # to a safe default (restart recon) instead of crashing.
            # (fix) narrowed the bare `except:` that swallowed everything.
            try:
                decision = json.loads(response.content)
            except (json.JSONDecodeError, TypeError):
                decision = {
                    "reasoning": "解析错误,继续信息收集",
                    "next_action": "dns_enumeration",
                    "action_params": {"domain": state["target"]},
                    "should_transition": False
                }
            state["reasoning"] = decision.get("reasoning", "")
            if decision.get("should_transition"):
                state["current_phase"] = decision.get("new_phase", state["current_phase"])
            # Queue the chosen tool call unless this step is a pure phase change.
            if decision.get("next_action") != "phase_transition":
                state["task_queue"].append({
                    "tool": decision["next_action"],
                    "params": decision.get("action_params", {}),
                    "reasoning": decision["reasoning"]
                })
            state["messages"].append(AIMessage(content=response.content))
            return state

        def execution_node(state: PentestState) -> PentestState:
            """Execution node: pop one queued task and run the matching tool."""
            if not state["task_queue"]:
                return state
            task = state["task_queue"].pop(0)
            tool_name = task["tool"]
            params = task["params"]
            if tool_name in self.tool_map:
                tool_obj = self.tool_map[tool_name]
                try:
                    # (fix) BaseTool has no `._func` attribute, and for async
                    # @tool functions `.func` is None. `.ainvoke()` handles
                    # sync and async tools uniformly and validates args.
                    # This sync node is run in a worker thread under
                    # astream(), so asyncio.run() does not clash with the
                    # outer event loop.
                    result = asyncio.run(tool_obj.ainvoke(params))
                    state["executed_tasks"].append(f"{tool_name}: {params}")
                    # Extract structured findings from the raw tool output.
                    analysis = self._analyze_result(tool_name, result)
                    if analysis:
                        state["findings"].extend(analysis)
                    state["scan_results"][tool_name] = result
                    state["messages"].append(ToolMessage(
                        content=f"工具 {tool_name} 执行结果:\n{result}",
                        tool_call_id=tool_name
                    ))
                except Exception as e:
                    # Tool failures are recorded, not fatal: the planner can react.
                    state["messages"].append(ToolMessage(
                        content=f"工具 {tool_name} 执行失败: {str(e)}",
                        tool_call_id=tool_name
                    ))
            return state

        def analysis_node(state: PentestState) -> PentestState:
            """Analysis node: LLM deep-dive over scan results to extract vulns."""
            if not state["scan_results"]:
                return state
            # Message objects (not tuples) are deliberate here: scan output may
            # contain `{}` braces that must NOT be treated as template vars.
            prompt = ChatPromptTemplate.from_messages([
                SystemMessage(content="""分析以下扫描结果,识别安全漏洞并评估风险。
输出严格的JSON数组格式,每个漏洞包含:
- name: 漏洞名称
- severity: 严重程度 (critical/high/medium/low/info)
- description: 描述
- evidence: 证据
- remediation: 修复建议
- cwe_id: CWE编号(可选)
"""),
                HumanMessage(content=f"扫描结果:\n{json.dumps(state['scan_results'], indent=2, ensure_ascii=False)}")
            ])
            chain = prompt | self.llm
            try:
                response = chain.invoke({})
                raw = response.content.strip()
                # (fix) Models often wrap JSON in markdown fences; strip them
                # before parsing instead of failing every round.
                if raw.startswith("```"):
                    raw = raw.strip("`").strip()
                    if raw[:4].lower() == "json":
                        raw = raw[4:]
                vulnerabilities = json.loads(raw)
                # Deduplicate by vulnerability name before merging.
                existing_names = {f["name"] for f in state["findings"]}
                for vuln in vulnerabilities:
                    if vuln["name"] not in existing_names:
                        state["findings"].append(vuln)
                # Overall risk is the highest severity seen so far.
                severities = [f["severity"] for f in state["findings"]]
                if "critical" in severities:
                    state["risk_level"] = "critical"
                elif "high" in severities:
                    state["risk_level"] = "high"
                elif "medium" in severities:
                    state["risk_level"] = "medium"
            except Exception as e:
                # Best-effort analysis: log and continue rather than abort the run.
                print(f"分析节点错误: {e}")
            return state

        def should_continue(state: PentestState) -> str:
            """Router: end when finished, execute when tasks queued, else replan."""
            if state["current_phase"] == "finished":
                return "end"
            if state["task_queue"]:
                return "execute"
            # NOTE(review): "plan" loops planner -> planner; LangGraph's
            # recursion limit is the only backstop if the LLM never finishes.
            return "plan"

        workflow = StateGraph(PentestState)
        workflow.add_node("planner", planning_node)
        workflow.add_node("executor", execution_node)
        workflow.add_node("analyzer", analysis_node)
        # (fix) Removed the unreachable ToolNode("tools") node: it had no
        # edges, and tools are invoked directly inside execution_node.
        workflow.set_entry_point("planner")
        workflow.add_conditional_edges(
            "planner",
            should_continue,
            {
                "execute": "executor",
                "plan": "planner",
                "end": END
            }
        )
        workflow.add_edge("executor", "analyzer")
        workflow.add_edge("analyzer", "planner")
        return workflow

    def _analyze_result(self, tool_name: str, result: str) -> List[Dict]:
        """Pattern-match raw tool output into structured finding dicts.

        :param tool_name: registered name of the tool that produced ``result``
        :param result: raw textual tool output
        :return: list of finding dicts (possibly empty)
        """
        import re
        findings = []
        # One finding per CVE identifier mentioned anywhere in the output.
        for cve in re.findall(r'CVE-\d{4}-\d+', result):
            findings.append({
                "name": f"已知漏洞 {cve}",
                "severity": "high",
                "description": f"检测到存在{cve}漏洞",
                "evidence": result[:200],
                "remediation": "立即升级受影响的软件到最新版本",
                "source_tool": tool_name
            })
        # Injection keywords (Chinese or English) -> critical SQLi finding.
        if "注入" in result or "injection" in result.lower():
            findings.append({
                "name": "SQL注入漏洞",
                "severity": "critical",
                "description": "应用程序存在SQL注入漏洞",
                "evidence": result[:300],
                "remediation": "使用参数化查询,输入验证,最小权限原则",
                "source_tool": tool_name
            })
        # Exposed backups / VCS metadata -> information-disclosure finding.
        if "备份" in result or ".git" in result:
            findings.append({
                "name": "敏感信息泄露",
                "severity": "high",
                "description": "发现备份文件或版本控制信息泄露",
                "evidence": result[:200],
                "remediation": "删除敏感备份文件,配置.gitignore,限制目录访问",
                "source_tool": tool_name
            })
        return findings

    async def run(self, target: str, scope: Optional[Dict] = None) -> Dict:
        """Run a full penetration test against ``target``.

        :param target: target domain or IP (must be within authorized scope)
        :param scope: authorization/restriction dict; defaults to allow-all
        :return: summary dict with findings, risk level, and a markdown report
        """
        initial_state: PentestState = {
            "messages": [
                SystemMessage(content=f"""开始渗透测试。目标: {target}
规则:
1. 仅在授权范围内测试
2. 避免破坏性操作
3. 记录所有操作
4. 优先识别高危漏洞
"""),
                HumanMessage(content=f"开始对 {target} 进行渗透测试")
            ],
            "target": target,
            "current_phase": "recon",
            "findings": [],
            "scan_results": {},
            "task_queue": [],
            "executed_tasks": [],
            "risk_level": "low",
            "permissions": scope or {"allowed": True, "restrictions": []},
            "reasoning": ""
        }
        # Unique thread id so MemorySaver checkpoints don't collide across runs.
        config = {"configurable": {"thread_id": f"pentest_{target}_{datetime.now().timestamp()}"}}
        final_state = None
        # Stream node-by-node updates for live progress reporting.
        async for event in self.app.astream(initial_state, config):
            for key, value in event.items():
                print(f"节点: {key}")
                if key == "planner":
                    print(f" 推理: {value.get('reasoning', 'N/A')[:100]}...")
                elif key == "executor":
                    print(f" 执行任务: {value.get('executed_tasks', [])[-1] if value.get('executed_tasks') else 'N/A'}")
                final_state = value
        return {
            "target": target,
            "findings": final_state["findings"] if final_state else [],
            "risk_level": final_state["risk_level"] if final_state else "unknown",
            "phases_completed": list(set(final_state["executed_tasks"])) if final_state else [],
            "report": self._generate_final_report(final_state) if final_state else "No data"
        }

    def _generate_final_report(self, state: PentestState) -> str:
        """Render the final markdown report from the terminal workflow state."""
        findings = state["findings"]
        # Bucket findings by severity for the summary section.
        critical = [f for f in findings if f.get("severity") == "critical"]
        high = [f for f in findings if f.get("severity") == "high"]
        medium = [f for f in findings if f.get("severity") == "medium"]
        low = [f for f in findings if f.get("severity") == "low"]
        report = f"""
# 渗透测试报告
**目标**: {state['target']}
**测试时间**: {datetime.now().strftime('%Y-%m-%d %H:%M')}
**总体风险**: {state['risk_level'].upper()}
## 执行摘要
本次测试采用自主AI智能体进行,共执行 {len(state['executed_tasks'])} 项测试任务,
发现 {len(findings)} 个安全问题。
## 风险统计
- 🔴 严重 (Critical): {len(critical)} 个
- 🟠 高危 (High): {len(high)} 个
- 🟡 中危 (Medium): {len(medium)} 个
- 🟢 低危 (Low): {len(low)} 个
## 详细发现
"""
        for sev, items in [("Critical", critical), ("High", high), ("Medium", medium), ("Low", low)]:
            if items:
                report += f"\n### {sev} 级别漏洞\n"
                for item in items:
                    report += f"""
**{item['name']}**
- **描述**: {item['description']}
- **证据**: {item['evidence'][:150]}...
- **修复建议**: {item['remediation']}
"""
        report += f"""
## 测试覆盖
已执行测试: {', '.join(set([t.split(':')[0] for t in state['executed_tasks']]))}
## 建议
1. 优先修复 Critical 和 High 级别漏洞
2. 建立定期安全扫描机制
3. 加强输入验证和访问控制
"""
        return report
# ==================== 使用示例 ====================
async def main():
    """Example entry point: configure an authorized scope and run the agent."""
    # Build the agent (supply a real key in practice).
    agent = PentestAgent(api_key="your-api-key")
    # Engagement scope -- make sure written authorization exists before running!
    scope = {
        "allowed": True,
        "target": "testphp.vulnweb.com",  # deliberately vulnerable public test site
        "restrictions": ["no_dos", "no_data_exfiltration"],
        "authorized_by": "Security Team",
        "valid_until": "2024-12-31"
    }
    bar = "=" * 50
    print(bar)
    print("🛡️ 自主渗透测试智能体启动")
    print(bar)
    print(f"目标: {scope['target']}")
    print(f"授权: {scope['authorized_by']}")
    print("-" * 50)
    # Kick off the full recon -> scan -> exploit -> report loop.
    result = await agent.run(scope["target"], scope)
    print("\n" + bar)
    print("📊 测试完成")
    print(bar)
    print(f"风险等级: {result['risk_level']}")
    print(f"发现问题: {len(result['findings'])} 个")
    print("\n完整报告:")
    print(result['report'])


if __name__ == "__main__":
    asyncio.run(main())
高级功能扩展
- 多智能体协作系统
python
# multi_agent_pentest.py
from langchain_core.messages import BaseMessage
from langgraph.graph import StateGraph, END
import operator
from typing import Sequence
class MultiAgentState(TypedDict):
    """Shared state for the multi-agent pentest graph."""
    messages: Annotated[Sequence[BaseMessage], operator.add]  # append-only message log
    next: str  # name of the agent the supervisor routes to next
    target: str  # engagement target
    findings: Dict[str, List]  # findings grouped by agent name
def create_multi_agent_system():
    """Create the multi-agent collaborative pentest system.

    A supervisor node routes work to four specialist agents; every specialist
    hands control back to the supervisor until it answers FINISH.

    NOTE(review): sketch-level code -- create_recon_agent/create_scan_agent/
    create_exploit_agent/create_report_agent and ``llm`` are not defined in
    this file and must be provided elsewhere before this can run.
    """
    # Recon agent - focused on information gathering
    recon_agent = create_recon_agent()
    # Scanning agent - focused on vulnerability scanning
    scan_agent = create_scan_agent()
    # Exploitation agent - focused on vulnerability verification
    exploit_agent = create_exploit_agent()
    # Reporting agent - focused on report generation
    report_agent = create_report_agent()

    # Supervisor agent - coordinates task assignment
    def supervisor(state: MultiAgentState) -> MultiAgentState:
        """Supervisor node: decide which specialist agent works next."""
        # NOTE(review): reads state keys 'phase' and 'pending_tasks' that no
        # node in this graph ever sets -- confirm the intended state schema.
        prompt = f"""基于当前渗透测试状态,决定下一步由哪个专家代理执行。
当前阶段: {state.get('phase', 'initial')}
待处理任务: {state.get('pending_tasks', [])}
选择: RECON(侦察) | SCAN(扫描) | EXPLOIT(利用) | REPORT(报告) | FINISH(完成)
"""
        # NOTE(review): ``llm`` is assumed to be a module-level chat model.
        response = llm.invoke(prompt)
        # The raw model reply (uppercased) is used verbatim as the routing key.
        state["next"] = response.content.strip().upper()
        return state

    # Build the multi-agent graph
    workflow = StateGraph(MultiAgentState)
    workflow.add_node("supervisor", supervisor)
    workflow.add_node("recon", recon_agent)
    workflow.add_node("scanner", scan_agent)
    workflow.add_node("exploiter", exploit_agent)
    workflow.add_node("reporter", report_agent)
    # Conditional routing: the supervisor's "next" field picks the branch.
    workflow.add_conditional_edges(
        "supervisor",
        lambda x: x["next"],
        {
            "RECON": "recon",
            "SCAN": "scanner",
            "EXPLOIT": "exploiter",
            "REPORT": "reporter",
            "FINISH": END
        }
    )
    # Every specialist returns control to the supervisor when done.
    for node in ["recon", "scanner", "exploiter", "reporter"]:
        workflow.add_edge(node, "supervisor")
    workflow.set_entry_point("supervisor")
    return workflow.compile()
- 记忆与学习能力
python
# learning_module.py
from langchain_core.vectorstores import VectorStore
from langchain_openai import OpenAIEmbeddings
class PentestMemory:
    """Long-term memory for the pentest agent.

    Stores embeddings of successful exploitation techniques so future runs
    can retrieve tactics that worked on similar targets, and keeps a set of
    actions flagged as false positives.
    """

    def __init__(self):
        self.embeddings = OpenAIEmbeddings()
        self.exploit_db = []          # records of successful exploitation techniques
        self.false_positives = set()  # actions observed to time out / fail

    @staticmethod
    def _cosine_similarity(a, b) -> float:
        """Cosine similarity of two equal-length vectors; 0.0 for zero vectors.

        (fix) The original code called an undefined ``cosine_similarity``
        name, which raised NameError at query time.
        """
        import math
        dot = sum(x * y for x, y in zip(a, b))
        norm_a = math.sqrt(sum(x * x for x in a))
        norm_b = math.sqrt(sum(y * y for y in b))
        if norm_a == 0.0 or norm_b == 0.0:
            return 0.0
        return dot / (norm_a * norm_b)

    async def store_successful_exploit(self, technique: Dict):
        """Store a successful technique together with its embedding.

        :param technique: arbitrary dict describing the exploitation technique
        """
        self.exploit_db.append({
            "technique": technique,
            "embedding": await self.embeddings.aembed_query(str(technique)),
            "timestamp": datetime.now(),
            # NOTE(review): stores the boolean "success" flag, not a rate.
            "success_rate": technique.get("success", True)
        })

    async def similar_targets(self, current_target: str) -> List[Dict]:
        """Return up to 5 stored records most similar to ``current_target``.

        Brute-force cosine-similarity search over the in-memory store.
        """
        current_emb = await self.embeddings.aembed_query(current_target)
        return sorted(
            self.exploit_db,
            key=lambda rec: self._cosine_similarity(current_emb, rec["embedding"]),
            reverse=True
        )[:5]

    def learn_from_result(self, action: str, result: str, was_successful: bool):
        """Record feedback: failed actions that timed out are flagged.

        :param action: identifier of the attempted action
        :param result: raw result text (scanned for "timeout")
        :param was_successful: whether the action achieved its goal
        """
        if not was_successful and "timeout" in result.lower():
            self.false_positives.add(action)
        # TODO: update the action-selection policy from accumulated feedback.
- 安全约束与伦理检查
python
# safety_guardrails.py
from langchain_core.runnables import RunnableLambda
class SafetyChecker:
    """Guardrail layer: command blacklisting and target-scope validation."""

    # Regex patterns for commands the agent must never execute.
    FORBIDDEN_PATTERNS = [
        r"rm\s+-rf\s+/",       # destructive recursive delete
        r"dd\s+if=.*of=/dev",  # raw disk writes
        r"mkfs\.",             # filesystem formatting
        r">/etc/",             # overwriting system configuration
    ]

    @staticmethod
    def check_command(command: str) -> tuple[bool, str]:
        """Return ``(is_safe, message)`` for a candidate shell command."""
        import re
        # First forbidden pattern that matches (case-insensitive), if any.
        matched = next(
            (p for p in SafetyChecker.FORBIDDEN_PATTERNS
             if re.search(p, command, re.IGNORECASE)),
            None,
        )
        if matched is not None:
            return False, f"检测到危险命令模式: {matched}"
        return True, "安全"

    @staticmethod
    def validate_scope(target: str, authorized_targets: List[str]) -> bool:
        """True when target equals an authorized host or is a subdomain of one."""
        for auth in authorized_targets:
            if target == auth or target.endswith(f".{auth}"):
                return True
        return False
# Safety check applied before tool execution
def safe_tool_execution(tool_func):
    """Decorator: vet a ``command`` kwarg with SafetyChecker before executing.

    :param tool_func: async tool coroutine function to guard
    :return: wrapped coroutine that short-circuits with a refusal string
             instead of running an unsafe command
    """
    import functools

    # (fix) functools.wraps preserves the tool's __name__/__doc__, which
    # LangChain uses as the tool's registered name and description.
    @functools.wraps(tool_func)
    async def wrapper(*args, **kwargs):
        # Only shell-command arguments are vetted; other params pass through.
        if "command" in kwargs:
            is_safe, msg = SafetyChecker.check_command(kwargs["command"])
            if not is_safe:
                return f"❌ 安全检查阻止: {msg}"
        # Delegate to the original tool.
        return await tool_func(*args, **kwargs)
    return wrapper
部署建议
text
┌─────────────────────────────────────────┐
│ API Gateway (FastAPI) │
│ /pentest/start /pentest/status │
├─────────────────────────────────────────┤
│ LangGraph 工作流引擎 │
│ ┌─────────┐ ┌─────────┐ ┌────────┐ │
│ │ Planner │→│ Executor│→│Analyzer│ │
│ └─────────┘ └────┬────┘ └────────┘ │
│ ↓ │
│ ┌─────────┐ │
│ │ Tool Set │ │
│ │ • Nmap │ │
│ │ • SQLMap │ │
│ │ • Burp API│ │
│ └─────────┘ │
├─────────────────────────────────────────┤
│ 结果存储 & 报告生成 │
│ PostgreSQL + Redis + Celery │
└─────────────────────────────────────────┘