Visualization can help you understand a classification algorithm's decision boundaries, its performance, and its behavior on different datasets.
Several common visualization methods are described below.
1. Decision boundary visualization
This method visualizes how different classification algorithms separate the classes in a two-dimensional feature space. It is especially useful for understanding the behavior of models such as decision trees, support vector machines (SVM), logistic regression, and k-nearest neighbors (k-NN).
```python
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression

# Generate a two-dimensional synthetic dataset
# (n_informative=2, n_redundant=0 is required so the feature counts add up to n_features=2)
X, y = make_classification(n_samples=200, n_features=2, n_informative=2, n_redundant=0,
                           n_classes=2, n_clusters_per_class=1, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Define the classifiers to compare
classifiers = {
    'Logistic Regression': LogisticRegression(),
    'SVM': SVC(),
    'Decision Tree': DecisionTreeClassifier(),
    'k-NN': KNeighborsClassifier()
}

# Fit a model and draw its decision boundary on the given axes
def plot_decision_boundaries(X, y, model, ax, title):
    h = 0.02  # mesh step size
    # Build a mesh grid covering the feature space
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    # Fit the model and predict the class of every grid point
    model.fit(X, y)
    Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)
    # Draw the decision regions and the training points
    ax.contourf(xx, yy, Z, alpha=0.8)
    ax.scatter(X[:, 0], X[:, 1], c=y, edgecolors='k', marker='o')
    ax.set_title(title)
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())

# Create a 2x2 grid of subplots
fig, axes = plt.subplots(2, 2, figsize=(12, 10))
axes = axes.ravel()

# Plot the decision boundary for each classifier
for idx, (name, clf) in enumerate(classifiers.items()):
    plot_decision_boundaries(X_train, y_train, clf, axes[idx], title=name)

plt.tight_layout()
plt.show()
```
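If you are on scikit-learn 1.1 or newer, `sklearn.inspection.DecisionBoundaryDisplay` can build the mesh grid and contour plot for you. A minimal sketch, reusing the `classifiers`, `X_train`, and `y_train` objects defined above:

```python
from sklearn.inspection import DecisionBoundaryDisplay  # requires scikit-learn >= 1.1

fig, axes = plt.subplots(2, 2, figsize=(12, 10))
for ax, (name, clf) in zip(axes.ravel(), classifiers.items()):
    clf.fit(X_train, y_train)
    # from_estimator handles the grid construction and contour plotting internally
    DecisionBoundaryDisplay.from_estimator(clf, X_train, response_method='predict',
                                           alpha=0.8, ax=ax)
    ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, edgecolors='k')
    ax.set_title(name)
plt.tight_layout()
plt.show()
```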
2. Confusion matrix visualization
A confusion matrix is a tool for evaluating the performance of a classification model; it shows how the predicted classes line up against the true classes.
Example code
```python
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay

# Using the logistic regression classifier as an example
model = LogisticRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)

# Compute the confusion matrix from the true and predicted labels
cm = confusion_matrix(y_test, y_pred)
disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=['Class 0', 'Class 1'])

# Plot the confusion matrix
disp.plot(cmap=plt.cm.Blues)
plt.title('Confusion Matrix')
plt.show()
```
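On imbalanced data, a row-normalized matrix is often easier to read, because the diagonal then shows per-class recall. A minimal sketch, assuming scikit-learn 1.0+ (for `ConfusionMatrixDisplay.from_estimator`) and the `model`, `X_test`, and `y_test` objects from the example above:

```python
# normalize='true' divides each row by the number of true samples in that class,
# so the diagonal shows per-class recall
ConfusionMatrixDisplay.from_estimator(
    model, X_test, y_test,
    display_labels=['Class 0', 'Class 1'],
    normalize='true',
    cmap=plt.cm.Blues,
)
plt.title('Normalized Confusion Matrix')
plt.show()
```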
3. Learning curves
A learning curve shows how the model's scores on the training set and the validation set change as the number of training samples grows, which helps detect whether the model is underfitting or overfitting.
Example code
```python
from sklearn.model_selection import learning_curve, ShuffleSplit

# Using the SVM classifier as an example
model = SVC()
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=0)
train_sizes, train_scores, test_scores = learning_curve(
    model, X, y, cv=cv, n_jobs=-1, train_sizes=np.linspace(0.1, 1.0, 5))

# Mean and standard deviation across the cross-validation folds
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)

# Plot the learning curves with shaded one-standard-deviation bands
plt.figure(figsize=(8, 6))
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.1, color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                 test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r", label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g", label="Validation score")
plt.xlabel("Number of training samples")
plt.ylabel("Score")
plt.title("Learning Curve")
plt.legend(loc="best")
plt.show()
```
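To read the curves numerically, one rough heuristic is to compare the two scores at the largest training size: a large gap suggests overfitting, while two low, converging scores suggest underfitting. A minimal sketch using the arrays computed above (the thresholds are illustrative, not standard values):

```python
# Compare the scores at the largest training size (rough heuristic only)
final_train = train_scores_mean[-1]
final_val = test_scores_mean[-1]
gap = final_train - final_val

print(f"Final training score:   {final_train:.3f}")
print(f"Final validation score: {final_val:.3f}")
if gap > 0.1:            # threshold chosen arbitrarily for illustration
    print("Large train/validation gap -> the model may be overfitting.")
elif final_val < 0.7:    # threshold chosen arbitrarily for illustration
    print("Both scores are low -> the model may be underfitting.")
else:
    print("Training and validation scores are close and reasonably high.")
```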
These visualization methods and code examples make it easier to understand and present how classification algorithms behave and how well they perform.