Classification Algorithms on the heart Dataset

1 The heart dataset: descriptive statistics

```python
import matplotlib.pyplot as plt
import pandas as pd

# Load the dataset
heart = pd.read_csv(r"heart.csv", sep=',')

# Check the columns in the DataFrame
print(heart.columns)

# Class distribution of the target (0 = no heart disease, 1 = heart disease)
counts = heart.loc[:, 'y'].value_counts()
print(counts)

# Bar chart of the class counts
plt.bar([0, 1], counts, color=['#FF0000', '#00FF00'])
plt.xticks([0, 1], ['No heart disease', 'Heart disease'], rotation=0)

# Annotate each bar with its count
for x, y in enumerate(counts):
    plt.text(x, y, '%s' % y, ha='center', va='bottom')

plt.title('Heart disease distribution')
plt.show()
```
Output:

```
Index(['sbp', 'tobacco', 'ldl', 'adiposity', 'age', 'y'], dtype='object')
y
0    302
1    160
Name: count, dtype: int64
```
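The section heading promises descriptive statistics, while the code above only tallies the two classes. A minimal sketch of per-feature summaries, assuming the same `heart` DataFrame is in scope:

```python
# Overall summary statistics for every numeric column
print(heart.describe())

# Mean of each predictor within each outcome group
print(heart.groupby('y').mean())
```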

2 Cross-validation: selecting the optimal k for KNN classification

```python
# Cross-validation to select the optimal k for KNN
from sklearn.model_selection import cross_val_score
from sklearn.neighbors import KNeighborsClassifier
    
X = heart.iloc[:, 0:5]
y = heart.loc[:, 'y']
k_range = range(1, 31)
k_scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    scores = cross_val_score(knn, X, y, cv=10, scoring='accuracy')
    k_scores.append(scores.mean())
    
plt.plot(k_range, k_scores)
plt.xlabel('Value of K for KNN')
plt.ylabel('Cross-Validated Accuracy')

# Pick the k with the highest mean cross-validated accuracy
k = k_scores.index(max(k_scores)) + 1
print('Optimal k: %d' % k)

# Mark the optimal k on the curve and annotate its coordinates
plt.scatter(k, max(k_scores), color='red')
plt.text(k, max(k_scores), '(%d, %.2f)' % (k, max(k_scores)), ha='center', va='bottom')
plt.show()
```
Output:

```
Optimal k: 22
```
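The loop above is equivalent to a grid search over `n_neighbors`; scikit-learn's GridSearchCV expresses the same search more compactly. A minimal sketch, assuming `X` and `y` from the block above:

```python
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

# 10-fold CV over k = 1..30, same as the explicit loop
param_grid = {'n_neighbors': range(1, 31)}
grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=10, scoring='accuracy')
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)
```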

KNN classifier

```python
# Build a KNN classifier with the optimal k
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Initialize and fit the KNN classifier
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(X_train, y_train)

# Predict and print accuracy
y_pred = knn.predict(X_test)
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))

# Plot the decision regions
from matplotlib.colors import ListedColormap
import numpy as np
from sklearn.decomposition import PCA

def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    # Reduce dimensionality to 2D using PCA
    pca = PCA(n_components=2)
    X_pca = pca.fit_transform(X)

    # setup marker generator and color map
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # plot the decision surface
    x1_min, x1_max = X_pca[:, 0].min() - 1, X_pca[:, 0].max() + 1
    x2_min, x2_max = X_pca[:, 1].min() - 1, X_pca[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(pca.inverse_transform(np.array([xx1.ravel(), xx2.ravel()]).T))
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X_pca[y == cl, 0], y=X_pca[y == cl, 1],
                    alpha=0.8, c=[cmap(idx)],
                    marker=markers[idx], label=cl)

    # highlight test samples
    if test_idx:
        X_test, y_test = X_pca[test_idx, :2], y[test_idx]
        # hollow black circles so the underlying class markers stay visible
        plt.scatter(X_test[:, 0], X_test[:, 1],
                    facecolors='none', edgecolors='black',
                    alpha=1.0, linewidth=1, marker='o',
                    s=55, label='test set')
        
# Plot decision regions using PCA-transformed features
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined, y=y_combined, classifier=knn, test_idx=range(len(y_train), len(y_train) + len(y_test)))
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.legend(loc='upper left')
plt.show()
```
Output:

```
Accuracy: 0.69
```
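KNN is distance-based, so the raw features (sbp in the hundreds, ldl in single digits) contribute very unevenly to the Euclidean metric. A minimal sketch with standardization, assuming the train/test split above; this is an optional refinement, not part of the original post:

```python
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Standardize features before computing neighbor distances
knn_scaled = make_pipeline(StandardScaler(), KNeighborsClassifier(n_neighbors=k))
knn_scaled.fit(X_train, y_train)
print('Accuracy (scaled): %.2f' % knn_scaled.score(X_test, y_test))
```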

Naive Bayes classifier

```python
# Gaussian Naive Bayes classifier
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from matplotlib.colors import ListedColormap

# Load the dataset
heart = pd.read_csv(r"heart.csv", sep=',')

# Select features and target
X = heart.iloc[:, 0:5]
y = heart.loc[:, 'y']

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Initialize and fit the Gaussian Naive Bayes classifier
gnb = GaussianNB()
gnb.fit(X_train, y_train)

# Predict and print accuracy
y_pred = gnb.predict(X_test)
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))

# Reuse the plot_decision_regions helper defined in the KNN section above

# Plot decision regions using PCA-transformed features
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined, y=y_combined, classifier=gnb, test_idx=range(len(y_train), len(y_train) + len(y_test)))
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.legend(loc='upper left')
plt.show()
```
Output:

```
Accuracy: 0.70
```
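With a 302/160 class split, accuracy alone can mask poor recall on the minority class. A minimal sketch of a fuller evaluation for the Naive Bayes predictions, assuming `y_test` and `y_pred` from above:

```python
from sklearn.metrics import classification_report, confusion_matrix

# Per-class precision, recall and F1 alongside the raw confusion matrix
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred,
                            target_names=['No heart disease', 'Heart disease']))
```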

SVM classifier

```python
# Classification with a linear SVM
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
    
from sklearn.svm import SVC

# Load the dataset
heart = pd.read_csv(r"heart.csv", sep=',')
# Select features and target
X = heart.iloc[:, 0:5]
y = heart.loc[:, 'y']
    
# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Initialize and fit the SVM classifier
svm = SVC(kernel='linear', C=1.0, random_state=0)
svm.fit(X_train, y_train)

# Predict and print accuracy
y_pred = svm.predict(X_test)
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
```
Output:

```
Accuracy: 0.66
```
```python
# Plot decision regions using PCA-transformed features
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined, y=y_combined, classifier=svm, test_idx=range(len(y_train), len(y_train) + len(y_test)))
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.legend(loc='upper left')
plt.show()
```
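A linear SVM on unscaled inputs is a weak baseline; standardizing the features and switching to an RBF kernel is a common next step. A minimal sketch under the same split; `C` and `gamma` here are illustrative defaults, not tuned values:

```python
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# RBF-kernel SVM on standardized features
svm_rbf = make_pipeline(StandardScaler(),
                        SVC(kernel='rbf', C=1.0, gamma='scale', random_state=0))
svm_rbf.fit(X_train, y_train)
print('Accuracy (RBF, scaled): %.2f' % svm_rbf.score(X_test, y_test))
```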

Decision tree classification

```python
# Import necessary libraries
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import export_graphviz
import pydotplus
from IPython.display import Image

# Load the dataset
heart = pd.read_csv(r"heart.csv", sep=',')

# Select features and target
X = heart.iloc[:, 0:5]
y = heart.loc[:, 'y']

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)

# Initialize and fit the Decision Tree classifier
tree = DecisionTreeClassifier(max_depth=3, random_state=0)
tree.fit(X_train, y_train)

# Predict and print accuracy
y_pred = tree.predict(X_test)
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))

# Export the decision tree to a file
export_graphviz(tree, out_file='tree.dot', feature_names=X.columns)

# Convert the dot file to a png
graph = pydotplus.graph_from_dot_file('tree.dot')
Image(graph.create_png())

# Plot decision regions using PCA-transformed features
X_combined = np.vstack((X_train, X_test))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined, y=y_combined, classifier=tree, test_idx=range(len(y_train), len(y_train) + len(y_test)))
plt.xlabel('Principal Component 1')
plt.ylabel('Principal Component 2')
plt.legend(loc='upper left')
plt.show()
```
Output:

```
Accuracy: 0.68
```
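A fitted tree also records how much each predictor contributed to its splits; a minimal sketch, assuming `tree` and `X` from above:

```python
# Impurity-based importance of each of the five predictors
for name, importance in zip(X.columns, tree.feature_importances_):
    print('%-10s %.3f' % (name, importance))
```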

Visualizing the decision tree

```python
# Plot the fitted decision tree
from sklearn.tree import plot_tree
plt.figure(figsize=(20, 10))
plot_tree(tree, filled=True, feature_names=X.columns, class_names=['0', '1'])
plt.show()
```
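For comparison, an ensemble of trees usually generalizes better than a single depth-limited tree. A minimal random-forest sketch under the same split; `n_estimators=100` is an illustrative choice, not a tuned value:

```python
from sklearn.ensemble import RandomForestClassifier

# Bagged ensemble of decision trees on the same train/test split
forest = RandomForestClassifier(n_estimators=100, random_state=0)
forest.fit(X_train, y_train)
print('Accuracy: %.2f' % accuracy_score(y_test, forest.predict(X_test)))
```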