Clustering the Iris Dataset with C-Means (K-Means) Using sklearn and NumPy (2024, Complete Code and Comments)

The c-means clustering algorithm, better known as K-Means clustering, is a very popular clustering algorithm that partitions data points into clusters so that points within the same cluster are as similar as possible and points in different clusters are as different as possible. The basic steps of K-Means are:

  1. Initialization: randomly select K points as the initial cluster centers (centroids).

  2. Assignment: assign each data point to the cluster of its nearest centroid.

  3. Update: recompute each centroid as the mean of all points currently in its cluster.

  4. Iteration: repeat steps 2 and 3 until a termination condition is met (for example, the maximum number of iterations is reached, or the centroids move by less than some threshold).

  5. Termination: once the termination condition is satisfied, the algorithm stops, and the final cluster assignment is the clustering result.
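In optimization terms, steps 2 and 3 alternately reduce the within-cluster sum of squared distances. Writing \(\mu_i\) for the centroid of cluster \(C_i\), the objective K-Means minimizes can be written as

J = \sum_{i=1}^{K} \sum_{x \in C_i} \lVert x - \mu_i \rVert^2

Each assignment-plus-update pass never increases J, which is why the iterations settle at a (local) optimum.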

The sklearn approach

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler

# Load the iris dataset
iris = datasets.load_iris()
X = iris.data

# Standardize the features (zero mean, unit variance)
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Candidate numbers of clusters
k_values = [2, 3, 4, 5]

# Evaluate the clustering quality for each K
for k in k_values:
    # n_init set explicitly because the sklearn default changed in recent versions
    kmeans = KMeans(n_clusters=k, n_init=10, random_state=42)
    kmeans.fit(X_scaled)
    labels = kmeans.labels_
    
    # Compute the silhouette score (range [-1, 1]; higher is better)
    silhouette_avg = silhouette_score(X_scaled, labels)
    print(f"For n_clusters = {k}, silhouette score is {silhouette_avg}")

    # Visualize the clusters on the first two standardized features
    plt.figure(figsize=(8, 6))
    plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', marker='o', label='Cluster')
    centers = kmeans.cluster_centers_
    plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75, marker='*', label='Centroids')
    plt.title(f'K-Means Clustering with n_clusters = {k}')
    plt.xlabel('Sepal length (standardized)')
    plt.ylabel('Sepal width (standardized)')
    plt.legend()
    plt.show()

# Compare different initialization methods at a fixed K
k = 3
init_methods = ['random', 'k-means++']
for init in init_methods:
    kmeans = KMeans(n_clusters=k, init=init, n_init=10, random_state=42)
    kmeans.fit(X_scaled)
    labels = kmeans.labels_
    
    # Compute the silhouette score
    silhouette_avg = silhouette_score(X_scaled, labels)
    print(f"For n_clusters = {k}, init method = {init}, silhouette score is {silhouette_avg}")

    # Visualize the clusters on the first two standardized features
    plt.figure(figsize=(8, 6))
    plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', marker='o', label='Cluster')
    centers = kmeans.cluster_centers_
    plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75, marker='*', label='Centroids')
    plt.title(f'K-Means Clustering with n_clusters = {k}, init method = {init}')
    plt.xlabel('Sepal length (standardized)')
    plt.ylabel('Sepal width (standardized)')
    plt.legend()
    plt.show()
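As a supplementary check (not part of the original script), the elbow method offers another quick read on K: track KMeans' inertia_ attribute, the within-cluster sum of squared distances, over a range of K and look for the point where the curve flattens. A minimal sketch, assuming X_scaled and the imports above are still in scope:

# Supplementary sketch: elbow curve over a range of K
inertias = []
for k in range(1, 9):
    km = KMeans(n_clusters=k, n_init=10, random_state=42).fit(X_scaled)
    inertias.append(km.inertia_)  # within-cluster sum of squared distances

plt.figure(figsize=(6, 4))
plt.plot(range(1, 9), inertias, marker='o')
plt.xlabel('n_clusters')
plt.ylabel('Inertia (within-cluster SSE)')
plt.title('Elbow curve for K-Means on the scaled iris data')
plt.show()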

The NumPy approach

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.preprocessing import StandardScaler

# K-Means implemented with NumPy
def kmeans(X, k, max_iters=100):
    n_samples, n_features = X.shape
    # Randomly pick k distinct samples as the initial centroids
    centroids = X[np.random.choice(n_samples, k, replace=False)]
    for _ in range(max_iters):
        # Euclidean distance from every sample to every centroid, shape (n_samples, k)
        distances = np.sqrt(((X[:, np.newaxis] - centroids) ** 2).sum(axis=2))
        # Assign each sample to its nearest centroid
        labels = np.argmin(distances, axis=1)
        # Move each centroid to the mean of the samples assigned to it
        new_centroids = np.array([X[labels == i].mean(axis=0) for i in range(k)])
        # Stop when the centroids no longer move
        if np.allclose(centroids, new_centroids):
            break
        centroids = new_centroids
    return labels, centroids

# Load the iris dataset and standardize it
iris = datasets.load_iris()
X = iris.data
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Evaluate the clustering for different numbers of clusters
from sklearn.metrics import silhouette_score
k_values = [2, 3, 4]
for k in k_values:
    labels, centers = kmeans(X_scaled, k)
    silhouette_avg = silhouette_score(X_scaled, labels)
    print(f"For n_clusters = {k}, silhouette score is {silhouette_avg}")
# Visualize the clustering result for each k
for k in k_values:
    labels, centers = kmeans(X_scaled, k)
    plt.figure(figsize=(8, 6))
    plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', marker='o', label='Cluster')
    plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75, marker='*', label='Centroids')
    plt.title(f'K-Means Clustering with n_clusters = {k}')
    plt.xlabel('Sepal length (standardized)')
    plt.ylabel('Sepal width (standardized)')
    plt.legend()
    plt.show()
    
# Compare initialization methods: K-Means with k-means++ seeding
def kmeans_plusplus(X, k, max_iters=100):
    n_samples, n_features = X.shape
    # k-means++ seeding: pick the first centroid at random, then pick each
    # subsequent centroid with probability proportional to the squared
    # distance to the nearest centroid chosen so far
    centroids = [X[np.random.choice(n_samples)]]
    for _ in range(1, k):
        distances = np.sqrt(((X[:, np.newaxis] - np.array(centroids)) ** 2).sum(axis=2))
        probabilities = distances.min(axis=1) ** 2
        cumulative_probabilities = probabilities.cumsum()
        r = np.random.rand() * cumulative_probabilities[-1]
        new_centroid_index = np.searchsorted(cumulative_probabilities, r)
        centroids.append(X[new_centroid_index])

    # Standard Lloyd iterations, identical to kmeans() above
    centroids = np.array(centroids)
    for _ in range(max_iters):
        distances = np.sqrt(((X[:, np.newaxis] - centroids) ** 2).sum(axis=2))
        labels = np.argmin(distances, axis=1)
        new_centroids = np.array([X[labels == i].mean(axis=0) for i in range(k)])
        if np.allclose(centroids, new_centroids):
            break
        centroids = new_centroids
    return labels, centroids

for k in k_values:
    labels, centers = kmeans_plusplus(X_scaled, k)
    silhouette_avg = silhouette_score(X_scaled, labels)
    print(f"For n_clusters = {k}, silhouette score (k-means++) is {silhouette_avg}")

    plt.figure(figsize=(8, 6))
    plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', marker='o', label='Cluster')
    plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75, marker='*', label='Centroids')
    plt.title(f'K-Means++ Clustering with n_clusters = {k}')
    plt.xlabel('Sepal length (standardized)')
    plt.ylabel('Sepal width (standardized)')
    plt.legend()
    plt.show()
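Since the iris dataset also ships with the true species labels, one extra sanity check (not in the original code) is to compare the unsupervised clustering against iris.target. The adjusted Rand index from sklearn.metrics works well here because it ignores how the clusters happen to be numbered. A minimal sketch, assuming X_scaled, iris, and the kmeans() function above are in scope:

from sklearn.metrics import adjusted_rand_score

np.random.seed(42)  # make the random centroid initialization reproducible
labels, _ = kmeans(X_scaled, 3)
ari = adjusted_rand_score(iris.target, labels)
print(f"Adjusted Rand index vs. true species labels (k=3): {ari:.3f}")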