Clustering the Iris Dataset with c-Means Using sklearn and NumPy (2024, Complete Code with Comments)

c-means clustering (K-Means clustering) is a very popular clustering algorithm used to partition data points into several clusters, so that points within a cluster are as similar as possible and points in different clusters are as different as possible. The basic steps of K-Means are listed below, followed by a minimal one-iteration sketch:

  1. Initialization: randomly choose K points as the initial cluster centers (centroids).

  2. Assignment: assign each data point to the cluster whose centroid is nearest.

  3. Update: recompute each centroid as the mean of all points in its cluster.

  4. Iteration: repeat steps 2 and 3 until a stopping condition is met (for example, a maximum number of iterations is reached, or the centroids move less than some threshold).

  5. Termination: once the stopping condition is met, the algorithm ends and the final cluster assignment is the clustering result.
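
Before turning to the full implementations, here is a minimal sketch of steps 2 and 3 on a tiny made-up 2-D array (the sample values and the choice of the two initial centroids are illustrative assumptions, not part of the implementations below):

import numpy as np

# Toy data; pick two of the points as initial centroids (step 1)
X = np.array([[1.0, 2.0], [1.5, 1.8], [5.0, 8.0], [8.0, 8.0]])
centroids = X[[0, 2]]

# Step 2 (assignment): distance from every point to every centroid
distances = np.linalg.norm(X[:, np.newaxis] - centroids, axis=2)
labels = distances.argmin(axis=1)

# Step 3 (update): each centroid becomes the mean of its assigned points
centroids = np.array([X[labels == i].mean(axis=0) for i in range(2)])
print(labels, centroids)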

sklearn approach

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler

# Load the Iris dataset
iris = datasets.load_iris()
X = iris.data

# Standardize the data
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Define the candidate K values
k_values = [2, 3, 4, 5]

# Evaluate clustering quality for different K values
for k in k_values:
    kmeans = KMeans(n_clusters=k, random_state=42)
    kmeans.fit(X_scaled)
    labels = kmeans.labels_
    
    # Compute the silhouette coefficient
    silhouette_avg = silhouette_score(X_scaled, labels)
    print(f"For n_clusters = {k}, silhouette score is {silhouette_avg}")

    # Visualize the clustering result
    plt.figure(figsize=(8, 6))
    plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', marker='o', label='Cluster')
    centers = kmeans.cluster_centers_
    plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75, marker='*', label='Centroids')
    plt.title(f'K-Means Clustering with n_clusters = {k}')
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.legend()
    plt.show()

# Evaluate the effect of different initialization methods (with k fixed at 3)
k = 3
init_methods = ['random', 'k-means++']
for init in init_methods:
    kmeans = KMeans(n_clusters=k, init=init, random_state=42)
    kmeans.fit(X_scaled)
    labels = kmeans.labels_
    
    # Compute the silhouette coefficient
    silhouette_avg = silhouette_score(X_scaled, labels)
    print(f"For n_clusters = {k}, init method = {init}, silhouette score is {silhouette_avg}")

    # Visualize the clustering result
    plt.figure(figsize=(8, 6))
    plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', marker='o', label='Cluster')
    centers = kmeans.cluster_centers_
    plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75, marker='*', label='Centroids')
    plt.title(f'K-Means Clustering with n_clusters = {k}, init method = {init}')
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.legend()
    plt.show()
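
A common way to use these silhouette scores is to keep the k with the highest score. The helper below is a minimal sketch of that idea (the function name best_k_by_silhouette is made up for this post, not an sklearn API):

from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score

def best_k_by_silhouette(X, k_values, random_state=42):
    # Fit K-Means for each candidate k and record its silhouette score
    scores = {}
    for k in k_values:
        labels = KMeans(n_clusters=k, random_state=random_state).fit_predict(X)
        scores[k] = silhouette_score(X, labels)
    # Return the k with the highest score together with all scores
    return max(scores, key=scores.get), scores

best_k, scores = best_k_by_silhouette(X_scaled, [2, 3, 4, 5])
print(f"Best k by silhouette score: {best_k}, scores: {scores}")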

NumPy approach

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.preprocessing import StandardScaler

# Implement the K-Means algorithm with NumPy
def kmeans(X, k, max_iters=100):
    n_samples, n_features = X.shape
    # Initialization: pick k distinct samples as the initial centroids
    centroids = X[np.random.choice(n_samples, k, replace=False)]
    for _ in range(max_iters):
        # Assignment: Euclidean distance from every sample to every centroid
        distances = np.sqrt(((X[:, np.newaxis] - centroids) ** 2).sum(axis=2))
        labels = np.argmin(distances, axis=1)
        # Update: each centroid becomes the mean of the samples assigned to it
        new_centroids = np.array([X[labels == i].mean(axis=0) for i in range(k)])
        # Stop once the centroids no longer change
        if np.all(centroids == new_centroids):
            break
        centroids = new_centroids
    return labels, centroids

# Load the Iris dataset and standardize it
iris = datasets.load_iris()
X = iris.data
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Evaluate clustering quality for different k values
from sklearn.metrics import silhouette_score
k_values = [2, 3, 4]
for k in k_values:
    labels, centers = kmeans(X_scaled, k)
    silhouette_avg = silhouette_score(X_scaled, labels)
    print(f"For n_clusters = {k}, silhouette score is {silhouette_avg}")
# Visualize the clustering result for each k value
for k in k_values:
    labels, centers = kmeans(X_scaled, k)
    plt.figure(figsize=(8, 6))
    plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', marker='o', label='Cluster')
    plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75, marker='*', label='Centroids')
    plt.title(f'K-Means Clustering with n_clusters = {k}')
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.legend()
    plt.show()
    
# Evaluate the effect of the initialization method: K-Means with K-Means++ seeding
def kmeans_plusplus(X, k, max_iters=100):
    n_samples, n_features = X.shape
    # K-Means++ seeding: the first centroid is a uniformly random sample
    centroids = [X[np.random.choice(n_samples)]]
    for _ in range(1, k):
        # Euclidean distance from each sample to its nearest chosen centroid
        distances = np.sqrt(((X[:, np.newaxis] - np.array(centroids)) ** 2).sum(axis=2))
        probabilities = distances.min(axis=1) ** 2
        # Draw the next centroid with probability proportional to the squared distance
        cumulative_probabilities = probabilities.cumsum()
        r = np.random.rand() * cumulative_probabilities[-1]
        new_centroid_index = np.searchsorted(cumulative_probabilities, r)
        centroids.append(X[new_centroid_index])

    # Standard K-Means iterations starting from the K-Means++ centroids
    centroids = np.array(centroids)
    for _ in range(max_iters):
        distances = np.sqrt(((X[:, np.newaxis] - centroids) ** 2).sum(axis=2))
        labels = np.argmin(distances, axis=1)
        new_centroids = np.array([X[labels == i].mean(axis=0) for i in range(k)])
        if np.all(centroids == new_centroids):
            break
        centroids = new_centroids
    return labels, centroids

for k in k_values:
    labels, centers = kmeans_plusplus(X_scaled, k)
    silhouette_avg = silhouette_score(X_scaled, labels)
    print(f"For n_clusters = {k}, silhouette score (k-means++) is {silhouette_avg}")

    plt.figure(figsize=(8, 6))
    plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels, cmap='viridis', marker='o', label='Cluster')
    plt.scatter(centers[:, 0], centers[:, 1], c='red', s=200, alpha=0.75, marker='*', label='Centroids')
    plt.title(f'K-Means++ Clustering with n_clusters = {k}')
    plt.xlabel('Feature 1')
    plt.ylabel('Feature 2')
    plt.legend()
    plt.show()
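
As a sanity check, the NumPy implementation can be compared against sklearn's KMeans on the same data; the adjusted Rand index is close to 1 when the two partitions agree. This comparison is an extra sketch, not part of the code above:

from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score

np_labels, _ = kmeans_plusplus(X_scaled, 3)
sk_labels = KMeans(n_clusters=3, init='k-means++', random_state=42).fit_predict(X_scaled)

# ARI ignores label permutations, so it directly measures agreement of the two partitions
print("Adjusted Rand index:", adjusted_rand_score(np_labels, sk_labels))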