活动介绍

# -*- coding: utf-8 -*-
"""Cluster the rows of 附件1.csv with KMeans (k=93) and print the result."""
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from pandas import DataFrame
from sklearn.decomposition import PCA

# Matplotlib: render CJK axis labels and the minus sign correctly.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Pandas/NumPy display: print everything in full, no truncation.
# (The original set display.max_columns twice — None then 9000; None, i.e.
# unlimited, is kept.)
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
np.set_printoptions(threshold=np.inf)
pd.set_option('display.width', 9000)
pd.set_option('display.max_colwidth', 9000)

# Load the GBK-encoded CSV. iloc[:, 1:] drops the first CSV column, and the
# second slice drops one more leading column.
# NOTE(review): X = X[0:, 1:] discards a second column — confirm intended.
df = pd.read_csv(r'附件1.csv', encoding='gbk')
X = np.array(df.iloc[:, 1:])
X = X[0:, 1:]

k = 93  # number of clusters; fixed random_state keeps the run reproducible
kmeans_model = KMeans(n_clusters=k, random_state=123)
fit_kmeans = kmeans_model.fit(X)  # train the model

# Report the clustering result.
kmeans_cc = kmeans_model.cluster_centers_  # cluster centers
print('各类聚类中心为:\n', kmeans_cc)
kmeans_labels = kmeans_model.labels_  # per-sample cluster label
print('各样本的类别标签为:\n', kmeans_labels)
r1 = pd.Series(kmeans_model.labels_).value_counts()  # samples per cluster
print('最终每个类别的数目为:\n', r1)

时间: 2024-03-26 11:39:19 浏览: 144
这段代码主要是使用KMeans算法对一个数据集进行聚类分析,并输出聚类结果。下面是代码的解释:

1. 导入所需的库

```python
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
from pandas import DataFrame
from sklearn.decomposition import PCA
```

2. 读取数据集

```python
df = pd.read_csv(r'附件1.csv', encoding='gbk')
X = np.array(df.iloc[:, 1:])
X = X[0:, 1:]
```

3. 使用KMeans算法进行聚类分析

```python
k = 93
kmeans_model = KMeans(n_clusters=k, random_state=123)
fit_kmeans = kmeans_model.fit(X)
```

4. 输出聚类结果

```python
kmeans_cc = kmeans_model.cluster_centers_
print('各类聚类中心为:\n', kmeans_cc)
kmeans_labels = kmeans_model.labels_
print('各样本的类别标签为:\n', kmeans_labels)
r1 = pd.Series(kmeans_model.labels_).value_counts()
print('最终每个类别的数目为:\n', r1)
```

上述代码中,kmeans_cc表示各个类别的聚类中心,kmeans_labels表示每个样本所属的类别,r1表示每个类别的样本数目。

5. 将聚类中心放在数据框中

```python
# cluster_center = pd.DataFrame(kmeans_model.cluster_centers_,
#                               columns=[str(x) for x in range(1, 94)])  # 将聚类中心放在数据框中
# cluster_center.index = pd.DataFrame(kmeans_model.labels_). \
#     drop_duplicates().iloc[:, 0]  # 将样本类别作为数据框索引
# print(cluster_center)
```

这段代码是将聚类中心放在数据框中,并以样本类别作为索引。但是,这段代码被注释掉了,因此不会被执行。
阅读全文

相关推荐

import numpy as np
import pandas as pd
from scipy.spatial.distance import cdist


# 1. Standardize the features.
def standardize_data(data):
    """Scale *data* to zero mean / unit variance; return (scaled, scaler)."""
    # Local import keeps this module importable where sklearn is absent.
    from sklearn.preprocessing import StandardScaler
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(data)
    return X_scaled, scaler


# 2. Run K-Means clustering.
def perform_kmeans(X_scaled, k=3):
    """Fit KMeans with *k* clusters; return (labels, centroids, model).

    NOTE(review): random_state=None makes each run non-reproducible —
    confirm whether a fixed seed is wanted.
    """
    from sklearn.cluster import KMeans
    kmeans = KMeans(n_clusters=k, init='k-means++', n_init=10,
                    random_state=None)
    kmeans.fit(X_scaled)
    return kmeans.labels_, kmeans.cluster_centers_, kmeans


# 3. Per-cluster statistics.
def calculate_cluster_statistics(X_scaled, cluster_labels, centroids):
    """Return a dict with per-cluster size, within-cluster sum of squares,
    and mean/max Euclidean distance to the centroid.

    The original recomputed the member matrix and the distance vector once
    per statistic; each is now computed once per cluster.
    """
    stats = {
        '观测数量': np.bincount(cluster_labels),
        '类内平方和': [],
        '平均距离': [],
        '最大距离': [],
    }
    for i in range(len(centroids)):
        members = X_scaled[cluster_labels == i]
        dists = cdist(members, [centroids[i]], 'euclidean').flatten()
        stats['类内平方和'].append(np.sum((members - centroids[i]) ** 2))
        stats['平均距离'].append(np.mean(dists))
        stats['最大距离'].append(np.max(dists))
    return stats


# 4. Print the analysis tables.
def display_results(stats, centroids):
    """Pretty-print per-cluster statistics, the centroids, and the pairwise
    centroid distance matrix."""
    index = [f'聚类{i + 1}' for i in range(len(centroids))]
    stats_df = pd.DataFrame(stats, index=index)
    print("K 均值聚类分析结果:")
    print(stats_df)
    print("\n聚类质心:")
    for i, centroid in enumerate(centroids):
        print(f"聚类{i + 1}: {centroid}")
    print("\n质心之间的距离:")
    centroid_distances = cdist(centroids, centroids, 'euclidean')
    print(pd.DataFrame(centroid_distances, index=index, columns=index))

import numpy as np
import pandas as pd


# 1. K-Means++ centroid initialization.
def initialize_centroids(data, k):
    """Pick *k* initial centroids from *data* via k-means++ seeding.

    BUG FIX: k-means++ samples each next centroid with probability
    proportional to the SQUARED distance to the nearest chosen centroid;
    the original weighted by the raw distance.
    """
    centroids = [data[np.random.randint(data.shape[0])]]
    for _ in range(1, k):
        # Squared distance from every point to its nearest chosen centroid.
        sq_dists = np.array([min(np.sum((x - c) ** 2) for c in centroids)
                             for x in data])
        probabilities = sq_dists / sq_dists.sum()
        cumulative = np.cumsum(probabilities)
        r = np.random.rand()
        for i, p in enumerate(cumulative):
            if r < p:
                centroids.append(data[i])
                break
    return np.array(centroids)


# 2. Lloyd's iteration.
def k_means(data, k, max_iters=100, tol=1e-4):
    """Cluster *data* into *k* groups.

    Returns (centroids, clusters) where clusters[i] is the list of points
    assigned to centroid i. Stops after *max_iters* iterations or when every
    centroid moves less than *tol* in every coordinate.
    """
    centroids = initialize_centroids(data, k)
    for _ in range(max_iters):
        clusters = [[] for _ in range(k)]
        for x in data:
            nearest = np.argmin([np.linalg.norm(x - c) for c in centroids])
            clusters[nearest].append(x)
        # An emptied cluster keeps its previous centroid.
        new_centroids = np.array(
            [np.mean(cluster, axis=0) if cluster else centroids[i]
             for i, cluster in enumerate(clusters)])
        if np.all(np.abs(new_centroids - centroids) < tol):
            break
        centroids = new_centroids
    return centroids, clusters


# 3. Validation: average silhouette coefficient.
def silhouette_analysis(data, clusters):
    """Return the mean silhouette score of the clustering.

    BUG FIX: the original filled `labels` sequentially while walking the
    clusters, so labels[i] did NOT correspond to data[i] — silhouette_score
    pairs labels with rows of *data* by position. Labels are now recovered
    per data row by locating the cluster that contains that row.
    """
    from sklearn.metrics import silhouette_score  # local: optional dependency
    labels = np.zeros(data.shape[0], dtype=int)
    for row, x in enumerate(data):
        for ci, cluster in enumerate(clusters):
            if any(np.array_equal(x, member) for member in cluster):
                labels[row] = ci
                break
    return silhouette_score(data, labels)


# 4. Entry point.
def main():
    """Load the spreadsheet and run K-Means with k=2."""
    # Trailing space removed from the original path — it would have made the
    # file unfindable.
    file_path = "C:\\Users\\刘梦雅\\Desktop\\2.xlsx"
    data = pd.read_excel(file_path, engine='openpyxl').values
    k = 2  # number of clusters
    max_iters = 100
    tol = 1e-4
    centroids, clusters = k_means(data, k, max_iters, tol)
    # NOTE(review): the original source was truncated here — the remainder of
    # main() (reporting/plotting) is missing from the page.

doc