Example 1 (Python): k-nearest neighbors classification and regression with scikit-learn
from sklearn import neighbors
import numpy as np

# --- Classification ---
# The original snippet referenced undefined X, y and num_neighbors;
# define small sample data so the example is self-contained and runnable.
X = np.array([[3.1, 2.3], [4.0, 3.9], [5.1, 2.9], [4.5, 3.2],
              [2.2, 1.9], [3.3, 4.4]])
y = np.array([0, 1, 1, 1, 0, 1])
num_neighbors = 3

# Create and train the KNN classifier; 'distance' weights closer
# neighbors more heavily than distant ones.
classifier = neighbors.KNeighborsClassifier(num_neighbors, weights='distance')
classifier.fit(X, y)
test_datapoint = [4.5, 3.6]
# Extract the KNN result: distances to, and indices of, the k nearest
# training points (kneighbors expects a 2-D array, hence the wrapping list).
dist, indices = classifier.kneighbors([test_datapoint])

# --- Regression ---
# Define and train the regressor (the original mixed the spellings
# num_neighbors / n_neighbors; both refer to the neighbor count).
n_neighbors = num_neighbors
knn_regressor = neighbors.KNeighborsRegressor(n_neighbors, weights='distance')
# Query points must have the same number of features (2) as X.
x_values = np.array([[4.5, 3.6], [2.0, 2.0]])
# Predict: fit returns the estimator, so the calls can be chained.
y_values = knn_regressor.fit(X, y).predict(x_values)
Example 2 (Python): simple linear regression with scikit-learn, with a train/test split and a plot
# -*- coding: UTF-8 -*-
import sys
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt

# --- Data preparation ---
X = [1,2,3,4,5,6,7,8,9,10]
y = [22,22,23,24,25,27,27,30,29,30]

# Use the first 80% of the samples for training, the remainder for testing.
num_training = int(0.8 * len(X))
num_test = len(X) - num_training

# Training data (80%). reshape((rows, cols)):
#   rows = number of samples, cols = number of features (1 feature here).
X_train = np.array(X[:num_training]).reshape((num_training, 1))
y_train = np.array(y[:num_training])

# Test data (20%)
X_test = np.array(X[num_training:]).reshape((num_test, 1))
y_test = np.array(y[num_training:])

# Create an ordinary least-squares linear regression model and
# fit it on the training split.
linear_regressor = linear_model.LinearRegression()
linear_regressor.fit(X_train, y_train)

# Use the trained model to predict the held-out test set.
y_test_pred = linear_regressor.predict(X_test)
print('测试数据集的预测结果:',y_test_pred)

# Open a new blank figure window.
plt.figure()
# Scatter plot: the actual test-set values.
plt.scatter(X_test, y_test, color='green')
# Line plot: the model's predictions over the test inputs.
plt.plot(X_test, y_test_pred, color='black', linewidth=4)
# BUG FIX: the figure shows the TEST data and its predictions,
# but was previously titled 'Training data'.
plt.title('Test data')
plt.show()