1. Hands-on Exercises
(1) Load the iris dataset (iris.txt) into iris_df, use seaborn.lmplot to find anomalous values in the class column, and handle the other outliers at the same time.
```python
# Load sklearn's built-in iris dataset into a DataFrame
import pandas as pd
from sklearn.datasets import load_iris

pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
iris = load_iris()
iris_df = pd.DataFrame(iris['data'], columns=iris['feature_names'])
iris_df['target'] = iris['target']
```
```python
# Load the iris dataset from iris.txt into iris_df
import pandas as pd
import matplotlib.pyplot as plt

iris_df = pd.read_csv('iris.txt', sep=',')
iris_df
```
```python
# Use lmplot, faceted by class, to visually screen for outliers
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")

sns.lmplot(x='sepal_length', y='sepal_width', col='class', data=iris_df)
sns.lmplot(x='petal_length', y='petal_width', col='class', data=iris_df)
iris_df['class'].drop_duplicates()
# The plots and the distinct class values show anomalies in the class column,
# as well as outliers in sepal_width and sepal_length
```
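As a quick cross-check (not part of the original walkthrough), counting label frequencies also exposes mistyped class names, since the bad labels show up with only a handful of rows. A minimal sketch, assuming iris_df has been loaded as above:

```python
# Count each distinct label; typos such as 'versicolor' or 'Iris-setossa'
# stand out as labels with very few rows
print(iris_df['class'].value_counts())
```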
```python
# There should be exactly 3 classes: rename 'versicolor' to 'Iris-versicolor'
# and 'Iris-setossa' to 'Iris-setosa'
iris_df.loc[iris_df['class'] == 'versicolor', 'class'] = 'Iris-versicolor'
iris_df.loc[iris_df['class'] == 'Iris-setossa', 'class'] = 'Iris-setosa'
# Re-plot to confirm there are now only 3 classes
sns.lmplot(x='sepal_length', y='sepal_width', col='class', data=iris_df)
```
```python
# Inspect the distribution of sepal_width for Iris-setosa with a histogram
iris_df.loc[iris_df['class'] == 'Iris-setosa', 'sepal_width'].hist()
```
```python
# Drop Iris-setosa rows whose sepal_width is below 2.5 cm
iris_df = iris_df.loc[(iris_df['class'] != 'Iris-setosa') | (iris_df['sepal_width'] >= 2.5)]
iris_df.loc[iris_df['class'] == 'Iris-setosa', 'sepal_width'].hist()
```
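The 2.5 cm cut-off above comes from eyeballing the histogram. A more general screen is the interquartile-range rule; the sketch below is an optional alternative (not part of the original exercise), and the 1.5×IQR factor is just the usual convention:

```python
# Flag values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] within each class
def iqr_outliers(df, column, group='class', factor=1.5):
    q1 = df.groupby(group)[column].transform(lambda s: s.quantile(0.25))
    q3 = df.groupby(group)[column].transform(lambda s: s.quantile(0.75))
    iqr = q3 - q1
    mask = (df[column] < q1 - factor * iqr) | (df[column] > q3 + factor * iqr)
    return df[mask]

print(iqr_outliers(iris_df, 'sepal_width'))
```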
```python
# List the outliers: Iris-versicolor rows whose sepal_length is close to 0
iris_df.loc[(iris_df['class'] == 'Iris-versicolor') & (iris_df['sepal_length'] < 1.0)]
```
```python
# The near-zero Iris-versicolor sepal_length values were recorded in metres;
# multiply by 100 to convert them to centimetres
iris_df.loc[(iris_df['class'] == 'Iris-versicolor') & (iris_df['sepal_length'] < 1.0), 'sepal_length'] *= 100
iris_df.loc[iris_df['class'] == 'Iris-versicolor', 'sepal_length'].hist()
```
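After these fixes it is worth confirming that every class now sits in a plausible centimetre range. This per-class summary is a small optional add-on to the walkthrough:

```python
# Per-class summary statistics for the corrected column
print(iris_df.groupby('class')['sepal_length'].describe())
```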
(2) Use isnull and describe to inspect missing values, then handle them.
```python
# Count missing values per column
iris_df.isnull().sum()
```
```python
# Summary statistics; the count row also hints at columns with missing values
iris_df.describe()
```
```python
# Show the rows where petal_width is missing
iris_df.loc[iris_df['petal_width'].isnull()]
```
```python
# Fill the missing values with the mean of the same class, then list the modified rows
avg_value = iris_df.loc[iris_df['class'] == 'Iris-setosa', 'petal_width'].mean()
iris_df.loc[(iris_df['class'] == 'Iris-setosa') & (iris_df['petal_width'].isnull()), 'petal_width'] = avg_value
iris_df.loc[(iris_df['class'] == 'Iris-setosa') & (iris_df['petal_width'] == avg_value)]
```
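The same per-class mean imputation can be written once for every class and column with groupby().transform(). A minimal sketch of that alternative, assuming the numeric columns are named as above:

```python
# Impute each numeric column with its own class mean in one pass
numeric_cols = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
iris_df[numeric_cols] = iris_df.groupby('class')[numeric_cols].transform(
    lambda s: s.fillna(s.mean()))
```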
```python
# Check whether any missing values remain
iris_df.isnull().sum()
```
```python
# Map class names to integer labels (e.g. Iris-setosa becomes 0)
class_mapping = {'Iris-setosa': 0, 'Iris-versicolor': 1, 'Iris-virginica': 2}
iris_df['class'] = iris_df['class'].map(class_mapping)
iris_df
```
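If you would rather not maintain the mapping dictionary by hand, scikit-learn's LabelEncoder produces the same integer encoding. This is an optional alternative to the map call above, shown here as a standalone demo:

```python
from sklearn.preprocessing import LabelEncoder

# fit_transform assigns 0, 1, 2 to the class names in sorted order
le = LabelEncoder()
encoded = le.fit_transform(['Iris-setosa', 'Iris-versicolor', 'Iris-virginica'])
print(dict(zip(le.classes_, encoded)))  # {'Iris-setosa': 0, ...}
```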
```python
# Save the cleaned data
iris_df.to_csv('iris-clean.csv', index=False)
```
(3) Import sklearn's built-in load_iris dataset and extract the feature matrix and the target array (labels).
```python
from sklearn.datasets import load_iris

iris = load_iris()
iris_X = iris.data    # feature matrix, shape (150, 4)
iris_Y = iris.target  # target array (labels), shape (150,)
```
(4) Use KNeighborsClassifier() to classify and predict.
```python
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, cross_val_score

def knn_function(X, Y):
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.3)
    clf = KNeighborsClassifier()      # build the model
    clf.fit(X_train, Y_train)         # train the model
    predict_test = clf.predict(X_test)
    print('Predicted values', '\n', predict_test)
    print('True values', '\n', Y_test)
    score = clf.score(X_test, Y_test, sample_weight=None)  # accuracy on the test set
    print('Accuracy', '\n', score)
    return clf

knn_function(iris_X, iris_Y)
```
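Accuracy alone does not show which classes get confused with each other; a confusion matrix and classification report give a per-class view. This is an optional add-on, assuming iris_X and iris_Y from step (3):

```python
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

X_train, X_test, Y_train, Y_test = train_test_split(iris_X, iris_Y, test_size=0.3,
                                                    random_state=0)
clf = KNeighborsClassifier().fit(X_train, Y_train)
Y_pred = clf.predict(X_test)
print(confusion_matrix(Y_test, Y_pred))       # rows: true class, columns: predicted
print(classification_report(Y_test, Y_pred))  # precision / recall / F1 per class
```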
(5) Load iris-clean.csv, extract the feature matrix and target array, call knn_function(), and save the model.
```python
import pandas as pd
import pickle
import seaborn as sns
import matplotlib.pyplot as plt

iris = pd.read_csv('iris-clean.csv')
# Extract the feature matrix and the target array (labels)
iris_XX = iris.loc[0:, 'sepal_length':'petal_width'].values
iris_YY = iris['class'].values
# Call the function defined in step (4)
knn_model = knn_function(iris_XX, iris_YY)
# Save the model
with open('knn_model.pkl', 'wb') as f:
    pickle.dump(knn_model, f)
# Load the saved model back
with open('knn_model.pkl', 'rb') as f:
    model = pickle.load(f)
# How the model's performance depends on the choice of test set:
# score the loaded model on 1000 random train/test splits
model_accuracies = []
for repetition in range(1000):
    X_train, X_test, Y_train, Y_test = \
        train_test_split(iris_XX, iris_YY, test_size=0.3)
    score = model.score(X_test, Y_test, sample_weight=None)
    model_accuracies.append(score)
sns.distplot(model_accuracies)  # in newer seaborn versions, use sns.histplot(..., kde=True)
plt.show()
```
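pickle works fine here; scikit-learn's documentation also suggests joblib for estimators that hold large numpy arrays. An equivalent save/load sketch with joblib, as an optional alternative (assuming knn_model, iris_XX and iris_YY from above):

```python
import joblib

# Persist and reload the trained estimator with joblib
joblib.dump(knn_model, 'knn_model.joblib')
model = joblib.load('knn_model.joblib')
print(model.score(iris_XX, iris_YY))  # quick check that the reloaded model still predicts
```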
(6) Hyperparameters and tuning: using sklearn's built-in iris data, choose a KNN model and tune the hyperparameter K, using 10-fold cross-validation to find the best K in the range 1 to 25.
```python
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt

iris = datasets.load_iris()
X = iris.data
Y = iris.target
# Split into training and test sets: 33% of the data is held out for testing,
# with the random seed fixed at 10 for reproducibility
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.33,
                                                    random_state=10)
k_range = range(1, 26)
cv_scores = []
for n in k_range:
    clf = KNeighborsClassifier(n_neighbors=n)
    scores = cross_val_score(clf, X_train, Y_train, cv=10, scoring='accuracy')
    cv_scores.append(scores.mean())
plt.plot(k_range, cv_scores)
plt.xlabel('K')
plt.ylabel('Accuracy')
plt.show()
# Choose the best K from the cross-validation curve (here K=5)
# and evaluate it on the held-out test set
best_clf = KNeighborsClassifier(n_neighbors=5)
best_clf.fit(X_train, Y_train)
print('Parameters', best_clf.get_params())
print('Accuracy', best_clf.score(X_test, Y_test))
print('Predicted values', best_clf.predict(X_test))
```
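The loop above is essentially a manual grid search; GridSearchCV performs the same 10-fold scan and picks the best K automatically instead of reading it off the plot. A minimal sketch of that alternative, reusing X_train, Y_train, X_test and Y_test from above:

```python
from sklearn.model_selection import GridSearchCV
from sklearn.neighbors import KNeighborsClassifier

# 10-fold cross-validated search over K = 1..25
param_grid = {'n_neighbors': range(1, 26)}
search = GridSearchCV(KNeighborsClassifier(), param_grid, cv=10, scoring='accuracy')
search.fit(X_train, Y_train)
print('Best K:', search.best_params_['n_neighbors'])
print('Best CV accuracy:', search.best_score_)
print('Test accuracy:', search.score(X_test, Y_test))
```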
2. Dataset Download