```python
# Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# SVM classifier (SVC) and regressor (SVR)
from sklearn.svm import SVC, SVR
# Generate a two-class dataset
from sklearn.datasets import make_blobs
data, target = make_blobs(centers=2)
plt.scatter(data[:, 0], data[:, 1], c=target)
# Train the model
# pd.DataFrame(data).head()     # optional: inspect the data
# data.shape, target.shape
sv = SVC(C=1, kernel='linear')
'''
Notes on the SVC parameters:
1. C: a larger C means stricter classification; noisy points are no longer
   tolerated, which can hurt generalization. (A quick comparison of different
   C values appears at the end of this example.)
2. kernel: the kernel function; the default 'rbf' is usually a reasonable
   choice. Here 'linear' is used so the decision boundary can be drawn as a
   straight line.
'''
sv.fit(data, target)
# Plot the optimal decision boundary learned by the SVM.
# The boundary is the line w1*x1 + w2*x2 + b = 0.
# First get the weights and the intercept.
# sv.coef_[0]      # e.g. array([-0.69045562, -0.92961922])
# sv.coef_.shape   # (1, 2)
w1, w2 = sv.coef_[0]
b = sv.intercept_[0]
# Draw the decision boundary
plt.scatter(data[:, 0], data[:, 1], c=target)
# Span the observed range of the first feature (make_blobs has no fixed
# random_state here, so a hard-coded range could miss the clusters).
x1 = np.linspace(data[:, 0].min(), data[:, 0].max(), 100)
# Solve w1*x1 + w2*x2 + b = 0 for x2
x2 = -1 * (w1 * x1 + b) / w2
plt.plot(x1, x2, c='r')
# Mark the support vectors
x1_s = sv.support_vectors_[:, 0]
x2_s = sv.support_vectors_[:, 1]
plt.scatter(x1_s, x2_s, s=200, alpha=0.3, c='b')
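# Sanity check (a small sketch, not needed for the plot): support vectors that
# sit exactly on the margin have a decision-function value of +1 or -1; any
# that fall inside the margin would have a smaller absolute value.
print(sv.decision_function(sv.support_vectors_))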
# Draw the two margin lines through the support vectors, as dashed lines.
# support_vectors_ lists one class's support vectors first, then the other's,
# so the first and last entries are guaranteed to come from different classes.
# Each margin line shares the boundary's slope and passes through a support
# vector; for on-margin support vectors this is the line w1*x1 + w2*x2 + b = ±1.
b1 = -1 * (w1 * x1_s[0] + w2 * x2_s[0])
b2 = -1 * (w1 * x1_s[-1] + w2 * x2_s[-1])
x2_1 = -1 * (w1 * x1 + b1) / w2
x2_2 = -1 * (w1 * x1 + b2) / w2
plt.plot(x1, x2_1, linestyle='--', linewidth=5, c='r')
plt.plot(x1, x2_2, linestyle='--', linewidth=5, c='r')
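# As noted in the parameter comments above, C controls how strictly margin
# violations are penalized: a small C tolerates more violations and typically
# yields a wider margin, a large C a narrower, stricter one. A minimal sketch
# (the C values are chosen arbitrarily for illustration): the margin width of
# a linear SVM is 2 / ||w||.
for c in (0.01, 100):
    sv_c = SVC(C=c, kernel='linear').fit(data, target)
    print(f'C={c}: margin width = {2 / np.linalg.norm(sv_c.coef_[0]):.3f}')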
```

```python
# Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.svm import SVR
# Build training data: 150 random points on [0, 10) with y = sin(x)
x = np.random.random(150) * 10
y = np.sin(x)
# Add noise to every fifth sample (150 / 5 = 30 points)
y[::5] += np.random.randn(30) * 0.1
plt.scatter(x, y)
# Build test data: an evenly spaced grid to plot the fitted curves on
x_test = np.linspace(0, 10, 100)
# Fit one SVR per kernel and predict on the test grid
# sklearn expects a 2-D feature matrix, hence the reshape(-1, 1)
sv_line = SVR(kernel='linear')
sv_line.fit(x.reshape(-1, 1), y)
y_line_pred = sv_line.predict(x_test.reshape(-1, 1))

sv_poly = SVR(kernel='poly')
sv_poly.fit(x.reshape(-1, 1), y)
y_poly_pred = sv_poly.predict(x_test.reshape(-1, 1))

sv_rbf = SVR(kernel='rbf')
sv_rbf.fit(x.reshape(-1, 1), y)
y_rbf_pred = sv_rbf.predict(x_test.reshape(-1, 1))
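# Quick numeric comparison: SVR.score returns the R^2 on the data passed in
# (1.0 is a perfect fit), so the three kernels can be ranked on the training set.
for name, model in (('linear', sv_line), ('poly', sv_poly), ('rbf', sv_rbf)):
    print(name, model.score(x.reshape(-1, 1), y))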
# Plot the data and the three fitted curves
plt.scatter(x, y)
plt.plot(x_test, y_line_pred, label='linear', c='r')
plt.plot(x_test, y_poly_pred, label='poly')
plt.plot(x_test, y_rbf_pred, label='rbf')
plt.legend(loc='lower left')
```
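
The three regressors above are fitted with their default hyperparameters. If you want to get more out of the RBF model, a cross-validated grid search over C, gamma and epsilon is a common next step. The sketch below is self-contained and regenerates the same kind of noisy sine data; the parameter grid is illustrative rather than taken from the walkthrough above.

```python
import numpy as np
from sklearn.svm import SVR
from sklearn.model_selection import GridSearchCV

# Rebuild noisy sine data of the same shape as above (illustrative stand-in).
rng = np.random.default_rng(0)
x = rng.random(150) * 10
y = np.sin(x)
y[::5] += rng.standard_normal(30) * 0.1

# Illustrative parameter grid; 5-fold cross-validated R^2 picks the best combination.
param_grid = {
    'C': [0.1, 1, 10, 100],
    'gamma': ['scale', 0.1, 1],
    'epsilon': [0.01, 0.1, 0.5],
}
search = GridSearchCV(SVR(kernel='rbf'), param_grid, cv=5)
search.fit(x.reshape(-1, 1), y)
print(search.best_params_, search.best_score_)
```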