# Cross-validation demo.
# The main goal is accuracy estimation: cross-validation gives a more
# reliable estimate of prediction accuracy than a single train/test split.
import numpy as np
from sklearn import datasets
# NOTE: sklearn.cross_validation was removed in scikit-learn 0.20;
# the same utilities now live in sklearn.model_selection.
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt

# Load the iris dataset (150 samples, 4 features, 3 classes).
iris = datasets.load_iris()
X = iris.data
y = iris.target

# A single train/test split (fixed seed for reproducibility), and a
# baseline 5-neighbor classifier fitted on the training portion.
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=4)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)

# 5-fold cross-validation on the full data: cv=5 evaluates accuracy on
# five different train/test partitions, so `scores` is an array of five
# per-fold accuracies and its mean is a more stable estimate than any
# single split.
scores = cross_val_score(knn, X, y, cv=5, scoring='accuracy')
print(scores.mean())

# Sweep k = 1..30 and record the 10-fold cross-validated loss for each.
# scikit-learn scorers are "higher is better", so the MSE scorer is the
# negated 'neg_mean_squared_error' (the old 'mean_squared_error' name was
# removed); flip the sign back to obtain a positive loss.
k_range = range(1, 31)
k_scores = []
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    loss = -cross_val_score(knn, X, y, cv=10,
                            scoring='neg_mean_squared_error')
    k_scores.append(loss.mean())

# Plot loss vs. k; the best k is the one with the lowest mean loss.
plt.plot(k_range, k_scores)
plt.show()
# By sweeping n_neighbors (k) in KNeighborsClassifier as above, you can
# select the best k (highest accuracy / lowest loss) as the final
# hyperparameter for the model.