https://tianchi.aliyun.com/competition/entrance/231702/rankingList
1. Exploring the Data and Models
1.1 Data preprocessing
# coding=utf-8
import pandas as pd
import numpy as np
from sklearn import preprocessing
df=pd.read_csv(r'/Users/ranmo/Desktop/天池/幸福感/happiness_train_complete.csv',encoding='GB2312',index_col='id')
df = df[df["happiness"]>0]  # rows with non-positive happiness are invalid; this drops 12 bad records
df.dtypes[df.dtypes==object]  # four columns are not numeric and need converting
for i in range(df.dtypes[df.dtypes==object].shape[0]):
    print(df.dtypes[df.dtypes==object].index[i])
# convert the four remaining columns so the whole frame is numeric
df["survey_month"] = df["survey_time"].transform(lambda line:line.split(" ")[0].split("/")[1]).astype("int64")  # survey month: split date from time on the space, month is the second "/" field
df["survey_day"] = df["survey_time"].transform(lambda line:line.split(" ")[0].split("/")[2]).astype("int64")  # survey day
df["survey_hour"] = df["survey_time"].transform(lambda line:line.split(" ")[1].split(":")[0]).astype("int64")  # survey hour
df=df.drop(columns='survey_time')
enc1=preprocessing.OrdinalEncoder()
enc2=preprocessing.OrdinalEncoder()
enc3=preprocessing.OrdinalEncoder()
df['edu_other']=enc1.fit_transform(df['edu_other'].fillna(0).transform(lambda x:str(x)).values.reshape(-1,1))
print(enc1.categories_)  # inspect the encoded categories
df['property_other']=enc2.fit_transform(df['property_other'].fillna(0).transform(lambda x:str(x)).values.reshape(-1,1))
print(enc2.categories_)  # inspect the encoded categories
df['invest_other']=enc3.fit_transform(df['invest_other'].fillna(0).transform(lambda x:str(x)).values.reshape(-1,1))
print(enc3.categories_)  # inspect the encoded categories
# define X and Y
X=df.drop(columns='happiness').fillna(0)
Y=df.happiness
1.2 A quick pass with baseline models
- Linear regression
from sklearn import metrics
from sklearn import linear_model
from sklearn import model_selection
#=============
#1. Linear regression
#=============
#=============
#1.1 Ordinary least squares
#=============
reg11=linear_model.LinearRegression()
# evaluate with manual cross-validation: the regression output will be rounded in several ways, so the built-in CV helpers are not used
# mes1: raw predictions, mes2: rounded, mes3: floored, mes4: ceiled
mes1=[]
mes2=[]
mes3=[]
mes4=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=reg11.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    e2=metrics.mean_squared_error(np.round(y_pred),y_test)
    e3=metrics.mean_squared_error(np.trunc(y_pred),y_test)
    e4=metrics.mean_squared_error(np.ceil(y_pred),y_test)
    mes1.append(e1)
    mes2.append(e2)
    mes3.append(e3)
    mes4.append(e4)
print('normal_linear:')
print(mes1)
print(np.mean(mes1))
print('-------------')
print(mes2)
print(np.mean(mes2))
print('-------------')
print(mes3)
print(np.mean(mes3))
print('-------------')
print(mes4)
print(np.mean(mes4))
print()
print()
# none of the rounding schemes beats the raw regression output, and the raw non-integer predictions do not match the required target values either, so classification is worth considering
#=============
#1.2 Lasso regression (L1)
#=============
reg12=linear_model.Lasso()
# manual cross-validation, as above, because the predictions will be rounded
# mes1: raw predictions, mes2: rounded, mes3: floored, mes4: ceiled
mes1=[]
mes2=[]
mes3=[]
mes4=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=reg12.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    e2=metrics.mean_squared_error(np.round(y_pred),y_test)
    e3=metrics.mean_squared_error(np.trunc(y_pred),y_test)
    e4=metrics.mean_squared_error(np.ceil(y_pred),y_test)
    mes1.append(e1)
    mes2.append(e2)
    mes3.append(e3)
    mes4.append(e4)
print('Lasso:')
print(mes1)
print(np.mean(mes1))
print('-------------')
print(mes2)
print(np.mean(mes2))
print('-------------')
print(mes3)
print(np.mean(mes3))
print('-------------')
print(mes4)
print(np.mean(mes4))
print()
print()
#=============
#1.3 Ridge regression (L2)
#=============
reg13=linear_model.Ridge()
# manual cross-validation, as above, because the predictions will be rounded
# mes1: raw predictions, mes2: rounded, mes3: floored, mes4: ceiled
mes1=[]
mes2=[]
mes3=[]
mes4=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=reg13.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    e2=metrics.mean_squared_error(np.round(y_pred),y_test)
    e3=metrics.mean_squared_error(np.trunc(y_pred),y_test)
    e4=metrics.mean_squared_error(np.ceil(y_pred),y_test)
    mes1.append(e1)
    mes2.append(e2)
    mes3.append(e3)
    mes4.append(e4)
print('Ridge:')
print(mes1)
print(np.mean(mes1))
print('-------------')
print(mes2)
print(np.mean(mes2))
print('-------------')
print(mes3)
print(np.mean(mes3))
print('-------------')
print(mes4)
print(np.mean(mes4))
print()
print()
#=============
#1.4 Logistic regression
#=============
clf14=linear_model.LogisticRegression(penalty='none',solver='saga')  # regularisation hurts accuracy here, so it is disabled
# manual cross-validation, as above, because the predictions will be rounded
mes1=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=clf14.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes1.append(e1)
print('LR:')
print(mes1)
print(np.mean(mes1))
print()
print()
# conclusion: ordinary least squares and logistic regression perform best
- SVM
from sklearn import metrics
from sklearn import svm
from sklearn import model_selection
#=============
#2. SVM
#=============
clf2=svm.SVC()  # gamma and C left at their defaults, no tuning
# manual cross-validation, as above, because the predictions will be rounded
mes=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=clf2.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('SVM:')
print(mes)
print(np.mean(mes))
print()
print()
# conclusion: mediocre
- KNN
from sklearn import metrics
from sklearn import neighbors
from sklearn import model_selection
#=============
#3. KNN
#=============
for n in range(10,101,10):  # the choice of K will certainly matter
    clf3=neighbors.KNeighborsClassifier(n_neighbors=n)
    # manual cross-validation, as above, because the predictions will be rounded
    mes=[]
    kf=model_selection.KFold(10,shuffle=True)
    for train,test in kf.split(X):
        X_train = X.iloc[train]
        y_train = Y.iloc[train]
        X_test = X.iloc[test]
        y_test = Y.iloc[test]
        y_pred=clf3.fit(X_train,y_train).predict(X_test)
        e1=metrics.mean_squared_error(y_pred,y_test)
        mes.append(e1)
    print('KNN(n=%d):'%n)
    print(mes)
    print(np.mean(mes))
    print()
    print()
# conclusion: mediocre
- naive_bayes
from sklearn import metrics
from sklearn import naive_bayes
from sklearn import model_selection
X_new=X  # tried standardising the features first, but it made things worse, so no scaling
#=============
#4. Naive Bayes
#=============
clf4=naive_bayes.GaussianNB()  # multinomial naive Bayes would not run (it complains about the input matrix), so Gaussian NB is used here
# manual cross-validation, as above, because the predictions will be rounded
mes=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X_new.iloc[train]
    y_train = Y.iloc[train]
    X_test = X_new.iloc[test]
    y_test = Y.iloc[test]
    y_pred=clf4.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('bayes:')
print(mes)
print(np.mean(mes))
print()
print()
# conclusion: very poor, so Gaussian NB is clearly a bad fit; multinomial NB might do better
- Decision tree
from sklearn import metrics
from sklearn import tree
from sklearn import model_selection
#=============
#5. Decision tree
#=============
clf5=tree.DecisionTreeClassifier()
# manual cross-validation, as above, because the predictions will be rounded
mes=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=clf5.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('Tree:')
print(mes)
print(np.mean(mes))
print()
print()
# conclusion: very poor
- MLP
from sklearn import metrics
from sklearn import neural_network
from sklearn import model_selection
#=============
#6. MLP
#=============
clf6=neural_network.MLPClassifier(hidden_layer_sizes=(10,8,5,3,2),activation='logistic')  # hidden layer sizes picked arbitrarily
# manual cross-validation, as above, because the predictions will be rounded
mes=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=clf6.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('MLP:')
print(mes)
print(np.mean(mes))
print()
print()
# conclusion: surprisingly decent; worth tuning a neural network later
- Random forest
from sklearn import metrics
from sklearn import ensemble
from sklearn import model_selection
#=============
#7. Random forest
#=============
clf7=ensemble.RandomForestRegressor(n_estimators=20,n_jobs=-1)
# manual cross-validation, as above, because the predictions will be rounded
mes=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=clf7.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('RandomForest:')
print(mes)
print(np.mean(mes))
print()
print()
# conclusion: mediocre; tune later
#=============
# look at the feature importance ranking
import matplotlib.pyplot as plt
%matplotlib inline
a=ensemble.RandomForestRegressor(n_estimators=20).fit(X,Y).feature_importances_
temp=np.argsort(a)  # indices that sort the importances in ascending order
a=list(a)
a.sort()
b=[]
for i in temp:
    b.append(X.columns[i])
plt.figure(figsize=(10,40))
plt.grid()
plt.barh(b,a,)
# Findings from the importances:
# 1. The three encoded columns edu_other, property_other and invest_other matter little, and none of the property_* or invest_* columns look important.
# 2. Among the top ten features, equity and depression reflect social attitude and state of mind;
#    class, family_income and floor_area reflect wealth;
#    birth, marital_1st, weight_jin and country reflect objective circumstances;
#    why survey_day has any influence at all is the most puzzling result.
- gdbt
from sklearn import metrics
from sklearn import ensemble
from sklearn import model_selection
#=============
#8. gdbt
#=============
clf8=ensemble.GradientBoostingRegressor(max_features=20)  # max_features must be limited, otherwise training is far too slow
# manual cross-validation, as above, because the predictions will be rounded
mes=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=clf8.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('gdbt:')
print(mes)
print(np.mean(mes))
print()
print()
# conclusion: works well
#=============
# look at the feature importance ranking
import matplotlib.pyplot as plt
%matplotlib inline
a=ensemble.GradientBoostingRegressor(max_features=20).fit(X,Y).feature_importances_  # use the regressor, matching clf8 above
temp=np.argsort(a)  # indices that sort the importances in ascending order
a=list(a)
a.sort()
b=[]
for i in temp:
    b.append(X.columns[i])
plt.figure(figsize=(10,40))
plt.grid()
plt.barh(b,a,)
- xgboost
from sklearn import metrics
import xgboost
from sklearn import model_selection
#=============
#9. xgboost
#=============
clf9=xgboost.XGBRegressor()
# manual cross-validation, as above, because the predictions will be rounded
mes=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=clf9.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('xgboost:')
print(mes)
print(np.mean(mes))
print()
print()
# conclusion: remarkable, excellent results straight out of the box
#=============
# look at the feature importance ranking
import matplotlib.pyplot as plt
%matplotlib inline
a=xgboost.XGBRegressor().fit(X,Y).feature_importances_
temp=np.argsort(a)  # indices that sort the importances in ascending order
a=list(a)
a.sort()
b=[]
for i in temp:
    b.append(X.columns[i])
plt.figure(figsize=(10,40))
plt.grid()
plt.barh(b,a,)
- lightgbm
from sklearn import metrics
import lightgbm
from sklearn import model_selection
# workaround for a duplicate OpenMP library error when importing LightGBM
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
#=============
#10. LightGBM
#=============
clf10=lightgbm.LGBMRegressor()
# manual cross-validation, as above, because the predictions will be rounded
mes=[]
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test = X.iloc[test]
    y_test = Y.iloc[test]
    y_pred=clf10.fit(X_train,y_train).predict(X_test)
    e1=metrics.mean_squared_error(y_pred,y_test)
    mes.append(e1)
print('lightgbm:')
print(mes)
print(np.mean(mes))
print()
print()
# conclusion: also very good; tune later
#=============
# look at the feature importance ranking
import matplotlib.pyplot as plt
%matplotlib inline
a=lightgbm.LGBMRegressor().fit(X,Y).feature_importances_
temp=np.argsort(a)  # indices that sort the importances in ascending order
a=list(a)
a.sort()
b=[]
for i in temp:
    b.append(X.columns[i])
plt.figure(figsize=(10,40))
plt.grid()
plt.barh(b,a,)
1.3 Findings
Judging by these results, gdbt, xgboost and lightgbm all do well, random forest is mediocre, and ordinary least squares and LR are also reasonable. The plan is to tune xgboost, lightgbm, gdbt and random forest, and then use LR to blend the models further.
Submitting the untuned xgboost model scores 0.48043 with the raw predictions and 0.55394 after rounding.
df1=pd.read_csv(r'/Users/ranmo/Desktop/天池/幸福感/happiness_test_complete.csv',encoding='GB2312',index_col='id')
# convert the four extra columns so df1 is fully numeric, mirroring the training set
df1["survey_month"] = df1["survey_time"].transform(lambda line:line.split(" ")[0].split("/")[1]).astype("int64")  # survey month
df1["survey_day"] = df1["survey_time"].transform(lambda line:line.split(" ")[0].split("/")[2]).astype("int64")  # survey day
df1["survey_hour"] = df1["survey_time"].transform(lambda line:line.split(" ")[1].split(":")[0]).astype("int64")  # survey hour
df1=df1.drop(columns='survey_time')
def temp1(a):
    if a not in enc1.categories_[0]:
        return 0
    else:
        return a
df1['edu_other']=enc1.transform(df1['edu_other'].transform(temp1).transform(lambda x:str(x)).values.reshape(-1,1))
def temp2(a):
    if a not in enc2.categories_[0]:
        return 0
    else:
        return a
df1['property_other']=enc2.transform(df1['property_other'].transform(temp2).transform(lambda x:str(x)).values.reshape(-1,1))
def temp3(a):
    if a not in enc3.categories_[0]:
        return 0
    else:
        return a
df1['invest_other']=enc3.transform(df1['invest_other'].transform(temp3).transform(lambda x:str(x)).values.reshape(-1,1))
# build X_test (the competition test features)
X_test=df1.fillna(0)
# Submission 1
y_test=xgboost.XGBRegressor().fit(X,Y).predict(X_test)
df1_final=pd.DataFrame({'id':X_test.index,'happiness':y_test}).set_index('id')
df1_final.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/df1_final.csv')
# Submission 1, rounded
df1_final_round=pd.DataFrame({'id':X_test.index,'happiness':np.round(y_test)}).set_index('id')
df1_final_round.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/df1_final_round.csv')
2. Hyperparameter Search
2.1 xgboost
Reference: https://blog.csdn.net/han_xiaoyang/article/details/52665396
The main xgboost parameters include:
- max_depth: best kept between 3 and 10.
- min_child_weight: the minimum sum of instance weights in a leaf; if a split would leave a leaf whose total sample weight falls below this value, the split is not made. It is an effective guard against overfitting to unusual samples. Default 1.
- gamma: the minimum loss reduction required to make a further split. Start small, somewhere between 0 and 0.2.
- subsample, colsample_bytree: subsample is the fraction of samples drawn when building each tree, colsample_bytree the fraction of features; typical values lie between 0.5 and 0.9, and setting them too low tends to underfit.
- scale_pos_weight: used to handle class imbalance and speed up convergence; not investigated here, so it is left alone.
- reg_alpha
- reg_lambda
and so on. A minimal setup sketch follows.
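The grid searches below use X_train / y_train, which this write-up never shows being created; the following is a minimal sketch (the split ratio, random seed and starting values are assumptions, not from the original) of one way to set them up, and of how the parameters listed above map onto the XGBRegressor constructor.
from sklearn import model_selection
import xgboost

# assumed holdout split; the original does not show how X_train / y_train were built
X_train, X_holdout, y_train, y_holdout = model_selection.train_test_split(
    X, Y, test_size=0.2, random_state=42)

# the parameters discussed above are plain constructor arguments of XGBRegressor
base_model = xgboost.XGBRegressor(
    max_depth=3,           # tree depth, usually 3-10
    min_child_weight=1,    # minimum total instance weight allowed in a leaf
    gamma=0,               # minimum loss reduction required to split further
    subsample=0.8,         # fraction of rows sampled per tree
    colsample_bytree=0.8,  # fraction of columns sampled per tree
    reg_alpha=0,           # L1 regularisation
    reg_lambda=1)          # L2 regularisation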
2.1.1 Baseline parameters
# run the base model with its default parameters
clf9=xgboost.XGBRegressor()  # default squared-error objective; the original passed loss_function='RMSE', which is not an xgboost argument
clf=model_selection.GridSearchCV(clf9,{'max_depth':np.array([3])},cv=10,n_jobs=-1,scoring='neg_mean_squared_error')  # score with mean squared error
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
PS: if scoring is not set, GridSearchCV falls back to the estimator's own score method, but here the results look wrong unless scoring is set explicitly; presumably because the regressor's default score is R^2 rather than RMSE.
2.1.2 Tuning max_depth and min_child_weight
# coarse search over max_depth and min_child_weight
param_test = {
    'max_depth':range(1,10,2),
    'min_child_weight':range(1,6,2)
}
clf=model_selection.GridSearchCV(clf9,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print()
print(clf.best_params_)
Refine the search in a narrower range around the best coarse values.
# fine search over max_depth and min_child_weight
param_test = {
    'max_depth':[4,5,6],
    'min_child_weight':[4,5,6]
}
clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print(clf.best_score_)
print(clf.best_params_)
A clear improvement on the original 0.47178.
2.1.3 Tuning gamma
# coarse search over gamma
param_test = {
    'max_depth':np.array([4]),
    'min_child_weight':np.array([5]),
    'gamma':np.arange(0,0.5,0.1)
}
clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)
The best gamma is the default value, so it stays unchanged. (Though why did the score drop again?)
2.1.4 Tuning subsample and colsample_bytree
# coarse search over subsample and colsample_bytree
param_test = {
    'max_depth':np.array([4]),
    'min_child_weight':np.array([5]),
    'gamma':np.array([0]),
    'subsample':np.arange(0.6,1,0.1),
    'colsample_bytree':np.arange(0.6,1,0.1)
}
clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)
The best coarse values are 0.9 and 0.8; refine around them.
# fine search over subsample and colsample_bytree
param_test = {
    'max_depth':np.array([4]),
    'min_child_weight':np.array([5]),
    'gamma':np.array([0]),
    'subsample':np.arange(0.75,0.86,0.05),
    'colsample_bytree':np.arange(0.75,0.86,0.05)
}
clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)
The fine search settles on 0.75 and 0.8. (Surprisingly, the score went up.)
2.1.5 Tuning the regularisation parameters
# coarse search over reg_alpha and reg_lambda
param_test = {
    'max_depth':np.array([4]),
    'min_child_weight':np.array([5]),
    'gamma':np.array([0]),
    'subsample':np.array([0.8]),
    'colsample_bytree':np.array([0.75]),
    'reg_alpha':[1e-5, 1e-2, 0.1, 1, 100],
    'reg_lambda':[1e-5, 1e-2, 0.1, 1, 100]
}
clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)
Search around 1 and 0.1 to see whether better values exist.
# fine search over reg_alpha and reg_lambda
param_test = {
    'max_depth':np.array([4]),
    'min_child_weight':np.array([5]),
    'gamma':np.array([0]),
    'subsample':np.array([0.8]),
    'colsample_bytree':np.array([0.75]),
    'reg_alpha':[0,0.5,1,2,5],
    'reg_lambda':[0,0.05,0.1,0.2,0.5]
}
clf=model_selection.GridSearchCV(clf9,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)
Best values: 5 and 0.1.
2.1.6 Final run with a low learning rate and more trees
# final configuration
param_test = {
    'max_depth':np.array([4]),
    'min_child_weight':np.array([5]),
    'gamma':np.array([0]),
    'subsample':np.array([0.8]),
    'colsample_bytree':np.array([0.75]),
    'reg_alpha':np.array([5]),
    'reg_lambda':np.array([0.1]),
    'learning_rate':np.array([0.01]),
    'n_estimators':np.array([5000]),
}
clf9=xgboost.XGBRegressor()  # default squared-error objective, as above
clf=model_selection.GridSearchCV(clf9,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')  # score with mean squared error
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_train_score']:=%s"%clf.cv_results_['mean_train_score'])
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)
The numbers do improve markedly, from the initial 0.47178 down to 0.46099. However, with the lower learning rate and more trees the model becomes noticeably slower, and the submitted score actually got worse (possibly overfitting, or simply because small gains or losses at this level are normal noise), so the final model does not use the low-learning-rate, many-trees configuration. An early-stopping sketch is shown below as an alternative way to pick the tree count.
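As an alternative to fixing n_estimators=5000 at the low learning rate, early stopping on a validation split can pick the tree count automatically. This is only a sketch, not part of the original workflow: the split and the 50-round patience are assumptions, and where early_stopping_rounds is passed depends on the xgboost version.
from sklearn import model_selection
import xgboost

X_tr, X_val, y_tr, y_val = model_selection.train_test_split(X_train, y_train, test_size=0.2)
slow_model = xgboost.XGBRegressor(
    max_depth=4, min_child_weight=5, gamma=0,
    subsample=0.8, colsample_bytree=0.75,
    reg_alpha=5, reg_lambda=0.1,
    learning_rate=0.01, n_estimators=5000)
# older xgboost versions accept early_stopping_rounds in fit(); newer ones expect it
# in the constructor or as a callback
slow_model.fit(X_tr, y_tr,
               eval_set=[(X_val, y_val)],
               eval_metric='rmse',
               early_stopping_rounds=50,
               verbose=False)
print(slow_model.best_iteration)  # number of boosting rounds actually needed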
# Submission 2
from sklearn import metrics
import xgboost
from sklearn import model_selection
from sklearn.externals import joblib
#=============
#xgboost_modified
#=============
clf_xgboost_modified=xgboost.XGBRegressor(max_depth=4,min_child_weight=5,gamma=0,subsample=0.8,colsample_bytree=0.75,reg_alpha=5,reg_lambda=0.1)
# manual cross-validation, as above, because the predictions will be rounded
mes=[]
i=0
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train = X.iloc[train]
    y_train = Y.iloc[train]
    X_test1 = X.iloc[test]
    y_test1 = Y.iloc[test]
    clf_xgboost_modified.fit(X_train,y_train)
    y_pred=clf_xgboost_modified.predict(X_test1)
    e1=metrics.mean_squared_error(y_pred,y_test1)
    mes.append(e1)
    joblib.dump(clf_xgboost_modified,filename='/Users/ranmo/Desktop/天池/幸福感/xgboost/xgboost_%d.pkl'%i)
    y_test=clf_xgboost_modified.predict(X_test)
    df2_final=pd.DataFrame({'id':X_test.index,'happiness':y_test}).set_index('id')
    df2_final.to_csv('/Users/ranmo/Desktop/天池/幸福感/xgboost/df2_xgboost_%d.csv'%i)
    i+=1
print('clf_xgboost_modified:')
print(mes)
print(np.mean(mes))
print()
print()
The best submitted score is 0.47675.
2.2 lightgbm
2.2.1 Baseline parameters
Default parameters:
# run the base model with its default parameters
clf10=lightgbm.LGBMRegressor(metric='l2')  # the default metric for regression is l2
clf=model_selection.GridSearchCV(clf10,{'max_depth':np.array([-1])},cv=10,n_jobs=-1,scoring='neg_mean_squared_error')  # score with mean squared error
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
2.2.2 Search results
The hyperparameter search essentially follows the xgboost procedure, but LightGBM's own mechanics bring in extra parameters:
- max_depth: same meaning as in xgboost.
- num_leaves: xgboost has the analogous max_leaf_nodes (maximum number of leaf nodes), but there you effectively pick one of the two constraints, since a full binary tree of depth d has at most 2^d leaves, so it is usually left unset in xgboost. LightGBM grows trees leaf-wise, so num_leaves should be tuned together with max_depth and kept below 2^max_depth.
- min_child_samples: minimum number of samples required in a leaf; xgboost has no direct counterpart.
- min_child_weight: as in xgboost, the minimum total sample weight required in a leaf.
- min_split_gain: as with xgboost's gamma, the minimum loss reduction required to split.
- subsample: same as in xgboost.
- colsample_bytree: same as in xgboost.
- subsample_freq: specific to LightGBM and used together with subsample; it is the bagging frequency. 0 (the default) disables bagging entirely, and k > 0 performs bagging every k iterations.
- reg_alpha, reg_lambda, learning_rate and n_estimators carry over unchanged.
In principle LightGBM has more parameters to search, but each run is faster, so the overall tuning effort is acceptable. A staged-search sketch follows the reference link below.
https://www.imooc.com/article/43784?block_id=tuijian_wz
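The block below only records the end point of the search. A staged search in the same style as the xgboost procedure could start with max_depth and num_leaves together; this is a sketch and the ranges are assumptions, not the values actually searched.
import lightgbm
from sklearn import model_selection

clf_lgb=lightgbm.LGBMRegressor(metric='l2')
param_test = {
    'max_depth':range(3,10,2),
    'num_leaves':[15,31,63,127],  # values above 2**max_depth add nothing, since depth then caps the tree
}
clf=model_selection.GridSearchCV(clf_lgb,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print(clf.best_score_)
print(clf.best_params_)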
# final tuned configuration
clf10=lightgbm.LGBMRegressor(metric='l2')  # the default metric for regression is l2
param_test = {
    'max_depth':np.array([9]),
    'min_child_weight':np.array([0.0001]),
    'min_split_gain':np.array([0.4]),
    'subsample':np.array([0.5]),
    'colsample_bytree':np.array([1]),
    'reg_alpha':np.array([1e-05]),
    'reg_lambda':np.array([0.0001]),
    'learning_rate':np.array([0.1]),
}
clf=model_selection.GridSearchCV(clf10,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)
# Result: {'colsample_bytree': 1, 'learning_rate': 0.1, 'max_depth': 9, 'min_child_weight': 0.0001, 'min_split_gain': 0.4, 'reg_alpha': 1e-05, 'reg_lambda': 0.0001, 'subsample': 0.5}
RMSE drops from 0.47728 to 0.47000.
Submitting the tuned lightgbm model gives a best score of 0.48128.
2.3 gdbt
https://blog.csdn.net/manjhok/article/details/82017696
2.3.1 Baseline parameters
Default parameters:
#=============
#GDBT_modified
#=============
# run the base model with its default parameters
clf8=ensemble.GradientBoostingRegressor(loss='ls')
clf=model_selection.GridSearchCV(clf8,{'max_depth':np.array([3])},cv=10,n_jobs=-1,scoring='neg_mean_squared_error')  # score with mean squared error
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
2.3.2 Search results
The search procedure is much the same; only the parameter names differ.
# final tuned configuration
clf8=ensemble.GradientBoostingRegressor(loss='ls')
param_test = {
    'max_depth':np.array([2]),
    'min_weight_fraction_leaf':np.array([0.002]),
    'min_impurity_split':np.array([0.0001]),
    'subsample':np.array([0.96]),
    'max_features':np.array([0.88]),
    'n_estimators':np.array([80]),
    'learning_rate':np.array([0.2]),
}
clf=model_selection.GridSearchCV(clf8,param_test,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)
# Result: {'colsample_bytree': 1, 'learning_rate': 0.1, 'max_depth': 9, 'min_child_weight': 0.0001, 'min_split_gain': 0.4, 'reg_alpha': 1e-05, 'reg_lambda': 0.0001, 'subsample': 0.5}
RMSE drops from 0.47534 to 0.47148.
Submitting the tuned gdbt model gives a best score of 0.48317.
2.4 Random forest
https://blog.csdn.net/u012559520/article/details/77336098
2.4.1 Baseline parameters
#=============
#RandomForest_modified
#=============
# run the base model with its default parameters
clf7=ensemble.RandomForestRegressor(criterion='mse',n_jobs=-1)
clf=model_selection.GridSearchCV(clf7,{'min_samples_split':np.array([2])},cv=10,n_jobs=-1,scoring='neg_mean_squared_error')  # score with mean squared error
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
2.4.2 Search results
# final tuned configuration
param_test = {
    'min_samples_split':np.array([4]),
    'min_weight_fraction_leaf':np.array([0.01]),
    'min_impurity_decrease':np.array([0]),
    'n_estimators':[150],
    'max_features':[0.8],  # for a random forest this probably should not be too high
}
clf=model_selection.GridSearchCV(clf7,param_test ,cv=10,n_jobs=-1,scoring='neg_mean_squared_error')
clf.fit(X_train,y_train)
print("clf.cv_results_['mean_test_score']:=%s"%clf.cv_results_['mean_test_score'])
print(clf.best_score_)
print(clf.best_params_)
# Result: {'max_features': 0.8, 'min_impurity_decrease': 0, 'min_samples_split': 4, 'min_weight_fraction_leaf': 0.01, 'n_estimators': 150}
RMSE drops from 0.53373 to 0.48867.
Submitting the tuned random forest gives a best score of 0.51088.
3. Model Ensembling
3.1 Averaging xgboost + lightgbm + gdbt
# average the existing xgboost + lightgbm + gdbt models
# manual cross-validation, as above, because the predictions will be rounded
xgboost_mes=[]
lightgbm_mes=[]
gdbt_mes=[]
mix_mes=[]
i=0
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train1 = X_train.iloc[train]
    y_train1 = y_train.iloc[train]
    X_test1 = X_train.iloc[test]
    y_test1 = y_train.iloc[test]
    xgboost=joblib.load(r'C:\Users\sunsharp\Desktop\學習\幸福感\xgboost\xgboost_%d.pkl'%i)
    lightgbm=joblib.load(r'C:\Users\sunsharp\Desktop\學習\幸福感\lightgbm\lightgbm_%d.pkl'%i)
    gdbt=joblib.load(r'C:\Users\sunsharp\Desktop\學習\幸福感\gdbt\gdbt_%d.pkl'%i)
    xgboost_y_pred=xgboost.fit(X_train1,y_train1).predict(X_test1)
    lightgbm_y_pred=lightgbm.fit(X_train1,y_train1).predict(X_test1)
    gdbt_y_pred=gdbt.fit(X_train1,y_train1).predict(X_test1)
    mix_y_pred=(xgboost_y_pred+lightgbm_y_pred+gdbt_y_pred)/3
    xgboost_mes.append(metrics.mean_squared_error(xgboost_y_pred,y_test1))
    lightgbm_mes.append(metrics.mean_squared_error(lightgbm_y_pred,y_test1))
    gdbt_mes.append(metrics.mean_squared_error(gdbt_y_pred,y_test1))
    mix_mes.append(metrics.mean_squared_error(mix_y_pred,y_test1))
    xgboost_y_test=xgboost.predict(X_test)
    lightgbm_y_test=lightgbm.predict(X_test)
    gdbt_y_test=gdbt.predict(X_test)
    mix_y_test=(xgboost_y_test+lightgbm_y_test+gdbt_y_test)/3
    df_mix_final=pd.DataFrame({'id':X_test.index,'happiness':mix_y_test}).set_index('id')
    df_mix_final.to_csv(r'C:\Users\sunsharp\Desktop\學習\幸福感\mixmodel\df_mix_%d.csv'%i)
    i+=1
print('xgboost:')
print(xgboost_mes)
print(np.mean(xgboost_mes))
print()
print('lightgbm:')
print(lightgbm_mes)
print(np.mean(lightgbm_mes))
print()
print('gdbt:')
print(gdbt_mes)
print(np.mean(gdbt_mes))
print()
print('mix:')
print(mix_mes)
print(np.mean(mix_mes))
print()
On the training folds the result improves slightly.
Submitting the averaged model gives a best score of 0.47104.
3.2 Linear regression stacking of xgboost + lightgbm + gdbt
# LR stacking of the existing xgboost + lightgbm + gdbt models
# manual cross-validation, as above, because the predictions will be rounded
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.externals import joblib
from sklearn import metrics
import lightgbm
# workaround for a duplicate OpenMP library error when importing LightGBM
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import xgboost
from sklearn import ensemble
from sklearn import linear_model
xgboost_mes=[]
lightgbm_mes=[]
gdbt_mes=[]
lrmix_mes=[]
i=0
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train1 = X_train.iloc[train]
    y_train1 = y_train.iloc[train]
    X_test1 = X_train.iloc[test]
    y_test1 = y_train.iloc[test]
    print(i)
    xgboost=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/xgboost/xgboost_%d.pkl'%i)
    lightgbm=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/lightgbm/lightgbm_%d.pkl'%i)
    gdbt=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/gdbt/gdbt_%d.pkl'%i)
    xgboost_y_pred=xgboost.fit(X_train1,y_train1).predict(X_test1)
    lightgbm_y_pred=lightgbm.fit(X_train1,y_train1).predict(X_test1)
    gdbt_y_pred=gdbt.fit(X_train1,y_train1).predict(X_test1)
    # fit the stacking model on the base models' training-set predictions
    a=xgboost.fit(X_train1,y_train1).predict(X_train1)
    b=lightgbm.fit(X_train1,y_train1).predict(X_train1)
    c=gdbt.fit(X_train1,y_train1).predict(X_train1)
    lr_mix=linear_model.LinearRegression().fit(np.array([a,b,c]).T,y_train1)
    lrmix_y_pred=lr_mix.predict(np.array([xgboost_y_pred,lightgbm_y_pred,gdbt_y_pred]).T)
    xgboost_mes.append(metrics.mean_squared_error(xgboost_y_pred,y_test1))
    lightgbm_mes.append(metrics.mean_squared_error(lightgbm_y_pred,y_test1))
    gdbt_mes.append(metrics.mean_squared_error(gdbt_y_pred,y_test1))
    lrmix_mes.append(metrics.mean_squared_error(lrmix_y_pred,y_test1))
    xgboost_y_test=xgboost.predict(X_test)
    lightgbm_y_test=lightgbm.predict(X_test)
    gdbt_y_test=gdbt.predict(X_test)
    lrmix_y_test=lr_mix.predict(np.array([xgboost_y_test,lightgbm_y_test,gdbt_y_test]).T)
    df_lrmix_final=pd.DataFrame({'id':X_test.index,'happiness':lrmix_y_test}).set_index('id')
    df_lrmix_final.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/lrmixmodel/df_lrmix_%d.csv'%i)
    i+=1
print('xgboost:')
print(xgboost_mes)
print(np.mean(xgboost_mes))
print()
print('lightgbm:')
print(lightgbm_mes)
print(np.mean(lightgbm_mes))
print()
print('gdbt:')
print(gdbt_mes)
print(np.mean(gdbt_mes))
print()
print('lrmix:')
print(lrmix_mes)
print(np.mean(lrmix_mes))
print()
The results are disappointing. The reason is that the stacking regression is fit on the training folds themselves (where the score reaches about 0.15): it raises training accuracy, but that is overfitting, and the gain does not carry over to the test set. An out-of-fold variant is sketched below.
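One way around that leakage is to fit the meta-model on out-of-fold predictions rather than on in-sample predictions. The sketch below is not from the original write-up; the base models (the tuned xgboost from 2.1 plus default lightgbm and gdbt regressors) and the fold setup are assumptions used purely to illustrate the idea.
import numpy as np
import lightgbm as lgb
from sklearn import model_selection, linear_model, metrics, ensemble

base_models = [clf_xgboost_modified,                           # tuned xgboost from section 2.1
               lgb.LGBMRegressor(metric='l2'),                 # assumed default lightgbm
               ensemble.GradientBoostingRegressor(loss='ls')]  # assumed default gdbt

oof_pred = np.zeros((len(X_train), len(base_models)))  # out-of-fold predictions
kf = model_selection.KFold(10, shuffle=True, random_state=0)
for tr_idx, val_idx in kf.split(X_train):
    for m, model in enumerate(base_models):
        model.fit(X_train.iloc[tr_idx], y_train.iloc[tr_idx])
        oof_pred[val_idx, m] = model.predict(X_train.iloc[val_idx])

# the meta-model only ever sees predictions for rows the base models were not trained on
meta = linear_model.LinearRegression().fit(oof_pred, y_train)
print(meta.coef_)
print(metrics.mean_squared_error(meta.predict(oof_pred), y_train))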
3.3 Weighted averaging of xgboost + lightgbm + gdbt
Since plain averaging works well while regression stacking overfits, and the stacking coefficients sum to roughly 1, the three models are instead combined with a weighted average whose weights sum to 1.
a=np.arange(0,1.1,0.05)
b=np.arange(0,1.1,0.05)
c=np.arange(0,1.1,0.05)
coef_list=[]
for i in a:
    for j in b:
        for k in c:
            if abs(i+j+k-1)<1e-9:  # float-safe check that the three weights sum to 1
                coef_list.append([i,j,k])
# weighted averaging of the existing xgboost + lightgbm + gdbt models
# manual cross-validation, as above, because the predictions will be rounded
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.externals import joblib
from sklearn import metrics
import lightgbm
# workaround for a duplicate OpenMP library error when importing LightGBM
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import xgboost
from sklearn import ensemble
xgboost_mes=[]
lightgbm_mes=[]
gdbt_mes=[]
weightmix_mes=[]
i=0
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train1 = X_train.iloc[train]
    y_train1 = y_train.iloc[train]
    X_test1 = X_train.iloc[test]
    y_test1 = y_train.iloc[test]
    print(i)
    xgboost=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/xgboost/xgboost_%d.pkl'%i)
    lightgbm=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/lightgbm/lightgbm_%d.pkl'%i)
    gdbt=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/gdbt/gdbt_%d.pkl'%i)
    xgboost_y_pred=xgboost.fit(X_train1,y_train1).predict(X_test1)
    lightgbm_y_pred=lightgbm.fit(X_train1,y_train1).predict(X_test1)
    gdbt_y_pred=gdbt.fit(X_train1,y_train1).predict(X_test1)
    # pick the weight combination with the lowest fold error
    error_list=[]
    for coef_i in coef_list:
        error_list.append(metrics.mean_squared_error(np.dot(np.array([xgboost_y_pred,lightgbm_y_pred,gdbt_y_pred]).T,coef_i),y_test1))
    coef=coef_list[np.argmin(error_list)]
    xgboost_mes.append(metrics.mean_squared_error(xgboost_y_pred,y_test1))
    lightgbm_mes.append(metrics.mean_squared_error(lightgbm_y_pred,y_test1))
    gdbt_mes.append(metrics.mean_squared_error(gdbt_y_pred,y_test1))
    weightmix_mes.append(min(error_list))
    xgboost_y_test=xgboost.predict(X_test)
    lightgbm_y_test=lightgbm.predict(X_test)
    gdbt_y_test=gdbt.predict(X_test)
    weightmix_y_test=np.dot(np.array([xgboost_y_test,lightgbm_y_test,gdbt_y_test]).T,coef)
    df_weightmix_final=pd.DataFrame({'id':X_test.index,'happiness':weightmix_y_test}).set_index('id')
    df_weightmix_final.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/weightmixmodel/df_weightmix_%d.csv'%i)
    i+=1
print('xgboost:')
print(xgboost_mes)
print(np.mean(xgboost_mes))
print()
print('lightgbm:')
print(lightgbm_mes)
print(np.mean(lightgbm_mes))
print()
print('gdbt:')
print(gdbt_mes)
print(np.mean(gdbt_mes))
print()
print('weightmix:')
print(weightmix_mes)
print(np.mean(weightmix_mes))
print()
The result improves slightly. The actual best submitted score is 0.47531.
3.4 Neural network stacking of xgboost + lightgbm + gdbt
# MLP stacking of the existing xgboost + lightgbm + gdbt models
# manual cross-validation, as above, because the predictions will be rounded
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.externals import joblib
from sklearn import metrics
import lightgbm
# workaround for a duplicate OpenMP library error when importing LightGBM
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import xgboost
from sklearn import ensemble
from sklearn import neural_network
xgboost_mes=[]
lightgbm_mes=[]
gdbt_mes=[]
MLPmix_mes=[]
i=0
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X):
    X_train1 = X_train.iloc[train]
    y_train1 = y_train.iloc[train]
    X_test1 = X_train.iloc[test]
    y_test1 = y_train.iloc[test]
    print(i)
    xgboost=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/xgboost/xgboost_%d.pkl'%i)
    lightgbm=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/lightgbm/lightgbm_%d.pkl'%i)
    gdbt=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/gdbt/gdbt_%d.pkl'%i)
    xgboost_y_pred=xgboost.fit(X_train1,y_train1).predict(X_test1)
    lightgbm_y_pred=lightgbm.fit(X_train1,y_train1).predict(X_test1)
    gdbt_y_pred=gdbt.fit(X_train1,y_train1).predict(X_test1)
    # fit the stacking model on the base models' training-set predictions
    a=xgboost.fit(X_train1,y_train1).predict(X_train1)
    b=lightgbm.fit(X_train1,y_train1).predict(X_train1)
    c=gdbt.fit(X_train1,y_train1).predict(X_train1)
    MLP_mix=neural_network.MLPClassifier(hidden_layer_sizes=(5,3,2),activation='logistic').fit(np.array([a,b,c]).T,y_train1)
    MLPmix_y_pred=MLP_mix.predict(np.array([xgboost_y_pred,lightgbm_y_pred,gdbt_y_pred]).T)
    xgboost_mes.append(metrics.mean_squared_error(xgboost_y_pred,y_test1))
    lightgbm_mes.append(metrics.mean_squared_error(lightgbm_y_pred,y_test1))
    gdbt_mes.append(metrics.mean_squared_error(gdbt_y_pred,y_test1))
    MLPmix_mes.append(metrics.mean_squared_error(MLPmix_y_pred,y_test1))
    xgboost_y_test=xgboost.predict(X_test)
    lightgbm_y_test=lightgbm.predict(X_test)
    gdbt_y_test=gdbt.predict(X_test)
    MLPmix_y_test=MLP_mix.predict(np.array([xgboost_y_test,lightgbm_y_test,gdbt_y_test]).T)
    df_MLPmix_final=pd.DataFrame({'id':X_test.index,'happiness':MLPmix_y_test}).set_index('id')
    df_MLPmix_final.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/MLPmixmodel/df_MLPmix_%d.csv'%i)
    i+=1
print('xgboost:')
print(xgboost_mes)
print(np.mean(xgboost_mes))
print()
print('lightgbm:')
print(lightgbm_mes)
print(np.mean(lightgbm_mes))
print()
print('gdbt:')
print(gdbt_mes)
print(np.mean(gdbt_mes))
print()
print('MLPmix:')
print(MLPmix_mes)
print(np.mean(MLPmix_mes))
print()
This does not work well either.
4. Simple Feature Engineering
This step should really have come before modelling, but the ensemble methods already identify the important features and down-weight the unimportant ones, which greatly reduces the need for manual feature engineering. On the other hand, finding features that genuinely and quickly improve accuracy is the most labour-intensive part of the work, so the models were run first, and feature engineering is now applied selectively, guided by the importances those models reported.
4.1 Removing unimportant features
According to the feature importances reported by the ensemble models, the unimportant features are mainly:
- edu_other: consider removing edu_other
- invest and invest_other: consider removing all invest_* columns and invest_other
- property and property_other: consider removing property_other
- s_work_type: consider removing s_work_type
4.1.1 Low variance
np.var(X_train)[np.var(X_train)<=np.percentile(np.var(X_train),20)]
The features with variance below 0.1 are:
- edu_other
- property_0 and property_3 to property_7
- invest_0 to invest_8
These features will be removed later; an equivalent check is sketched below.
4.1.2 Chi-square test
The chi-square test initially refuses to run because the input matrix contains negative values; closer inspection shows that quite a few features do have negative entries:
So the raw data contains invalid values after all, and they should really have been cleaned before modelling.
Here the negative values are replaced with each feature's mode, and the chi-square test is then run.
X_train_new=X_train.copy()  # work on a copy so the original training frame is not modified
# replace negative values with the feature's mode
dict_temp={}
for i in X_train_new.columns:
    dict_temp[i]=X_train_new[i].value_counts().index[0]
for i in dict_temp.keys():
    X_train_new.loc[X_train_new[i]<0,i]=dict_temp[i]
# a few negatives still remain after this, so simply take absolute values
X_train_new=np.abs(X_train_new)
from sklearn import feature_selection  # needed for chi2 below
p_value=feature_selection.chi2(X_train_new,y_train)[1]
p_value[np.isnan(p_value)]=0  # some p-values come out as NaN; set them to 0
# plot the features ranked by p-value
import matplotlib.pyplot as plt
%matplotlib inline
temp=np.argsort(-p_value)  # indices sorted by descending p-value
p_value=-np.sort(-p_value)  # sort the p-values the same way so the bars line up with the labels
b=[]
for i in temp:
    b.append(X_train_new.columns[i])
plt.figure(figsize=(10,40))
plt.grid()
plt.barh(b,p_value,)
The features most strongly related to the target are mainly:
- the income-related features;
- marital, marital status;
- the respondent's and the parents' birth years;
- public_service, satisfaction with public services, and so on.
The columns flagged earlier (edu_other, property_0, property_3 to property_7, invest_0 to invest_8) turn out to be essentially unrelated variables; the only exception is invest_6, whose p-value is relatively high, but it is removed all the same. The drop step that produces X_train_new / X_test_new is sketched below.
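Section 4.2 below works with X_train_new / X_test_new after the removal, but the write-up never shows the columns actually being dropped. A minimal sketch follows; the column list is reconstructed from the conclusions above and the names are assumptions that may not match the dataset exactly.
# assumed column names, reconstructed from the low-variance / chi-square conclusions above
drop_cols = (['edu_other', 'property_0']
             + ['property_%d' % i for i in range(3, 8)]
             + ['invest_%d' % i for i in range(0, 9)])
X_train_new = X_train.drop(columns=drop_cols)
X_test_new = X_test.drop(columns=drop_cols)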
4.2 Revised models
## final averaging run
# average the existing xgboost + lightgbm + gdbt models
# manual cross-validation, as above, because the predictions will be rounded
import pandas as pd
import numpy as np
from sklearn import preprocessing
from sklearn import model_selection
from sklearn.externals import joblib
from sklearn import metrics
import lightgbm
# workaround for a duplicate OpenMP library error when importing LightGBM
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import xgboost
from sklearn import ensemble
xgboost_mes=[]
lightgbm_mes=[]
gdbt_mes=[]
mix_mes=[]
i=0
kf=model_selection.KFold(10,shuffle=True)
for train,test in kf.split(X_train_new):
    X_train1 = X_train_new.iloc[train]
    y_train1 = y_train.iloc[train]
    X_test1 = X_train_new.iloc[test]
    y_test1 = y_train.iloc[test]
    print(i)
    xgboost=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/feature/xgboost/xgboost_%d.pkl'%i)
    lightgbm=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/feature/lightgbm/lightgbm_%d.pkl'%i)
    gdbt=joblib.load(r'/Users/ranmo/Desktop/天池/幸福感/feature/gdbt/gdbt_%d.pkl'%i)
    xgboost_y_pred=xgboost.fit(X_train1,y_train1).predict(X_test1)
    lightgbm_y_pred=lightgbm.fit(X_train1,y_train1).predict(X_test1)
    gdbt_y_pred=gdbt.fit(X_train1,y_train1).predict(X_test1)
    mix_y_pred=(xgboost_y_pred+lightgbm_y_pred+gdbt_y_pred)/3
    xgboost_mes.append(metrics.mean_squared_error(xgboost_y_pred,y_test1))
    lightgbm_mes.append(metrics.mean_squared_error(lightgbm_y_pred,y_test1))
    gdbt_mes.append(metrics.mean_squared_error(gdbt_y_pred,y_test1))
    mix_mes.append(metrics.mean_squared_error(mix_y_pred,y_test1))
    xgboost_y_test=xgboost.predict(X_test_new)
    lightgbm_y_test=lightgbm.predict(X_test_new)
    gdbt_y_test=gdbt.predict(X_test_new)
    mix_y_test=(xgboost_y_test+lightgbm_y_test+gdbt_y_test)/3
    df_mix_final=pd.DataFrame({'id':X_test.index,'happiness':mix_y_test}).set_index('id')
    df_mix_final.to_csv(r'/Users/ranmo/Desktop/天池/幸福感/feature/mixmodel/df_mix_%d.csv'%i)
    i+=1
print('xgboost:')
print(xgboost_mes)
print(np.mean(xgboost_mes))
print()
print('lightgbm:')
print(lightgbm_mes)
print(np.mean(lightgbm_mes))
print()
print('gdbt:')
print(gdbt_mes)
print(np.mean(gdbt_mes))
print()
print('mix:')
print(mix_mes)
print(np.mean(mix_mes))
print()
From the results, the models built on this lightly engineered feature set reach roughly the same best score as the original ones, which confirms that the ensemble algorithms were already handling the features well.
Finally, different random seeds were tried as a last optimisation (of little real meaning once the model is stable, just to see whether the score can inch up); the baseline is 0.47098.
Done.