import numpy as np
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression

# Build the feature matrix X and the target vector Y from the training set
X = np.array(train1[feature_use].fillna(-1))
Y = np.array(train1['target'])
names = feature_use

# Use linear regression as the base model and rank all features,
# i.e. continue the recursive elimination until only one is left
lr = LinearRegression()
rfe = RFE(lr, n_features_to_select=1)
rfe.fit(X, Y)
print("Features sorted by their score:")
#print(sorted(zip(map(lambda x: round(x, 4), rf.feature_importances_), names), reverse=True))
sortedlist = sorted(zip(map(lambda x: round(x, 4), rfe.ranking_), names),
reverse=True)
print(sortedlist)
feature_use = []
for index in sortedlist[len(sortedlist)-70 : ]:
if index[0]>0:
feature_use.append(index[1])
print(feature_use)
Here X is the dataset's feature matrix and Y its label vector; sortedlist holds the features ordered by their RFE importance ranking.
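As a side note, when the number of features to keep is known in advance, RFE can do the truncation itself instead of the manual sort-and-slice above. A minimal sketch, reusing the X, Y and names defined above; the choice of 70 features simply mirrors the cut used in this post:

from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression

# Ask RFE to keep the 70 best features directly
rfe70 = RFE(LinearRegression(), n_features_to_select=70)
rfe70.fit(X, Y)

# support_ is a boolean mask over the input columns,
# True for every feature that survived the elimination
selected = [name for name, keep in zip(names, rfe70.support_) if keep]
print(selected)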
A recent lesson from doing machine learning: features influence the result far more than model parameters do; features are the real world's reflection inside the algorithm. Feature engineering therefore demands a deep understanding of the business domain, and the emphasis should be on getting back to basics: drop useless features and cut the ones that only introduce noise. When adding features, introduce them one at a time, and keep thinking about how they relate to each other, in particular whether they are strongly linearly correlated (see the sketch below).
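One quick way to spot strongly linearly correlated candidates is a pairwise Pearson correlation matrix. A minimal sketch with pandas, assuming the same train1 and names as above; the 0.95 cutoff is purely illustrative and should be tuned per dataset:

# Pairwise Pearson correlations between candidate features
corr = train1[names].fillna(-1).corr()

threshold = 0.95  # illustrative cutoff, not a rule
for i, a in enumerate(names):
    for b in names[i + 1:]:
        if abs(corr.loc[a, b]) > threshold:
            print(a, b, round(corr.loc[a, b], 4))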
# Alternative: select features with a random forest (kept disabled here)
'''
from sklearn.ensemble import RandomForestRegressor
import numpy as np

# Same feature matrix and target as above
X = np.array(train1[feature_use].fillna(-1))
Y = np.array(train1['target'])
names = feature_use

rf = RandomForestRegressor()
rf.fit(X, Y)
print("Features sorted by their importance score (higher = more important):")
print(sorted(zip(map(lambda x: round(x, 4), rf.feature_importances_), names),
             reverse=True))
'''
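The hard cut at 70 features is itself a hyperparameter. If you would rather let cross-validation pick the number of features, scikit-learn's RFECV wraps the same recursive elimination in a CV loop. A minimal sketch under the same assumptions as above; 5-fold CV is an arbitrary but common choice:

from sklearn.feature_selection import RFECV
from sklearn.linear_model import LinearRegression

# Recursively eliminate features, scoring each candidate subset with 5-fold CV
rfecv = RFECV(LinearRegression(), step=1, cv=5)
rfecv.fit(X, Y)

print("Optimal number of features:", rfecv.n_features_)
print([name for name, keep in zip(names, rfecv.support_) if keep])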