Shandong University Practical Training Report

    2022-07-11

    Blog Posts

    1.https://blog.csdn.net/qq_41032884/article/details/106619158

    2.https://blog.csdn.net/qq_41032884/article/details/106638006

    3.https://blog.csdn.net/qq_41032884/article/details/106676616

    4.https://blog.csdn.net/qq_41032884/article/details/106688866

    5.https://blog.csdn.net/qq_41032884/article/details/106708659

    6.https://blog.csdn.net/qq_41032884/article/details/106837553

    7.https://blog.csdn.net/qq_41032884/article/details/106865137

    8.https://blog.csdn.net/qq_41032884/article/details/106959074

    9.https://blog.csdn.net/qq_41032884/article/details/106986390

    10.https://blog.csdn.net/qq_41032884/article/details/107008654

    11.https://blog.csdn.net/qq_41032884/article/details/107025727

     

    Main Work

    My work focused on data processing and analysis, and on algorithm modeling. The main tasks completed were:

    Processing MOOC learning records and modeling with several algorithms such as gcforest, SVR, and linear regression, comparing them across multiple metrics

    Implementing feature engineering, trying several different feature-selection methods to improve the training metrics

    Preprocessing and analyzing students' other campus-record data

    Building and training deep learning models, tuning hyperparameters, analyzing the results, and organizing them for submission to classmates

    Work Challenges

    The main difficulties fell into several areas:

    1. Feature engineering: how to select features and optimize the models. I tried several methods and compared them across multiple metrics.

    ''' Chi-square test for feature selection '''
    from __future__ import division
    import os
    import numpy as np
    import pandas as pd
    from sklearn.svm import SVR
    from sklearn.linear_model import BayesianRidge, LinearRegression, ElasticNet  # regression algorithms to compare
    from sklearn.model_selection import cross_val_score  # cross-validation
    from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score  # evaluation metrics
    from sklearn.ensemble import GradientBoostingRegressor  # ensemble algorithm
    from sklearn.feature_selection import SelectKBest, chi2

    file = []
    for root, dirs, files in os.walk('data'):
        for name in files:
            file.append(os.path.join(root, name))

    for f in file:
        if f == 'data/.DS_Store':
            continue
        data = pd.read_csv(f, header=None, index_col=0)
        data_ = data.iloc[:, 3:]
        data_ = data_.sample(frac=1)  # shuffle the rows
        dataset = np.array(data_)
        index = int(dataset.shape[0] * 0.8)  # 80/20 train/test split
        data_x = dataset[:, :-1]
        data_y = dataset[:, -1]
        X_train = data_x[:index, :]
        y_train = data_y[:index]
        X_test = data_x[index:, :]
        y_test = data_y[index:]

        # keep the 2 features with the highest chi-square scores;
        # note chi2 requires non-negative feature values and treats y as discrete labels
        model = SelectKBest(chi2, k=2)
        X_train = model.fit_transform(X_train, y_train)
        X_test = model.transform(X_test)

        model_br = BayesianRidge()               # Bayesian ridge regression
        model_lr = LinearRegression()            # ordinary linear regression
        model_etc = ElasticNet()                 # elastic net regression
        model_svr = SVR()                        # support vector regression
        model_gbr = GradientBoostingRegressor()  # gradient boosting regression
        model_names = ['BayesianRidge', 'LinearRegression', 'ElasticNet', 'SVR', 'GBR']
        model_dic = [model_br, model_lr, model_etc, model_svr, model_gbr]
        cv_score_list = []  # cross-validation scores
        pre_y_list = []     # predictions of each regressor on the test set
        for model in model_dic:
            scores = cross_val_score(model, X_train, y_train, cv=5)
            cv_score_list.append(scores)
            pre_y_list.append(model.fit(X_train, y_train).predict(X_test))

        model_metrics_name = [explained_variance_score, mean_absolute_error, mean_squared_error, r2_score]
        model_metrics_list = []
        for i in range(5):  # one row of metrics per model
            tmp_list = []
            for m in model_metrics_name:
                tmp_list.append(m(y_test, pre_y_list[i]))
            model_metrics_list.append(tmp_list)
        df2 = pd.DataFrame(model_metrics_list, index=model_names, columns=['ev', 'mae', 'mse', 'r2'])
        print('=' * 10, f, '=' * 10)
        print(df2)

    ''' Recursive feature elimination (RFE) '''
    from __future__ import division
    import os
    import numpy as np
    import pandas as pd
    from sklearn.svm import SVR
    from sklearn.linear_model import BayesianRidge, LinearRegression, ElasticNet
    from sklearn.model_selection import cross_val_score
    from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
    from sklearn.ensemble import GradientBoostingRegressor
    from sklearn.feature_selection import RFE

    file = []
    for root, dirs, files in os.walk('data'):
        for name in files:
            file.append(os.path.join(root, name))

    for f in file:
        if f == 'data/.DS_Store':
            continue
        data = pd.read_csv(f, header=None, index_col=0)
        data_ = data.iloc[:, 3:]
        data_ = data_.sample(frac=1)  # shuffle the rows
        dataset = np.array(data_)
        index = int(dataset.shape[0] * 0.8)  # 80/20 train/test split
        data_x = dataset[:, :-1]
        data_y = dataset[:, -1]
        X_train = data_x[:index, :]
        y_train = data_y[:index]
        X_test = data_x[index:, :]
        y_test = data_y[index:]

        model_br = BayesianRidge()
        model_lr = LinearRegression()
        model_etc = ElasticNet()
        model_svr = SVR(kernel='linear')  # RFE needs coef_ or feature_importances_, so SVR must use a linear kernel
        model_gbr = GradientBoostingRegressor()
        model_names = ['BayesianRidge', 'LinearRegression', 'ElasticNet', 'SVR', 'GBR']
        model_dic = [model_br, model_lr, model_etc, model_svr, model_gbr]
        cv_score_list = []
        pre_y_list = []
        for model in model_dic:
            rfe = RFE(estimator=model, n_features_to_select=2)  # recursively drop features until 2 remain
            X_train_sel = rfe.fit_transform(X_train, y_train)   # select per model without overwriting X_train
            X_test_sel = rfe.transform(X_test)
            scores = cross_val_score(model, X_train_sel, y_train, cv=5)
            cv_score_list.append(scores)
            pre_y_list.append(model.fit(X_train_sel, y_train).predict(X_test_sel))

        model_metrics_name = [explained_variance_score, mean_absolute_error, mean_squared_error, r2_score]
        model_metrics_list = []
        for i in range(5):
            tmp_list = []
            for m in model_metrics_name:
                tmp_list.append(m(y_test, pre_y_list[i]))
            model_metrics_list.append(tmp_list)
        df2 = pd.DataFrame(model_metrics_list, index=model_names, columns=['ev', 'mae', 'mse', 'r2'])
        print('=' * 10, f, '=' * 10)
        print(df2)

     

    ''' Feature selection via a logistic-regression penalty '''
    from __future__ import division
    import os
    import numpy as np
    import pandas as pd
    from sklearn.svm import SVR
    from sklearn.linear_model import BayesianRidge, LinearRegression, ElasticNet, LogisticRegression
    from sklearn.model_selection import cross_val_score
    from sklearn.metrics import explained_variance_score, mean_absolute_error, mean_squared_error, r2_score
    from sklearn.ensemble import GradientBoostingRegressor
    from sklearn.feature_selection import SelectFromModel

    file = []
    for root, dirs, files in os.walk('data'):
        for name in files:
            file.append(os.path.join(root, name))

    for f in file:
        if f == 'data/.DS_Store':
            continue
        data = pd.read_csv(f, header=None, index_col=0)
        data_ = data.iloc[:, 3:]
        data_ = data_.sample(frac=1)  # shuffle the rows
        dataset = np.array(data_)
        index = int(dataset.shape[0] * 0.8)  # 80/20 train/test split
        data_x = dataset[:, :-1]
        data_y = dataset[:, -1]
        X_train = data_x[:index, :]
        y_train = data_y[:index]
        X_test = data_x[index:, :]
        y_test = data_y[index:]

        # keep only features whose L2-penalized logistic-regression coefficients are large enough;
        # note LogisticRegression is a classifier, so the labels must be discrete for this step
        model = SelectFromModel(LogisticRegression(penalty='l2', C=0.1))
        X_train = model.fit_transform(X_train, y_train)
        X_test = model.transform(X_test)

        model_br = BayesianRidge()
        model_lr = LinearRegression()
        model_etc = ElasticNet()
        model_svr = SVR()
        model_gbr = GradientBoostingRegressor()
        model_names = ['BayesianRidge', 'LinearRegression', 'ElasticNet', 'SVR', 'GBR']
        model_dic = [model_br, model_lr, model_etc, model_svr, model_gbr]
        cv_score_list = []
        pre_y_list = []
        for model in model_dic:
            scores = cross_val_score(model, X_train, y_train, cv=5)
            cv_score_list.append(scores)
            pre_y_list.append(model.fit(X_train, y_train).predict(X_test))

        model_metrics_name = [explained_variance_score, mean_absolute_error, mean_squared_error, r2_score]
        model_metrics_list = []
        for i in range(5):
            tmp_list = []
            for m in model_metrics_name:
                tmp_list.append(m(y_test, pre_y_list[i]))
            model_metrics_list.append(tmp_list)
        df2 = pd.DataFrame(model_metrics_list, index=model_names, columns=['ev', 'mae', 'mse', 'r2'])
        print('=' * 10, f, '=' * 10)
        print(df2)

    2. How to train when the sample count is small. The initial idea was to recast the task as binary classification, so deep forest (gcForest) was tried first.

    import argparse
    import os
    import pickle
    import numpy as np
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.metrics import accuracy_score
    from gcforest.gcforest import GCForest
    from gcforest.utils.config_utils import load_json

    def parse_args():
        parser = argparse.ArgumentParser()
        parser.add_argument("--model", type=str, default='gcforest', help='Train Model File')
        parser.add_argument("--data", type=str, default='mooc_data.txt', help='Dataset')
        args = parser.parse_args()
        return args

    def get_toy_config():
        config = {}
        ca_config = {}
        ca_config["random_state"] = 0
        ca_config["max_layers"] = 100
        ca_config["early_stopping_rounds"] = 3
        ca_config["n_classes"] = 10  # left over from the upstream demo; 2 would match a binary task
        ca_config["estimators"] = []
        '''
        ca_config["estimators"].append(
            {"n_folds": 5, "type": "XGBClassifier", "n_estimators": 10, "max_depth": 5,
             "objective": "multi:softprob", "silent": True, "nthread": -1, "learning_rate": 0.1})
        ca_config["estimators"].append({"n_folds": 5, "type": "RandomForestClassifier",
                                        "n_estimators": 10, "max_depth": None, "n_jobs": -1})
        ca_config["estimators"].append({"n_folds": 5, "type": "ExtraTreesClassifier",
                                        "n_estimators": 10, "max_depth": None, "n_jobs": -1})
        '''
        ca_config["estimators"].append({"n_folds": 5, "type": "LogisticRegression"})
        config["cascade"] = ca_config
        return config

    if __name__ == "__main__":
        args = parse_args()
        if args.model == 'gcforest':
            config = get_toy_config()
        else:
            config = load_json(args.model)
        file = args.data
        dir = 'data/' + file
        if os.path.exists(dir) == False:
            raise ValueError("The file does not exist!")
        f = open(dir, 'r')
        lines = f.readlines()
        dataset = []
        for line in lines:
            cols = line.split(',')
            for i in range(len(cols) - 1):
                cols[i + 1] = float(cols[i + 1])
            col_array = np.array(cols[1:-1])  # drop the leading id column and the trailing column
            dataset.append(col_array)
        dataset = np.array(dataset)
        np.random.shuffle(dataset)
        index = int(dataset.shape[0] * 0.2)  # note: only 20% of the data is used for training here
        X_train = dataset[:index, :-1]
        y_train = dataset[:index, -1]
        X_test = dataset[index:, :-1]
        y_test = dataset[index:, -1]
        gc = GCForest(config)
        # If the model costs too much memory, gc.set_keep_model_in_mem(False) stops gcforest
        # from keeping the model in memory; the default is True.
        X_train = X_train[:, np.newaxis, :]
        X_test = X_test[:, np.newaxis, :]
        X_train_enc = gc.fit_transform(X_train, y_train)  # the cascade must be fitted before calling predict
        # X_train_enc is the concatenated predict_proba output of each estimator in the last layer:
        #   (n_datas, n_estimators * n_classes) if a cascade is provided
        #   (n_datas, n_estimators * n_classes, dimX, dimY) if only the fine-grained part is provided
        # Passing X_test, y_test to fit_transform logs test accuracy during training:
        #   X_train_enc, X_test_enc = gc.fit_transform(X_train, y_train, X_test=X_test, y_test=y_test)
        y_pred = gc.predict(X_test)
        acc = accuracy_score(y_test, y_pred)
        print("Test Accuracy of GcForest = {:.2f} %".format(acc * 100))
        # X_train_enc can also be fed to another classifier on top of gcForest, e.g. xgboost/RF:
        '''
        X_test_enc = gc.transform(X_test)
        X_train_enc = X_train_enc.reshape((X_train_enc.shape[0], -1))
        X_test_enc = X_test_enc.reshape((X_test_enc.shape[0], -1))
        X_train_origin = X_train.reshape((X_train.shape[0], -1))
        X_test_origin = X_test.reshape((X_test.shape[0], -1))
        X_train_enc = np.hstack((X_train_origin, X_train_enc))
        X_test_enc = np.hstack((X_test_origin, X_test_enc))
        print("X_train_enc.shape={}, X_test_enc.shape={}".format(X_train_enc.shape, X_test_enc.shape))
        clf = RandomForestClassifier(n_estimators=1000, max_depth=None, n_jobs=-1)
        clf.fit(X_train_enc, y_train)
        y_pred = clf.predict(X_test_enc)
        acc = accuracy_score(y_test, y_pred)
        print("Test Accuracy of Other classifier using gcforest's X_encode = {:.2f} %".format(acc * 100))
        '''
        # dump the trained model
        with open("test_gcforest.pkl", "wb") as f:
            pickle.dump(gc, f, pickle.HIGHEST_PROTOCOL)
        '''
        with open("test.pkl", "rb") as f:
            gc = pickle.load(f)
        y_pred = gc.predict(X_test)
        acc = accuracy_score(y_test, y_pred)
        print("Test Accuracy of GcForest (save and load) = {:.2f} %".format(acc * 100))
        '''

    Later we decided that simply reducing the task to binary classification was too crude. By analogy with the simplest house-price prediction setup, it could instead be framed as regression, and with the most straightforward data-augmentation trick, taking pairwise differences between samples, the 538 samples were expanded to more than ten thousand.

    x_raw = load_student_data()
    y_raw = load_label()
    x_pair = []
    y_pair = []
    # augment by taking the difference of every ordered pair of samples
    for index in range(len(y_raw)):
        for i in range(1, len(y_raw) - index):
            y_pair.append(y_raw[index] - y_raw[index + i])
    print("y_pair build finish shape: %s" % len(y_pair))
    for index in range(len(x_raw)):
        for i in range(1, len(x_raw) - index):
            x_pair.append(x_raw[index] - x_raw[index + i])
    print("x_pair build finish shape: %s" % len(x_pair))
    x = np.array(x_pair)
    y = np.array(y_pair)
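    The step between building x/y and the training code below is not shown in the report, so here is a minimal bridging sketch under an assumed 80/20 random split of the augmented pairs. Note that pairs sharing an original sample can land in both sets, so a split like this is optimistic:

    from sklearn.model_selection import train_test_split

    # assumed split ratio and seed, chosen for illustration only
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)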

     

    3. Deep learning modeling: given the processed data, how to design the hyperparameters that best fit the model, such as the number of layers, the number of neurons per layer, and the number of training epochs.

    from keras.models import Sequential
    from keras.layers import Dense, Activation, Dropout

    # x_train / x_test / y_train / y_test are the augmented pairwise-difference arrays built above

    def deep():
        # build the network
        model = Sequential()
        # note: a 1-unit first layer is an extreme bottleneck; a wider layer was probably intended
        model.add(Dense(input_dim=x_train.shape[1], units=1, kernel_initializer='uniform'))
        model.add(Activation('relu'))
        #model.add(Dense(512))
        #model.add(Activation('relu'))
        model.add(Dense(256))
        model.add(Activation('relu'))
        model.add(Dense(128))
        #model.add(Dropout(0.8))
        model.add(Activation('relu'))
        model.add(Dense(64))
        #model.add(Dropout(0.8))
        model.add(Activation('relu'))
        model.add(Dense(32))
        #model.add(Dropout(0.8))
        model.add(Activation('relu'))
        model.add(Dense(1))  # single output for the regression target
        #model.add(Activation('sigmoid'))
        model.compile(optimizer='adam', metrics=["mae"], loss='mse')
        model.fit(x_train, y_train, batch_size=16, epochs=50)
        score = model.evaluate(x_test, y_test, batch_size=16)
        print('mse score:', score[0])
        print('mae score:', score[1])
        #W, b = model.layers[0].get_weights()
        #print('Weights=', W, '\n biases=', b)

        # print the predictions
        y_pred = model.predict(x_test)
        print("raw y_pred")
        for mindex in y_pred:
            mindex[0] = int(mindex[0])
            if mindex[0] < 0:
                mindex[0] = 0 - mindex[0]  # predictions are pairwise differences; take the absolute value
            print(mindex[0])
        # print predictions next to the ground truth:
        # y_pred = y_pred.astype(int)
        # for mindex in zip(y_test, y_pred):
        #     print("y_test", mindex[0], "| y_pred", mindex[1])

    deep()
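    Rather than fixing one architecture, the layer/width/epoch question above can be answered empirically by looping over a small grid and comparing held-out error. The sketch below is illustrative, not the tuning code actually used in the project; the candidate grids and the build_model helper are made up for the example, and x_train / y_train / x_test / y_test are assumed to be the arrays from above.

    from keras.models import Sequential
    from keras.layers import Dense, Activation

    def build_model(input_dim, hidden_units):
        # hidden_units is a tuple like (256, 128, 64); one Dense+ReLU layer per entry
        model = Sequential()
        model.add(Dense(hidden_units[0], input_dim=input_dim, kernel_initializer='uniform'))
        model.add(Activation('relu'))
        for units in hidden_units[1:]:
            model.add(Dense(units))
            model.add(Activation('relu'))
        model.add(Dense(1))  # single regression output
        model.compile(optimizer='adam', loss='mse', metrics=['mae'])
        return model

    best = None
    # illustrative grids; widen or shrink them depending on training time
    for hidden_units in [(256, 128), (256, 128, 64), (256, 128, 64, 32)]:
        for epochs in [30, 50, 100]:
            model = build_model(x_train.shape[1], hidden_units)
            model.fit(x_train, y_train, batch_size=16, epochs=epochs,
                      validation_split=0.2, verbose=0)
            loss, mae = model.evaluate(x_test, y_test, batch_size=16, verbose=0)
            print(hidden_units, epochs, 'mae:', mae)
            if best is None or mae < best[0]:
                best = (mae, hidden_units, epochs)
    print('best:', best)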

    Summary

    The first thing to do after receiving the data is data analysis.
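    As an illustration, a quick first pass in pandas can surface shapes, missing values, and which columns correlate with the target before any modeling. A minimal sketch, with a hypothetical file name; the read convention (no header, first column as index, first three columns dropped) follows the scripts above:

    import pandas as pd

    # hypothetical file name; any per-course CSV under data/ reads the same way
    df = pd.read_csv('data/mooc_records.csv', header=None, index_col=0)
    data_ = df.iloc[:, 3:]  # numeric features plus the target in the last column

    print(data_.shape)               # number of samples and feature columns
    print(data_.isnull().sum())      # missing values per column
    print(data_.describe())          # ranges and spreads, to spot columns that need scaling
    print(data_.corr().iloc[:, -1])  # correlation of each column with the target (last column)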

    In real applications the models themselves are often not complicated; what matters is data processing and feature engineering.

    Compare from many angles, keep adjusting the model and its parameters, use multiple metrics, and distill the most suitable approach.
