1. Forward Stagewise Algorithm
Consider the additive model

f(x) = \sum_{m=1}^{M} \beta_m b(x; \gamma_m)

where b(x; \gamma_m) is a basis function, \gamma_m its parameters, and \beta_m its coefficient. Given a loss function L(y, f(x)), learning the additive model means minimizing the empirical loss over all \beta_m and \gamma_m at once, which is hard in general; the forward stagewise algorithm simplifies the problem by adding one basis function at a time, from front to back, optimizing only the newest term at each step.
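Written out (a sketch of the standard formulation, as in Li Hang's Statistical Learning Methods; notation as above), step m of the forward stagewise algorithm solves

(\beta_m, \gamma_m) = \arg\min_{\beta, \gamma} \sum_{i=1}^{N} L\big(y_i,\ f_{m-1}(x_i) + \beta\, b(x_i; \gamma)\big)

and then updates f_m(x) = f_{m-1}(x) + \beta_m b(x; \gamma_m), so each step fits one new basis function while all earlier ones stay fixed.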
2. Boosting Tree
Boosting methods in fact use the additive model together with the forward stagewise algorithm. A boosting method whose basis functions are decision trees is called a boosting tree, and the boosting tree model can be written as an additive model of decision trees:

f_M(x) = \sum_{m=1}^{M} T(x; \Theta_m)

where T(x; \Theta_m) denotes a decision tree, \Theta_m its parameters, and M the number of trees.
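For regression with squared-error loss, the stagewise objective simplifies in a way worth spelling out (standard derivation, notation as above): writing r = y - f_{m-1}(x) for the residual of the current model,

L\big(y,\ f_{m-1}(x) + T(x; \Theta_m)\big) = \big[y - f_{m-1}(x) - T(x; \Theta_m)\big]^2 = \big[r - T(x; \Theta_m)\big]^2,

so the m-th tree is simply a regression tree fitted to the residuals, which is exactly what the implementation in Section 4 below does.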
3. Gradient Boosting
When the loss function is squared error or exponential loss, each step of the boosting tree optimization is simple; for a general loss function, however, each step is not so easy to optimize. To address this, Friedman proposed the gradient boosting algorithm, an approximation based on steepest descent. Its key idea is to use the value of the negative gradient of the loss function at the current model,

r_{mi} = -\left[ \frac{\partial L\big(y_i, f(x_i)\big)}{\partial f(x_i)} \right]_{f(x) = f_{m-1}(x)},

as an approximation of the residual, and to fit the next regression tree to these values.
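A quick sanity check (a minimal sketch of my own, not from the original post): for squared-error loss L(y, f) = 0.5 * (y - f)^2, the negative gradient -dL/df = y - f is exactly the ordinary residual, so gradient boosting with squared loss reduces to the residual-fitting boosting tree above.

import numpy as np

# Pseudo-residuals for squared-error loss L(y, f) = 0.5 * (y - f) ** 2.
# The negative gradient -dL/df = y - f equals the plain residual, which is
# why gradient boosting with this loss is just residual fitting.
def pseudo_residuals(y, f_pred):
    return y - f_pred  # -dL/df evaluated at the current model f_{m-1}

y = np.array([1.0, 2.0, 3.0])
f_pred = np.array([0.8, 2.5, 2.9])    # predictions of the current model
print(pseudo_residuals(y, f_pred))    # [ 0.2 -0.5  0.1]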
4. Implementing a Boosting Tree
# coding=utf-8
import numpy as np

# Load a tab-delimited file of floats; the last column is the target value.
def loadDataSet(fileName):
    dataMat = []
    with open(fileName) as fr:
        for line in fr.readlines():
            curLine = line.strip().split('\t')
            fltLine = list(map(float, curLine))  # convert all fields to float
            dataMat.append(fltLine)
    return dataMat

# Split the data set on one feature: one part >= value, the other < value.
def splitData(data_array, col, value):
    array_1 = data_array[data_array[:, col] >= value, :]
    array_2 = data_array[data_array[:, col] < value, :]
    return array_1, array_2

# Total squared error of the target column.
def getErr(data_array):
    return np.var(data_array[:, -1]) * data_array.shape[0]

# Leaf node value: the mean of the targets.
def regLeaf(data_array):
    return np.mean(data_array[:, -1])

# Choose the best split point, CART-style regression (binary tree).
def get_best_split(data_array, ops=(1, 4)):
    tolS = ops[0]  # minimum error reduction required to keep splitting
    tolN = ops[1]  # minimum number of samples in each split
    if len(set(data_array[:, -1])) == 1:
        return None, regLeaf(data_array)
    m, n = data_array.shape
    best_S = np.inf   # best (lowest) squared error found so far
    best_col = 0      # best feature
    best_value = 0    # best split value
    S = getErr(data_array)  # squared error before splitting
    for col in range(n - 1):
        values = set(data_array[:, col])
        for value in values:
            array_1, array_2 = splitData(data_array, col, value)
            if (array_1.shape[0] < tolN) or (array_2.shape[0] < tolN):
                continue
            total_error = getErr(array_1) + getErr(array_2)
            if total_error < best_S:
                best_col = col
                best_value = value
                best_S = total_error
    if (S - best_S) < tolS:
        return None, regLeaf(data_array)
    array_1, array_2 = splitData(data_array, best_col, best_value)
    if (array_1.shape[0] < tolN) or (array_2.shape[0] < tolN):
        return None, regLeaf(data_array)
    return best_col, best_value

# One tree node: feature column, split value, leaf result, greater/less branches.
class node:
    def __init__(self, col=-1, value=None, results=None, gb=None, lb=None):
        self.col = col
        self.value = value
        self.results = results
        self.gb = gb
        self.lb = lb

# Build a CART regression tree.
def buildTree(data_array, ops=(1, 4)):
    col, val = get_best_split(data_array, ops)
    if col is None:
        return node(results=val)
    else:
        array_1, array_2 = splitData(data_array, col, val)
        greater_branch = buildTree(array_1, ops)
        less_branch = buildTree(array_2, ops)
        return node(col=col, value=val, gb=greater_branch, lb=less_branch)

# Regress a single sample through the tree.
def treeForeCast(tree, inData):
    if tree.results is not None:
        return tree.results
    if inData[tree.col] > tree.value:
        return treeForeCast(tree.gb, inData)
    else:
        return treeForeCast(tree.lb, inData)

# Return the regression results for every sample in a test set.
def createForeCast(tree, testData):
    m = len(testData)
    yHat = np.mat(np.zeros((m, 1)))
    for i in range(m):
        yHat[i, 0] = treeForeCast(tree, testData[i])
    return yHat

# Build the boosting tree: after the first tree, each new tree is fitted
# to the residuals of the current ensemble.
def boostTree(data_array, num_iter, ops=(1, 4)):
    m, n = data_array.shape
    x = data_array[:, 0:-1]                 # all feature columns
    y = data_array[:, -1].reshape((m, 1))   # true target values
    list_trees = []
    for i in range(num_iter):
        print('i:', i)
        if i == 0:
            tree = buildTree(data_array, ops)
            list_trees.append(tree)
            yHat = createForeCast(tree, x)
        else:
            r = y - np.array(yHat)           # residuals of the current ensemble
            data_array = np.hstack((x, r))   # stack features and residuals column-wise
            tree = buildTree(data_array, ops)
            list_trees.append(tree)
            rHat = createForeCast(tree, x)   # predictions of the residual tree
            yHat = yHat + rHat
    return list_trees

# Print the tree structure.
def printtree(tree, indent=''):
    if tree.results is not None:   # leaf node
        print(str(tree.results))
    else:
        # print the split criterion, then both branches
        print(str(tree.col) + ':' + str(tree.value) + '? ')
        print(indent + 'T->', end='')
        printtree(tree.gb, indent + '  ')
        print(indent + 'F->', end='')
        printtree(tree.lb, indent + '  ')

if __name__ == '__main__':
    data = loadDataSet('ex0.txt')
    data_array = np.array(data)
    # tree = buildTree(data_array)
    # printtree(tree)
    gbdt_results = boostTree(data_array, 10)
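The listing returns list_trees but stops short of predicting with the finished ensemble. A minimal helper (my own addition, assuming the node class and createForeCast above; the name boostTreePredict is not from the original):

# Hypothetical helper, not in the original post: the boosting-tree
# prediction is the sum of all trees' predictions, since every tree
# after the first was fitted to the remaining residuals.
def boostTreePredict(list_trees, testData):
    yHat = createForeCast(list_trees[0], testData)
    for tree in list_trees[1:]:
        yHat = yHat + createForeCast(tree, testData)
    return yHat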
5. Implementing GBDT (Gradient Boosting Decision Tree)
# coding=utf-8
import numpy as np
from sklearn.ensemble import GradientBoostingRegressor

gbdt = GradientBoostingRegressor(
    loss='ls',            # loss function; the regressor accepts 'ls', 'lad', 'huber', 'quantile'
                          # (renamed to 'squared_error', 'absolute_error', etc. in newer scikit-learn)
    learning_rate=0.1,    # learning rate / step size
    n_estimators=100,     # number of boosting iterations; trades off against learning_rate
    subsample=1,          # fraction of samples drawn for each tree
    min_samples_split=2,  # minimum number (or fraction) of samples required to split a node
    min_samples_leaf=1,   # minimum number (or fraction) of samples required at a leaf
    max_depth=3,          # depth of each tree; this and most parameters above set the
                          # stopping conditions for tree splitting
    init=None,
    random_state=None,
    max_features=None,
    alpha=0.9,
    verbose=0,
    max_leaf_nodes=None,
    warm_start=False
)

train_feat = np.array([
    [0.00598802, 0.569231, 0.647059, 0.95122, -0.225434, 0.837989, 0.357258, -0.0030581, -0.383475],
    [0.161677, 0.743195, 0.682353, 0.960976, -0.0867052, 0.780527, 0.282945, 0.149847, -0.0529661],
    [0.113772, 0.744379, 0.541176, 0.990244, -0.00578035, 0.721468, 0.43411, -0.318043, 0.288136],
    [0.0538922, 0.608284, 0.764706, 0.95122, -0.248555, 0.821229, 0.848604, -0.0030581, 0.239407],
    [0.173653, 0.866272, 0.682353, 0.95122, 0.017341, 0.704709, -0.0210016, -0.195719, 0.150424]])
train_id = np.array([320, 361, 364, 336, 358])
test_feat = train_feat.copy()   # the original post reuses the training samples as the test set
test_id = train_id.copy()

gbdt.fit(train_feat, train_id)   # first argument: sample features; second: sample labels
pred = gbdt.predict(test_feat)   # argument: test sample features
total_err = 0
for i in range(pred.shape[0]):
    print(pred[i], test_id[i])
    err = (pred[i] - test_id[i]) / test_id[i]
    total_err += err * err
print(total_err / pred.shape[0])
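One practical note on the learning_rate / n_estimators trade-off mentioned in the comments: scikit-learn's staged_predict yields the ensemble's predictions after each boosting stage, which is a common way to watch the error curve and choose the number of iterations. A minimal sketch reusing gbdt, test_feat, and test_id from above (with a real data set you would monitor a held-out set rather than the training samples):

from sklearn.metrics import mean_squared_error

# Mean squared error of the ensemble after every boosting stage.
for stage, staged_pred in enumerate(gbdt.staged_predict(test_feat), start=1):
    if stage % 20 == 0:  # print every 20th stage to keep output short
        print(stage, mean_squared_error(test_id, staged_pred))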