Boosting is a technique closely related to bagging. In both boosting and bagging, the individual classifiers in the ensemble are all of the same type. In boosting, however, the classifiers are trained serially: each new classifier is trained in light of the performance of the classifiers already trained, and boosting obtains each new classifier by concentrating on the data that the existing classifiers misclassify.

Boosting also differs from bagging in how it combines results: the final classification is a weighted sum over all classifiers. In bagging the classifier weights are equal; in boosting they are not, and each weight reflects how successful the corresponding classifier was in the previous round of iteration.

Boosting comes in many versions; this chapter focuses only on the most popular one, AdaBoost.
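To make that weight update concrete before the full listing, here is a minimal sketch of the two formulas AdaBoost applies each round: the classifier weight alpha = 0.5 * ln((1 - eps) / eps), computed from the weighted error eps, and the sample-weight update, which increases the weight of misclassified samples so the next classifier focuses on them. This sketch is not part of the original code; the function name adaboost_round and its signature are hypothetical.

import numpy as np

def adaboost_round(D, y_true, y_pred):
    # D: current sample weights, a 1-D array summing to 1
    # y_true, y_pred: true labels and weak-classifier predictions, in {-1, +1}
    eps = float(np.dot(D, y_pred != y_true))             # weighted error of this weak classifier
    alpha = 0.5 * np.log((1.0 - eps) / max(eps, 1e-16))  # classifier weight; large when eps is small
    D = D * np.exp(-alpha * y_true * y_pred)             # misclassified samples (y_true*y_pred = -1) gain weight
    return alpha, D / D.sum()                            # renormalize so the weights sum to 1

The adaBoostTrainDS function below does exactly this with NumPy matrix types; the sketch only isolates the two formulas.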
Finally, the complete code:
from numpy import *

def loadSimpData():
    # toy data set
    datMat = matrix([[1. , 2.1],
                     [2. , 1.1],
                     [1.3, 1. ],
                     [1. , 1. ],
                     [2. , 1. ]])
    classLabels = [1.0, 1.0, -1.0, -1.0, 1.0]
    return datMat, classLabels

def loadDataSet(fileName):
    # general function to parse tab-delimited floats
    numFeat = len(open(fileName).readline().split('\t'))  # number of fields per line
    dataMat = []; labelMat = []
    fr = open(fileName)
    for line in fr.readlines():
        lineArr = []
        curLine = line.strip().split('\t')
        for i in range(numFeat - 1):
            lineArr.append(float(curLine[i]))
        dataMat.append(lineArr)
        labelMat.append(float(curLine[-1]))
    return dataMat, labelMat  # return the data set and labels

# decision-stump classifier: threshold a single feature
def stumpClassify(dataMatrix, dimen, threshVal, threshIneq):
    retArray = ones((shape(dataMatrix)[0], 1))
    if threshIneq == 'lt':
        retArray[dataMatrix[:, dimen] <= threshVal] = -1.0
    else:
        retArray[dataMatrix[:, dimen] > threshVal] = -1.0
    return retArray

# stump-building function: finds the best single-layer decision tree over all features
def buildStump(dataArr, classLabels, D):
    dataMatrix = mat(dataArr); labelMat = mat(classLabels).T  # transpose labels into a column vector
    m, n = shape(dataMatrix)
    numSteps = 10.0; bestStump = {}; bestClasEst = mat(zeros((m, 1)))
    minError = inf  # init error sum to +infinity
    for i in range(n):  # iterate over all features
        rangeMin = dataMatrix[:, i].min(); rangeMax = dataMatrix[:, i].max()
        stepSize = (rangeMax - rangeMin) / numSteps
        for j in range(-1, int(numSteps) + 1):  # sweep thresholds across the feature's range
            for inequal in ['lt', 'gt']:  # given a threshold, we don't know whether the negative class lies below or above it, so try both and keep the one with the lowest error
                threshVal = rangeMin + float(j) * stepSize  # current threshold
                predictedVals = stumpClassify(dataMatrix, i, threshVal, inequal)
                errArr = mat(ones((m, 1)))
                errArr[predictedVals == labelMat] = 0
                weightedError = D.T * errArr  # weighted error rate
                #print("split: dim %d, thresh %.2f, thresh ineqal: %s, the weighted error is %.3f" % (i, threshVal, inequal, weightedError))
                if weightedError < minError:
                    minError = weightedError
                    bestClasEst = predictedVals.copy()
                    bestStump['dim'] = i
                    bestStump['thresh'] = threshVal
                    bestStump['ineq'] = inequal
    return bestStump, minError, bestClasEst  # best stump over the best feature

# AdaBoost training with decision stumps
def adaBoostTrainDS(dataArr, classLabels, numIt=40):
    weakClassArr = []
    m = shape(dataArr)[0]
    D = mat(ones((m, 1)) / m)  # initialize the weight vector D with equal weights
    aggClassEst = mat(zeros((m, 1)))
    for i in range(numIt):  # number of iterations
        bestStump, error, classEst = buildStump(dataArr, classLabels, D)  # best stump under the current weights D
        #print("D:", D.T)
        alpha = float(0.5 * log((1.0 - error) / max(error, 1e-16)))  # calc alpha; max(error, eps) guards against error = 0
        bestStump['alpha'] = alpha
        weakClassArr.append(bestStump)  # add the best stump to the ensemble
        #print("classEst: ", classEst.T)
        expon = multiply(-1 * alpha * mat(classLabels).T, classEst)  # key step: exponent for the D update
        D = multiply(D, exp(expon))  # key step: calc new D for the next iteration
        D = D / D.sum()  # key step: renormalize
        # calc training error of the aggregate classifier; if it hits 0, quit the loop early
        aggClassEst += alpha * classEst
        #print("aggClassEst: ", aggClassEst.T)
        aggErrors = multiply(sign(aggClassEst) != mat(classLabels).T, ones((m, 1)))
        errorRate = aggErrors.sum() / m
        print("total error: ", errorRate)
        if errorRate == 0.0: break
    return weakClassArr

# AdaBoost classification function
def adaClassify(datToClass, classifierArr):
    dataMatrix = mat(datToClass)  # same aggregation as aggClassEst in adaBoostTrainDS
    m = shape(dataMatrix)[0]
    aggClassEst = mat(zeros((m, 1)))
    for i in range(len(classifierArr)):
        classEst = stumpClassify(dataMatrix, classifierArr[i]['dim'],
                                 classifierArr[i]['thresh'],
                                 classifierArr[i]['ineq'])  # apply each stump
        aggClassEst += classifierArr[i]['alpha'] * classEst
        print(aggClassEst)
    return sign(aggClassEst)
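A short usage sketch, assuming the functions above are in scope. On this toy set, training typically reaches zero training error within a few rounds and stops early; the expected signs in the comment are indicative, not guaranteed.

datMat, classLabels = loadSimpData()
classifierArr = adaBoostTrainDS(datMat, classLabels, numIt=9)  # prints the total error after each round
print(adaClassify([[0, 0], [5, 5]], classifierArr))            # expected on this toy set: -1 for [0, 0], +1 for [5, 5]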