[Machine Learning][Source Code] Machine Learning in Action, Ch. 5: Logistic Regression

Saving the code here for reference. It is implemented in Python 3; for the detailed walkthrough, see Machine Learning in Action (Peter Harrington).

logRegres.py : 

#http://blog.csdn.net/sinat_17196995/article/details/57418519
from numpy import *


# 5-1 gradient ascent algorithm
def loadDataSet():
    dataMat = []; labelMat = []
    fr = open('testSet.txt')
    for line in fr.readlines():
        lineArr = line.strip().split()  # read line by line and split; the first two values per line are X1, X2
        dataMat.append([1.0, float(lineArr[0]), float(lineArr[1])])  # set X0 to 1.0, keep X1 and X2
        labelMat.append(int(lineArr[2]))
    return dataMat, labelMat


def sigmoid(inX):
    return 1.0 / (1 + exp(-inX))
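

# Note (my own addition, not from the book): for a strongly negative inX,
# exp(-inX) overflows and NumPy emits a RuntimeWarning. A minimal numerically
# stable sketch (stableSigmoid is my own name) splits the computation by sign:
def stableSigmoid(inX):
    x = asarray(inX, dtype=float)
    out = empty_like(x)
    pos = x >= 0
    out[pos] = 1.0 / (1.0 + exp(-x[pos]))
    expx = exp(x[~pos])  # safe: x < 0 here, so exp(x) <= 1
    out[~pos] = expx / (1.0 + expx)  # algebraically equal to 1/(1+exp(-x))
    return out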


def gradAscent(dataMatIn, classLabels):
    # convert the Python lists to NumPy matrices:
    # one row per sample, one column per feature
    dataMatrix = mat(dataMatIn)
    labelMat = mat(classLabels).transpose()
    m, n = shape(dataMatrix)
    # initialize
    alpha = 0.001  # step length
    maxCycles = 500  # max number of iterations
    weights = ones((n, 1))
    # gradient ascent
    for k in range(maxCycles):
        h = sigmoid(dataMatrix * weights)  # sigmoid outputs, an m*1 column vector (100*1 here)
        error = labelMat - h  # difference between the real label and the prediction
        weights = weights + alpha * dataMatrix.transpose() * error
    return weights
'''
import logRegres
from importlib import reload
reload(logRegres)
dataArr,labelMat=logRegres.loadDataSet()
weights=logRegres.gradAscent(dataArr,labelMat)
'''
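

# Convergence check (my own addition, not from the book): the update above,
# w <- w + alpha * X^T (y - sigmoid(Xw)), ascends the log-likelihood
#     l(w) = sum_i [ y_i*log(h_i) + (1 - y_i)*log(1 - h_i) ],
# so for a small enough alpha the value returned here should increase
# monotonically across the iterations of gradAscent.
def logLikelihood(dataMatIn, classLabels, weights):
    X = mat(dataMatIn)
    y = mat(classLabels).transpose()
    h = sigmoid(X * weights)
    return float(y.T * log(h) + (1 - y).T * log(1 - h))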


#5-2 draw the decision boundary
def plotBestFit(weights):
    import matplotlib.pyplot as plt
    dataMat, labelMat = loadDataSet()
    dataArr = array(dataMat)
    n = shape(dataArr)[0]
    xcord1 = []; ycord1 = []
    xcord2 = []; ycord2 = []
    for i in range(n):
        if int(labelMat[i]) == 1:
            xcord1.append(dataArr[i, 1])
            ycord1.append(dataArr[i, 2])
        else:
            xcord2.append(dataArr[i, 1])
            ycord2.append(dataArr[i, 2])
    fig = plt.figure()
    ax = fig.add_subplot(111)  # arguments: number of subplot rows, number of columns, subplot position
    ax.scatter(xcord1, ycord1, s=30, c='red', marker='s')
    ax.scatter(xcord2, ycord2, s=30, c='blue')
    x = arange(-3.0, 3.0, 0.1)
    # points on the boundary satisfy w0*x0 + w1*x1 + w2*x2 = 0, with x0 = 1, x = x1, y = x2
    y = (-weights[0] - weights[1] * x) / weights[2]
    ax.plot(x, y)
    plt.xlabel('X1'); plt.ylabel('X2')
    plt.show()
'''
reload(logRegres)
logRegres.plotBestFit(weights.getA()) #getA(): weights from matrix to array
'''


#5-3 stochastic gradient ascent algorithm
def stocGradAscent0(dataMatrix, classLabels):
    m, n = shape(dataMatrix)
    alpha = 0.01
    weights = ones(n)
    for j in range(500):  # 500 extra passes for accuracy (this outer loop is my addition; the book's listing runs a single pass)
        for i in range(m):  # process one sample at a time, in dataset order
            h = sigmoid(sum(dataMatrix[i] * weights))
            error = classLabels[i] - h
            weights += alpha * error * dataMatrix[i]
    return weights
'''
from numpy import *
reload(logRegres)
weights=logRegres.stocGradAscent0(array(dataArr),labelMat)
logRegres.plotBestFit(weights)
'''
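

# Because the per-sample update touches only one row of the data, stochastic
# gradient ascent is an online algorithm: it can keep learning as new labeled
# samples arrive. A minimal sketch (onlineUpdate is my own hypothetical helper,
# reusing the exact update rule from stocGradAscent0):
def onlineUpdate(weights, sample, label, alpha=0.01):
    h = sigmoid(sum(sample * weights))
    return weights + alpha * (label - h) * sample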


#5-4 improves on 5-3: alpha decays over time + samples are picked at random
#@numIter: number of passes over the data
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
    m, n = shape(dataMatrix)
    weights = ones(n)
    for j in range(numIter):  # outer passes over the data
        dataIndex = list(range(m))
        for i in range(m):
            # adjust the step length on every update: alpha decreases as the
            # iteration count rises, but never reaches 0
            alpha = 4 / (1.0 + j + i) + 0.01
            randIndex = int(random.uniform(0, len(dataIndex)))  # pick one sample at random
            sample = dataIndex[randIndex]  # bug fix: update with the randomly chosen sample, not sample i
            h = sigmoid(sum(dataMatrix[sample] * weights))
            error = classLabels[sample] - h
            weights += alpha * error * dataMatrix[sample]
            del(dataIndex[randIndex])  # remove the chosen sample from this pass
    return weights
'''
reload(logRegres)
weights=logRegres.stocGradAscent1(array(dataArr),labelMat)
logRegres.plotBestFit(weights)
'''
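

# Convergence sketch (my own addition, in the spirit of the book's
# weights-vs-iteration plots): record the weights after every pass of
# stocGradAscent1-style training and plot each component, to see how random
# sampling plus the decaying alpha damps the oscillations.
def plotWeightHistory(dataMatrix, classLabels, numIter=150):
    import matplotlib.pyplot as plt
    m, n = shape(dataMatrix)
    weights = ones(n)
    history = zeros((numIter, n))
    for j in range(numIter):
        dataIndex = list(range(m))
        for i in range(m):
            alpha = 4 / (1.0 + j + i) + 0.01
            randIndex = int(random.uniform(0, len(dataIndex)))
            sample = dataIndex[randIndex]
            h = sigmoid(sum(dataMatrix[sample] * weights))
            weights = weights + alpha * (classLabels[sample] - h) * dataMatrix[sample]
            del(dataIndex[randIndex])
        history[j] = weights  # snapshot after each full pass
    for col in range(n):
        plt.plot(history[:, col], label='w%d' % col)
    plt.xlabel('pass'); plt.ylabel('weight value')
    plt.legend(); plt.show()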


#5-5
# logistic regression classification function
def classifyVector(inX, weights):
    prob = sigmoid(sum(inX * weights))
    if prob > 0.5: return 1.0  # see figure 5-1: 0.5 is the decision threshold
    else: return 0.0
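

# Convenience wrapper (my own hypothetical helper, not from the book):
# classify every row of a sample matrix at once instead of looping over
# classifyVector one sample at a time.
def classifyAll(dataArr, weights):
    probs = sigmoid(dot(array(dataArr), weights))  # one probability per row
    return (probs > 0.5).astype(float)  # 1.0 above the threshold, else 0.0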


# load and format the training and test sets, then train and test the model
def colicTest():
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    # get the training set
    trainingSet = []; trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))  # class label
    # train the logistic model
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 500)
    # test the model
    errorCount = 0; numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0  # number of test samples
        # get one test sample
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        # apply the logistic model
        if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
            errorCount += 1
    # report the test result
    errorRate = float(errorCount) / numTestVec
    print("the error rate of this test is: %f" % errorRate)
    return errorRate


# because colicTest() uses *stochastic* gradient ascent, repeat it several
# times and average the error rate
def multiTest():
    numTests = 10  # number of repeated runs
    errorSum = 0.0
    for k in range(numTests):
        errorSum += colicTest()
    print("after %d iterations, the average error rate is: %f" % (numTests, errorSum / float(numTests)))


'''
reload(logRegres)
logRegres.colicTest()
logRegres.multiTest()  
'''
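
'''
# Reproducibility note (my own suggestion): stocGradAscent1 draws samples with
# random.uniform, so the error rates above vary from run to run. Seeding NumPy
# first makes a run repeatable (random here is numpy.random, because this
# module does `from numpy import *`):
random.seed(42)
logRegres.multiTest()
'''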
