KNN idea:
1. Measure the similarity between the test sample and every training sample (the closer the distance to the target, the more similar), and take the top K nearest neighbors.
2. Count how many of those K neighbors belong to each class.
3. Pick the class with the largest count as the predicted class (e.g., with K = 5 and neighbor labels [A, A, B, A, B], the prediction is A).
Algorithm outline (time complexity is not considered here):
1. Read the test set, or generate one automatically.
2. Loop over the test set.
3. For each test sample, compute its distance to every row of the training set (using the two-point distance formula), and store each distance together with that row's class label (in a new matrix or collection).
4. Take the top K entries (this requires sorting the distance/label pairs by distance).
5. Group the K nearest labels by class, sort the groups by count, and the class with the most votes is the prediction. A minimal sketch of these steps is shown right below.
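Before the full NumPy version, here is a minimal plain-Python sketch of steps 3-5 for a single test sample. The helper name knn_predict and the toy data in the usage comment are illustrative only, not part of the original code:

# Minimal sketch of steps 3-5 (illustrative, standalone; no NumPy needed).
def knn_predict(inX, dataSet, labels, k):
    # step 3: Euclidean distance from inX to every training row, kept alongside its label
    dists = [(sum((x - y) ** 2 for x, y in zip(inX, row)) ** 0.5, labels[i])
             for i, row in enumerate(dataSet)]
    # step 4: sort by distance and keep the k nearest
    nearest = sorted(dists)[:k]
    # step 5: majority vote over the k labels
    counts = {}
    for _, lab in nearest:
        counts[lab] = counts.get(lab, 0) + 1
    return max(counts, key=counts.get)

# e.g. knn_predict([0, 0], [[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]], ['A', 'A', 'B', 'B'], 3) -> 'B'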
The following is the complete Python 2.7 implementation.
from numpy import *
import operator
from os import listdir
import pylab as pl
from matplotlib import pyplot as plt
def file2Matrix(filename):
    fr = open(filename)
    numberOfLines = len(fr.readlines())     # get the number of lines in the file
    returnMat = zeros((numberOfLines, 3))   # prepare the matrix to return (3 features per line)
    classLabelVector = []                   # prepare the label list to return
    fr = open(filename)                     # re-open so we read from the beginning again
    index = 0
    for line in fr.readlines():
        line = line.strip()
        listFromLine = line.split('\t')     # tab-separated: 3 features followed by the class label
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(int(listFromLine[-1]))
        index += 1
    return returnMat, classLabelVector
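The code above implies a tab-separated input format; a small illustration follows (the example rows and file name are for illustration only, the exact values do not matter):

# Each line of the data file is assumed to be: three numeric features, a tab between each, then an integer label, e.g.
#   40920   8.326976    0.953952    3
#   14488   7.153469    1.673904    2
# datingDataMat, datingLabels = file2Matrix('datingTestSet2.txt')  # returns an (n, 3) array and a list of n labels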
##
def createDataSet():
    group = array([[1.0, 1.1], [1.0, 1.0], [0, 0], [0, 0.1]])   # 4 toy samples, 2 features each
    label = ['A', 'A', 'B', 'B']
    return label, group
def classify0(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]
    print 'dataSet.shape[1]', dataSet.shape[1]
    # replicate inX into dataSetSize rows, subtract the training set, then apply the two-point distance formula
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances ** 0.5
    sortedDistIndicies = distances.argsort()   # indices of the training rows, nearest first
    classCount = {}
    for a in range(k):                         # vote with the k nearest labels
        voteIlabel = labels[sortedDistIndicies[a]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]              # label with the most votes
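A quick way to exercise classify0 on the toy data from createDataSet (the expected result follows from the distances, it is not stated in the original text):

# label, group = createDataSet()
# print classify0([0, 0], group, label, 3)   # expected 'B': the two points near the origin and [1.0, 1.0] are nearest, so B wins 2:1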
# Same as classify0, minus the debug print statement.
def classify01(inX, dataSet, labels, k):
    dataSetSize = dataSet.shape[0]
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances ** 0.5
    sortedDistIndicies = distances.argsort()
    classCount = {}
    for i in range(k):
        voteIlabel = labels[sortedDistIndicies[i]]
        classCount[voteIlabel] = classCount.get(voteIlabel, 0) + 1
    sortedClassCount = sorted(classCount.iteritems(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
def autoNorm(datamingDataMat):
    # min-max normalization: newValue = (oldValue - min) / (max - min), column by column
    minValue = datamingDataMat.min(0)
    maxValue = datamingDataMat.max(0)
    ranges = maxValue - minValue            # 'ranges' rather than 'range', to avoid shadowing the builtin
    print ranges
    normDataSet = zeros(shape(datamingDataMat))
    m = datamingDataMat.shape[0]
    normDataSet = datamingDataMat - tile(minValue, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))   # divide the shifted values, not the raw matrix
    return normDataSet, ranges, minValue
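A small worked example of what autoNorm computes (the numbers are illustrative, not taken from the dating data):

# For a column with min = 20 and max = 120, the raw value 45 becomes
#   (45 - 20) / (120 - 20) = 0.25
# so every feature is mapped into [0, 1] and no single large-valued feature dominates the distance calculation.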
if __name__ == "__main__":
    label, group = createDataSet()
    print label
##    pl.plot(group, 'o')
##    pl.show()
    #classify0([0,0],group,label,3)
    # read the data file
    #datamingDataMat,datingLabels = file2Matrix('E:\\数据挖掘资料\\《机器学习实战》源代码\\machinelearninginaction\\Ch02\\datingTestSet2.txt')
##    print len(datingLabels)
##    print datamingDataMat
##    fig = plt.figure()
##    ax = fig.add_subplot(111)
##    ax.scatter(datamingDataMat[:,0], datamingDataMat[:,1],
##               15.0*array(datingLabels), 15.0*array(datingLabels))
##    plt.show()
##    normMat, ranges, minValue = autoNorm(datamingDataMat)
##    #print 'normorize:',normMat,'ranges:',ranges
##    m = normMat.shape[0]
##    print m
    hoRatio = 0.10      # hold out 10% of the data as a test set, use the remaining 90% as training data
    datingDataMat, datingLabels = file2Matrix('E:\\数据挖掘资料\\《机器学习实战》源代码\\machinelearninginaction\\Ch02\\datingTestSet2.txt')   # load data set from file
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        classifierResult = classify0(normMat[i,:], normMat[numTestVecs:m,:], datingLabels[numTestVecs:m], 3)
        print "the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i])
        if (classifierResult != datingLabels[i]): errorCount += 1.0
    print "the total error rate is: %f" % (errorCount / float(numTestVecs))
    print errorCount
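Once the error rate looks acceptable, the same pieces can be reused to classify a fresh sample; a minimal sketch under the assumption that newSample holds raw, un-normalized feature values (the name and numbers are illustrative, not from the original code):

# newSample = array([30000, 8.5, 0.8])                        # hypothetical raw feature values
# prediction = classify0((newSample - minVals) / ranges,      # normalize with the stored min and range from autoNorm
#                        normMat, datingLabels, 3)
# print prediction                                            # one of the integer class labels from the data file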
---------------------------------------------------------------------------------------
Below is the Java implementation.
public class tztKNN {