LWR: Locally Weighted Linear Regression

After watching lecture 3 of the machine learning course, I implemented the LWR (locally weighted regression) algorithm.
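For reference, the two formulas the code implements (written here in plain notation) are the Gaussian weight assigned to each training sample and the weighted least-squares cost, where tau is the kernel bandwidth (the code below uses tau = 6):

w(i) = exp( -|x(i) - x|^2 / (2*tau^2) )
J(theta) = (1/2) * sum_i w(i) * (y(i) - h(x(i)))^2

Each sample influences the fit in proportion to its closeness to the query point x, which is what makes the regression "local".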
#include<iostream>
#include<cstdio>
#include<cmath>
#include<cstdlib>
using namespace std;

const int Number = 6;               //number of training samples
const int Dimesion = 3;             //1 (bias term) + 2 features
const float learningRate=0.001;
const float errorThr=1;             //cost threshold for convergence
const int MAX=1000;                 //max times of iteration

typedef struct Data{
	float vectorComponent[Dimesion];   //(bias, feature1, feature2)
}vectorData;

vectorData x[Number] = {   //training inputs, each (1, x, y)
   /* {1,1,4},
    {1,2,5},
    {1,5,1},
    {1,4,2},*/
	{1,1,1},
    {1,1,3},
    {1,1,2},
    {1,2,3},
	{1,2,1},
	{1,2,2},
};
float y[Number]={2,10,5,13,5,8};   //targets: z = x^2 + y^2 for each sample
//LWR (locally weighted regression)
float weightValue(vectorData xi,vectorData x){
	//squared Euclidean distance between the training point and the query
	float weight = 0.0;
	for(int i=0;i<Dimesion;i++){
		weight+=pow(xi.vectorComponent[i]-x.vectorComponent[i],2);
	}
	//Gaussian kernel with bandwidth tau = 6, so 2*tau^2 = 2*36
	float tempWeight = exp(-(weight/(2*36)));
	//truncate tiny weights to zero so distant points are ignored entirely
	if(tempWeight<0.02)
		tempWeight = 0.0;
	return tempWeight;
}
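//A quick check of the bandwidth choice: for the query (1,1.5,1.5) used in
//testLWA below, the nearest sample x[0] = (1,1,1) is at squared distance 0.5,
//so its weight is exp(-0.5/72), roughly 0.993; nearby points count almost fully:
//    vectorData q = {1,1.5,1.5};
//    printf("%f\n",weightValue(x[0],q));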

//dot product of two vectors
float multiPly(vectorData x1,vectorData x2){
	float temp = 0.0;
	for(int i=0;i<Dimesion;i++){
		temp += x1.vectorComponent[i]*x2.vectorComponent[i];
	}
	return temp;
}

//element-wise vector addition
vectorData addVectorData(vectorData x1,vectorData x2){
	vectorData temp;
	for(int i=0;i<Dimesion;i++)
		temp.vectorComponent[i] = x1.vectorComponent[i]+x2.vectorComponent[i];
	return temp;
}

//element-wise vector subtraction
vectorData minusVectorData(vectorData x1,vectorData x2){
	vectorData temp;
	for(int i=0;i<Dimesion;i++)
		temp.vectorComponent[i] = x1.vectorComponent[i]-x2.vectorComponent[i];
	return temp;
}

//scale a vector by a scalar
vectorData numberMultiVectorData(float para,vectorData x1){
	vectorData temp;
	for(int i=0;i<Dimesion;i++)
		temp.vectorComponent[i] = x1.vectorComponent[i]*para;
	return temp;
}
//weighted least-squares cost: J = (1/2) * sum_i w_i * (y_i - h(x_i))^2
float costFunction(vectorData parameter[],vectorData inputData[],float inputResultData[],vectorData object){
	float costValue = 0.0;
	float tempValue = 0.0;
	for(int i=0;i<Number;i++){
		tempValue = 0.0;

		//consider all the parameter vectors although most of them are zero
		for(int j=0;j<Number;j++)
			tempValue += multiPly(parameter[j],inputData[i]);
		costValue += weightValue(inputData[i],object)*pow((inputResultData[i]-tempValue),2);
	}

	return (costValue/2);
}


int LocallyWeightedRegression(vectorData parameter[],vectorData inputData[],float resultData[],vectorData objectVector){
	float tempValue = 0.0;
	float errorCost = 0.0;
	float weightedValue = 0.0;
	errorCost=costFunction(parameter,inputData,resultData,objectVector);
	if(errorCost<errorThr)
		return 1;
	for(int iteration=0;iteration<MAX;iteration++){

		//one stochastic pass over the training samples
		for(int i=0;i<Number;i++){
			//calculate h(x) for the current sample
			weightedValue = weightValue(inputData[i],objectVector);
			tempValue=0.0;
			for(int j=0;j<Number;j++)
				tempValue+=multiPly(parameter[j],inputData[i]);
			//update the parameters by stochastic gradient descent:
			//theta := theta + alpha * w_i * (y_i - h(x_i)) * x_i
			printf("the next parameter is ");
			for(int ii=0;ii<Number;ii++){
				parameter[ii] = addVectorData(parameter[ii],numberMultiVectorData(weightedValue*learningRate*(resultData[i]-tempValue),inputData[i]));
				//print only the parameter vectors that are no longer all-zero
				if(multiPly(parameter[ii],parameter[ii])!=0){
					for(int jj=0;jj<Dimesion;jj++){
						printf("%f ",parameter[ii].vectorComponent[jj]);
					}
				}
			}
			printf("\n");
			errorCost=costFunction(parameter,inputData,resultData,objectVector);
			printf("error cost is %f\n",errorCost);
			if(errorCost<errorThr)
				break;
		}//end of one stochastic pass

		//also stop the outer loop once the cost is below the threshold
		if(errorCost<errorThr)
			break;
	}//end when the iteration reaches MAX

	//evaluate the fitted model at the query point
	float resultValue = 0.0;
	for(int i=0;i<Number;i++){
		resultValue += weightValue(inputData[i],objectVector)*multiPly(parameter[i],objectVector);
	}
	printf("result value is %f \n",resultValue);
	return 1;
}

int testLWA(){
	vectorData objectData = {1,1.5,1.5};   //query point: (bias, x, y) = (1, 1.5, 1.5)
	vectorData localParameter[Number] = {0.0};   //all parameter vectors start at zero
	LocallyWeightedRegression(localParameter,x,y,objectData);
	return 1;
}
int main(){
  //  DescendAlgorithm(parameter,x,y);
//	system("pause");
	//clearParameter(parameter);
	//Stochastic(parameter,x,y);
	//float ForTestData[] = {1,10,20};
	//testData(ForTestData);
	testLWA();
    system("pause");
    return 1;
}
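One note on building: the listing needs the <cmath>, <cstdio>, and <cstdlib> headers included at the top (for pow/exp, printf, and system respectively), and system("pause") is Windows-specific, so on other platforms it can simply be removed. Assuming the file is saved as lwr.cpp, something like g++ lwr.cpp -o lwr builds it.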

The target function being fitted is the surface z = x^2 + y^2.
The test query point is (1.5, 1.5).
The computed result is 5.124. (For reference, the exact value is 1.5^2 + 1.5^2 = 4.5; some gap is expected, since a locally linear model is fitted to only six training points.)
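For comparison, LWR is usually solved in closed form rather than by gradient descent: for each query q you build the diagonal weight matrix W, solve the weighted normal equations theta = (X^T W X)^{-1} X^T W y, and predict theta . q. Below is a minimal self-contained sketch of that approach on the same six samples with the same bandwidth tau = 6 (the helper names lwrPredict and solve3 are just illustrative, not from the listing above):

#include <cstdio>
#include <cmath>

const int N = 6, D = 3;
float X[N][D] = {{1,1,1},{1,1,3},{1,1,2},{1,2,3},{1,2,1},{1,2,2}};
float Y[N] = {2,10,5,13,5,8};

//solve the 3x3 system A*theta = b by Gaussian elimination with partial pivoting
void solve3(float A[D][D], float b[D], float theta[D]){
	for(int k=0;k<D;k++){
		int p = k;
		for(int r=k+1;r<D;r++) if(fabs(A[r][k])>fabs(A[p][k])) p = r;
		for(int c=0;c<D;c++){ float t = A[k][c]; A[k][c] = A[p][c]; A[p][c] = t; }
		float t = b[k]; b[k] = b[p]; b[p] = t;
		for(int r=k+1;r<D;r++){
			float f = A[r][k]/A[k][k];
			for(int c=k;c<D;c++) A[r][c] -= f*A[k][c];
			b[r] -= f*b[k];
		}
	}
	for(int k=D-1;k>=0;k--){
		theta[k] = b[k];
		for(int c=k+1;c<D;c++) theta[k] -= A[k][c]*theta[c];
		theta[k] /= A[k][k];
	}
}

//accumulate X^T W X and X^T W y, solve for theta, then predict theta . q
float lwrPredict(const float q[D], float tau){
	float A[D][D] = {}, b[D] = {}, theta[D];
	for(int i=0;i<N;i++){
		float d2 = 0;
		for(int j=0;j<D;j++) d2 += (X[i][j]-q[j])*(X[i][j]-q[j]);
		float w = exp(-d2/(2*tau*tau));   //same Gaussian kernel as weightValue above
		for(int r=0;r<D;r++){
			for(int c=0;c<D;c++) A[r][c] += w*X[i][r]*X[i][c];
			b[r] += w*X[i][r]*Y[i];
		}
	}
	solve3(A,b,theta);
	float pred = 0;
	for(int j=0;j<D;j++) pred += theta[j]*q[j];
	return pred;
}

int main(){
	float q[D] = {1,1.5f,1.5f};
	printf("closed-form LWR prediction: %f\n",lwrPredict(q,6));
	return 0;
}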
