梯度下降代码

#include<iostream>
using namespace std;

// Training-set size and feature count (first feature is the constant 1 bias term).
// NOTE(review): the original declared Number = 5 with only four initialized rows;
// the phantom all-zero fifth row contributed nothing to the cost or the gradient,
// so shrinking Number to the real sample count is the intended fix.
const int Number = 4;               // number of training samples
const int Dimesion = 3;             // features per sample, including the bias
const float learningRate=0.001;     // step size for every gradient update
const float errorThr=1; //variance threshold: stop once cost drops below this
const int MAX=1000;     //Max times of iteration

// Each row: {bias=1, feature1, feature2}.
float x[Number][Dimesion]={
    {1,1,4},
    {1,2,5},
    {1,5,1},
    {1,4,2},
};
float y[Number]={19,26,19,20};

// Model parameters (theta), updated in place by the descent routines.
float parameter[Dimesion] = {0,0,0};

//calculate the costFunction based on the present papramer
// Mean-squared-error cost: J(theta) = (1/(2m)) * sum_i (h(x_i) - y_i)^2,
// where h(x) = theta . x. Reads the file globals y, Number and Dimesion.
//   parameter - current model coefficients (length Dimesion)
//   inputData - training samples, one row per example
// Returns the scalar cost over the whole training set.
float costFunction(float *parameter,float inputData[][3]){
    float costValue = 0.0;
    for(int i=0;i<Number;i++){
        // h(x_i) = sum_j theta_j * x_ij
        float predicted = 0.0;
        for(int j=0;j<Dimesion;j++){
            predicted += (parameter[j]*inputData[i][j]);
        }
        float residual = predicted - y[i];
        costValue += residual*residual;
    }
    // BUG FIX: the original returned costValue/2*Number, which MULTIPLIES
    // by Number; the LMS cost divides by 2*Number.
    return costValue/(2*Number);
}

//change the parameter by partial deriative
// One batch-gradient-descent sweep: for each dimension, accumulate the
// gradient over all samples and move parameter[] by one learningRate step.
//   parameter       - coefficients, updated in place
//   inputData       - training samples
//   inputResultData - target values y_i
// Returns 1 (status code kept for compatibility with existing callers).
int changeParameter(float *parameter,float inputData[][3],float inputResultData[]){
	float tempParameter[Dimesion];
	// Snapshot the parameters: h(x) must be evaluated with the values from
	// the start of the sweep, not with partially updated ones.
	for(int i=0;i<Dimesion;i++)
		tempParameter[i] = parameter[i];

	for(int i=0;i<Dimesion;i++){
        for(int j=0;j<Number;j++){
            // h(x_j) using the snapshotted parameters.
            float hypothesis = 0.0;
            for(int k=0;k<Dimesion;k++){
                // BUG FIX: the original referenced an undeclared array
                // `input` here and in the update below; it must be the
                // inputData argument for the code to compile.
                hypothesis += tempParameter[k]*inputData[j][k];
            }
            // LMS update for dimension i from sample j.
            parameter[i]+=learningRate*(inputResultData[j]-hypothesis)*inputData[j][i];
        }//end updating one dimesion of parameter
    }
    return 1;
}

//batch gradient descent
// Batch gradient descent driver: repeatedly evaluates the cost and updates
// the parameters until the cost drops below errorThr or MAX iterations pass.
//   parameter       - coefficients, updated in place
//   inputData       - training samples
//   inputResultData - target values
// Returns 1 (status kept for compatibility with existing callers).
float DescendAlgorithm(float *parameter,float inputData[][3],float inputResultData[]){
    for(int i=0;i<MAX;i++){
        float errorCost = costFunction(parameter,inputData);
        if(errorCost<errorThr)
            break;
        printf("now the error is %f\n",errorCost);
        changeParameter(parameter,inputData,inputResultData);
        printf("the next parameter is ");
        // FIX: the original inner loop reused `i`, shadowing the outer
        // iteration counter; use a distinct name.
        for(int d=0;d<Dimesion;d++){
            printf(" %f ",parameter[d]);
        }
        printf("\n");
    }
    return 1;
}

//随机梯度下降
// Stochastic gradient descent: updates the parameters after every single
// sample instead of after a full pass over the training set.
//   parameter   - coefficients, updated in place
//   inputData   - training samples
//   inputResult - target values
// Returns 1 (status kept for compatibility with existing callers).
int Stochastic(float *parameter,float inputData[][3],float inputResult[]){
	float errorCost = costFunction(parameter,inputData);
	if(errorCost<errorThr)
		return 1;
	for(int iteration=0;iteration<MAX;iteration++){
		//stochastic pass over the samples
		for(int i=0;i<Number;i++){
			// h(x_i) with the current parameters.
			float hypothesis = 0.0;
			for(int k=0;k<Dimesion;k++){
				hypothesis += inputData[i][k]*parameter[k];
			}
			// Update every dimension from this single sample.
			printf("the next parameter is ");
			for(int j=0;j<Dimesion;j++){
				parameter[j] += (inputResult[i]-hypothesis)*learningRate*inputData[i][j];
				printf("%f ",parameter[j]);
			}
			printf("\n");
			errorCost=costFunction(parameter,inputData);
			printf("error cost is %f\n",errorCost);
			if(errorCost<errorThr)
				// BUG FIX: the original `break` only left the inner sample
				// loop; the outer loop then kept running (and kept updating
				// the parameters) for the remaining MAX iterations even
				// after convergence. Return exits both loops.
				return 1;
		}//end one stochastic pass
	}//end when the iteration becomes MAX
	return 1;
}
// Predict y for one sample using the global parameter vector, print it,
// and return 1 (status code, not the prediction).
float testData(float *inputData){
	float prediction = 0.0f;
	for(int d=0;d<Dimesion;d++)
		prediction += inputData[d]*parameter[d];
	printf("result of test data is %f\n",prediction);
	return 1;
}

// Reset every coefficient to zero so another descent run starts fresh.
int clearParameter(float parameter[]){
	int idx = 0;
	while(idx < Dimesion){
		parameter[idx] = 0;
		++idx;
	}
	return 1;
}

// Demo driver: run batch gradient descent, then reset the parameters and
// run stochastic gradient descent, then predict a held-out sample {1,10,20}.
int main(){
    DescendAlgorithm(parameter,x,y);
	system("pause"); // Windows-only console pause; a harmless no-op elsewhere
	clearParameter(parameter);
	Stochastic(parameter,x,y);
	float ForTestData[] = {1,10,20};
	testData(ForTestData);
    system("pause");
    // BUG FIX: the original returned 1, which reports failure to the OS;
    // a successful run must return 0.
    return 0;
}


按照机器学习公开课的第一个讲义写的，那个讲义讲得很清楚，官网可以下载讲义。
  
梯度下降是回归分析的基础内容,顺便写写。
梯度上升可以估计最大值,区别只在于改变参数的时候改减为加。自己的理解

你可能感兴趣的:(梯度下降代码)