.net 插件式开发——实现web框架中大数据算法嵌入(BP算法逼近)

 

  1. 关于算法的引入:插件式架构设计,可移植性强,利于算法的升级。

【插件式开发相关资料】https://www.cnblogs.com/lenic/p/4129096.html

  • 以BP算法为例:

1、首先定义一个接口规范

    /// <summary>
    /// Unified entry-point contract for algorithm plugins; implementations are
    /// loaded dynamically by the host (plugin-style architecture).
    /// NOTE(review): "IPluginPerfrom" / "ExcuteAlgorithmPlug" are misspellings of
    /// "Perform" / "Execute", but renaming would break every existing plugin.
    /// </summary>
    public interface IPluginPerfrom
    {
        /// <summary>
        /// Unified algorithm plugin entry point.
        /// </summary>
        /// <param name="argsOutNumber">Number of output parameters expected.</param>
        /// <param name="argsOut">Output parameters, written by the plugin.</param>
        /// <param name="argsIn">Input parameters.</param>
        /// <param name="DBConnectionString">Optional database connection string; may be null.</param>
        /// <returns>Result string; the exact format is implementation-defined (not visible here).</returns>
        string ExcuteAlgorithmPlug(int argsOutNumber, ref string[,] argsOut, string[,] argsIn, string DBConnectionString = null);
    }

  2、BP算法实现接口

// AForge Framework
// Approximation using Multi-Layer Neural Network
//
// Copyright © Andrew Kirillov, 2006
// [email protected]
//

using System;
using System.Drawing;
using System.Collections;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Windows.Forms;

using AForge;
using AForge.Neuro;
using AForge.Neuro.Learning;
using AForge.Controls;
using IPlugin;

using Newtonsoft.Json;
using System.Linq;
using System.Text;  



namespace BP
{
    /// 
    /// Summary description for Form1.
    /// 

    public class BP : IPluginPerfrom
    {
        //Chart chart = new Chart();
        //实现统一接口
        public string ExcuteAlgorithmPlug(int argsOutNumber, ref string[,] argsOut, string[,] argsIn, string DBConnectionString = null)
        {
            List resLists = new List();
            string dataStr = argsIn[0, 0]; //Json格式的数据
            List dht11Lists = dataStr.ToList();
            double[,] arr = new double[dht11Lists.Count+1, 2];


            for (int i = 0; i 
        /// 开始预测
        /// 
        /// 学习速率
        /// 
        /// alpha值
        /// 神经元的个数
        /// 迭代次数
        /// 
        /// <summary>
        /// Parses and clamps the network hyper-parameters from their raw string
        /// form into the corresponding fields, then clears the stop flag so the
        /// learning loop in SearchSolution may run. Unparseable input falls back
        /// to a documented default instead of throwing.
        /// </summary>
        /// <param name="learningRateRow">Learning rate; clamped to [0.00001, 1], default 0.1.</param>
        /// <param name="momentumRow">Momentum; clamped to [0, 0.5], default 0.</param>
        /// <param name="alphaRaw">Sigmoid alpha value; clamped to [0.001, 50], default 2.</param>
        /// <param name="neuronsRow">Neurons in the first layer; clamped to [5, 50], default 20.</param>
        /// <param name="iterationsRow">Iteration limit; at least 0, default 1000 (0 means run until stopped).</param>
        private void startApproximation(string learningRateRow, string momentumRow, string alphaRaw, string neuronsRow, string iterationsRow)
        {
            #region 神经网络参数
            // Use TryParse instead of Parse wrapped in bare catch-all blocks:
            // failed parsing is an expected condition, not exceptional control flow,
            // and the original `catch { }` also swallowed unrelated exceptions.
            double lr;
            learningRate = double.TryParse(learningRateRow, out lr)
                ? Math.Max(0.00001, Math.Min(1, lr))
                : 0.1;

            double mom;
            momentum = double.TryParse(momentumRow, out mom)
                ? Math.Max(0, Math.Min(0.5, mom))
                : 0;

            double alpha;
            sigmoidAlphaValue = double.TryParse(alphaRaw, out alpha)
                ? Math.Max(0.001, Math.Min(50, alpha))
                : 2;

            int neurons;
            neuronsInFirstLayer = int.TryParse(neuronsRow, out neurons)
                ? Math.Max(5, Math.Min(50, neurons))
                : 20;

            int iters;
            iterations = int.TryParse(iterationsRow, out iters)
                ? Math.Max(0, iters)
                : 1000;
            #endregion

            // Allow the training loop to proceed.
            needToStop = false;
        }
        #endregion

        /// <summary>
        /// Trains a 1-input / 1-output multi-layer network on (x, y) samples with
        /// back-propagation and returns the approximated curve.
        /// </summary>
        /// <param name="data">Learning samples: data[i, 0] = x, data[i, 1] = y.</param>
        /// <returns>
        /// solution[j, 0] = evenly spaced x over [minX, maxX] (samples + 1 points);
        /// solution[j, 1] = the network prediction mapped back to the original y range.
        /// </returns>
        public double[,] SearchSolution(double[,] data)
        {
            // number of learning samples
            int samples = data.GetLength(0);

            // Normalize data into the network's working range:
            // x -> [-1, 1], y -> [-0.85, 0.85] (bipolar sigmoid output range).
            // NOTE(review): a zero-width x or y range divides by zero and yields
            // NaN factors — callers should supply at least two distinct x and y values.
            double maxX = Caculate.getMax(data, 0);
            double minX = Caculate.getMin(data, 0);
            double LengthX = maxX - minX;

            double maxY = Caculate.getMax(data, 1);
            double minY = Caculate.getMin(data, 1);
            double LengthY = maxY - minY;

            double yFactor = 1.7 / LengthY;
            double yMin = minY;
            double xFactor = 2.0 / LengthX;
            double xMin = minX;

            // prepare learning data (normalized input/output pairs)
            double[][] input = new double[samples][];
            double[][] output = new double[samples][];

            for (int i = 0; i < samples; i++)
            {
                input[i] = new double[1];
                output[i] = new double[1];

                input[i][0] = (data[i, 0] - xMin) * xFactor - 1.0;
                output[i][0] = (data[i, 1] - yMin) * yFactor - 0.85;
            }

            // create multi-layer neural network: 1 input -> hidden layer -> 1 output
            ActivationNetwork network = new ActivationNetwork(
                new BipolarSigmoidFunction(sigmoidAlphaValue),
                1, neuronsInFirstLayer, 1);
            // create back-propagation teacher with the configured rate/momentum
            BackPropagationLearning teacher = new BackPropagationLearning(network);
            teacher.LearningRate = learningRate;
            teacher.Momentum = momentum;

            int iteration = 1;

            // solution array: samples + 1 evenly spaced points spanning [minX, maxX]
            double[,] solution = new double[samples + 1, 2];
            double[] networkInput = new double[1];

            // BUG FIX: the x grid was previously built as
            //   minX + j * LengthY / samples + 1
            // i.e. using the Y range and a stray "+ 1", so the curve was evaluated
            // at wrong x positions. The AForge approximation sample uses the X range:
            //   min + j * lengthX / points.
            for (int j = 0; j <= samples; j++)
            {
                solution[j, 0] = minX + (double)j * LengthX / samples;
            }

            // learning loop
            while (!needToStop)
            {
                // run one epoch of the learning procedure
                // (the per-epoch error and the per-sample learningError the original
                // computed were never read by anything, so that dead work is removed)
                teacher.RunEpoch(input, output);

                // evaluate the network over the x grid, mapping output back to y units
                for (int j = 0; j <= samples; j++)
                {
                    networkInput[0] = (solution[j, 0] - xMin) * xFactor - 1.0;
                    solution[j, 1] = (network.Compute(networkInput)[0] + 0.85) / yFactor + yMin;
                }

                iteration++;

                // stop once the configured iteration budget is exhausted
                // (iterations == 0 means run until needToStop is set externally)
                if ((iterations != 0) && (iteration > iterations))
                    break;
            }
            return solution;
        }
    }
}

  注:以上的BP算法为BP算法的函数逼近,下一步是需要将BP的学习训练网络与预测过程分离,即实时进行学习训练,按任务的指定进行预测。

【BP通过反向传递误差来调整网络参数】

BP函数逼近算法采用最速下降法,通过反向传播不断调整网络的权值和阈值,从而不断降低网络误差,使误差平方和最小。

BP神经网络模型包括输入层、隐含层和输出层。 输入层各神经元负责接收来自外界的输入信息,并传递给中间层各神经元;中间层是内部信息处理层,负责信息变换,根据信息变化能力的需求。最后中间层可将信息传递到输出层。输出层输出期望值,经过一次迭代后,若发现输出值与预期值的误差太大,则再次进入误差的反向传播过程。重复以上过程,调整各层的权值,直到BP网络的输出值的误差在可接受的范围内,则停止训练。网络学习过程结束。

 

【BP算法的具体步骤】

1、前馈计算

设置隐层第 j 个节点的输入和输出:I_j = Σ_i w_ij·O_i,O_j = f(I_j),其中 f(·) 为激励函数。

2、权值调整

设置误差函数Ep

调整输出层的权值

调整隐层的权值

 

转载于:https://www.cnblogs.com/Erma/p/9303992.html

你可能感兴趣的:(json,人工智能)