BRDFLafortuneFitting.Program.dfpmin C# (CSharp) Method

dfpmin() protected static method

Performs BFGS function minimization on a quadratic form function evaluated by the provided delegates
protected static dfpmin ( double[] _Coefficients, double _ConvergenceTolerance, out int _PerformedIterationsCount, BFGSFunctionEval _FunctionEval, BFGSFunctionGradientEval _FunctionGradientEval, object _Params ) : double
_Coefficients double[] The array of initial coefficients (indexed from 1!!) that will also contain the resulting coefficients when the routine has converged
_ConvergenceTolerance double The tolerance error to accept as the minimum of the function
_PerformedIterationsCount int The number of iterations performed to reach the minimum
_FunctionEval BFGSFunctionEval The delegate used to evaluate the function to minimize
_FunctionGradientEval BFGSFunctionGradientEval The delegate used to evaluate the gradient of the function to minimize
_Params object Some user params passed to the evaluation functions
return double
        /// <summary>
        /// Performs BFGS (Broyden–Fletcher–Goldfarb–Shanno) quasi-Newton minimization of a function
        /// evaluated through the provided delegates (Numerical Recipes "dfpmin" routine).
        /// NOTE: all working arrays follow the Numerical Recipes 1-based convention — slot 0 is unused.
        /// </summary>
        /// <param name="_Coefficients">The array of initial coefficients (indexed from 1!!) that will also contain the resulting coefficients when the routine has converged</param>
        /// <param name="_ConvergenceTolerance">The tolerance on the (scaled) gradient to accept as the minimum of the function</param>
        /// <param name="_PerformedIterationsCount">The number of iterations performed to reach the minimum</param>
        /// <param name="_FunctionEval">The delegate used to evaluate the function to minimize</param>
        /// <param name="_FunctionGradientEval">The delegate used to evaluate the gradient of the function to minimize</param>
        /// <param name="_Params">Some user params passed to the evaluation functions</param>
        /// <returns>The minimum function value found</returns>
        /// <exception cref="InvalidOperationException">Thrown when ITMAX iterations elapse without reaching convergence</exception>
        protected static double dfpmin( double[] _Coefficients, double _ConvergenceTolerance, out int _PerformedIterationsCount, BFGSFunctionEval _FunctionEval, BFGSFunctionGradientEval _FunctionGradientEval, object _Params )
        {
            double		Minimum = double.MaxValue;
            int			n = _Coefficients.Length - 1;	// 1-based arrays: the usable dimension excludes slot 0

            int			check,i,its,j;
            double		den,fac,fad,fae,fp,stpmax,sum=0.0,sumdg,sumxi,temp,test;

            double[]	dg = new double[1+n];			// Gradient delta between iterations
            double[]	g = new double[1+n];			// Current gradient
            double[]	hdg = new double[1+n];			// Inverse-Hessian times gradient delta
            double[][]	hessin = new double[1+n][];		// Approximate INVERSE Hessian, updated by the BFGS formula
            for ( i=1; i <= n; i++ )
                hessin[i] = new double[1+n];
            double[]	pnew = new double[1+n];			// Candidate point returned by the line search
            double[]	xi = new double[1+n];			// Current line-search direction

            // Initialize values
            fp = _FunctionEval( _Coefficients, _Params );
            _FunctionGradientEval( _Coefficients, g, _Params );

            // Start from the identity Hessian and the steepest-descent direction
            for ( i=1; i <= n; i++ )
            {
                for ( j=1; j <= n; j++ )
                    hessin[i][j]=0.0;

                hessin[i][i] = 1.0;

                xi[i] = -g[i];
                sum += _Coefficients[i]*_Coefficients[i];
            }

            // Maximum step length, scaled by the magnitude of the starting point (STPMX is a class constant)
            stpmax = STPMX * Math.Max( Math.Sqrt( sum ), n );
            for ( its=1; its <= ITMAX; its++ )
            {
                _PerformedIterationsCount = its;

                // The new function evaluation occurs in lnsrch
                // NOTE: "check" receives lnsrch's degenerate-step flag but is deliberately ignored here
                lnsrch( n, _Coefficients, fp, g, xi, pnew, out Minimum, stpmax, out check, _FunctionEval, _Params );
                ENSUREVALID( _Coefficients );	// presumably asserts the coefficients are finite — confirm against its definition
                fp = Minimum;

                for ( i=1; i<=n; i++ )
                {
                    xi[i] = pnew[i] - _Coefficients[i];	// Update the line direction
                    _Coefficients[i] = pnew[i];			// as well as the current point
                }
                ENSUREVALID( _Coefficients );

                // Test for convergence on Delta X (relative move, floored at 1 to avoid division blow-up near 0)
                test = 0.0;
                for ( i=1; i <= n; i++ )
                {
                    temp = Math.Abs( xi[i] ) / Math.Max( Math.Abs( _Coefficients[i] ), 1.0 );
                    if ( temp > test )
                        test = temp;
                }

                if ( test < TOLX )
                    return Minimum;	// Done!

                // Save the old gradient
                for ( i=1; i <= n; i++ )
                    dg[i] = g[i];

                // Get the new one
                _FunctionGradientEval( _Coefficients, g, _Params );

                // Test for convergence on zero gradient
                test = 0.0;
                den = Math.Max( Minimum, 1.0 );
                for ( i=1; i <= n; i++ )
                {
                    temp = Math.Abs( g[i] ) * Math.Max( Math.Abs( _Coefficients[i] ), 1.0 ) / den;
                    if ( temp > test )
                        test = temp;
                }

                if ( test < _ConvergenceTolerance )
                    return Minimum;	// Done!

                // Compute difference of gradients
                for ( i=1; i <= n ; i++ )
                    dg[i] = g[i]-dg[i];

                // ...and difference times current hessian matrix
                for ( i=1; i <= n; i++ )
                {
                    hdg[i]=0.0;
                    for ( j=1; j <= n; j++ )
                        hdg[i] += hessin[i][j] * dg[j];
                }

                // Calculate dot products for the denominators
                fac = fae = sumdg = sumxi = 0.0;
                for ( i=1; i <= n; i++ )
                {
                    fac += dg[i] * xi[i];
                    fae += dg[i] * hdg[i];
                    sumdg += dg[i] * dg[i];
                    sumxi += xi[i] * xi[i];
                }

                // Skip the Hessian update when fac is not sufficiently positive (guards the 1/fac division below)
                if ( fac * fac > EPS * sumdg * sumxi )
                {
                    fac = 1.0 / fac;
                    fad = 1.0 / fae;

                    // The vector that makes BFGS different from DFP
                    for ( i=1; i <= n; i++ )
                        dg[i] = fac * xi[i] - fad * hdg[i];

                    // BFGS Hessian update formula
                    for ( i=1; i <= n; i++ )
                        for ( j=1; j <= n; j++ )
                            hessin[i][j] += fac * xi[i] * xi[j] -fad * hdg[i] * hdg[j] + fae * dg[i] * dg[j];
                }

                // Now, calculate the next direction to go: xi = -hessin . g
                for ( i=1; i <= n; i++ )
                {
                    xi[i] = 0.0;
                    for ( j=1; j <= n; j++ )
                        xi[i] -= hessin[i][j] * g[j];
                }
            }

            // Was "new Exception(...)": throw the specific standard type instead of the base class
            throw new InvalidOperationException( "Too many iterations in dfpmin" );
        }