Accord.Imaging.FastRetinaKeypointDescriptor.Compute C# (CSharp) Method

Compute() public method

Describes the specified points, i.e. computes and sets the orientation and descriptor vector fields of each FastRetinaKeypoint.
public Compute ( IList&lt;FastRetinaKeypoint&gt; points ) : void
points IList&lt;FastRetinaKeypoint&gt; The points to be described.
return void
        /// <summary>
        ///   Describes the specified points: estimates (when orientation
        ///   normalization is enabled) the orientation of each
        ///   <c>FastRetinaKeypoint</c> and extracts its binary FREAK descriptor,
        ///   storing both on the keypoint itself. Keypoints whose sampling
        ///   pattern would fall outside the image are removed from
        ///   <paramref name="points"/> in place.
        /// </summary>
        ///
        /// <param name="points">The points to be described. Points too close to
        ///   the image border are removed from this list as a side effect.</param>
        ///
        public void Compute(IList<FastRetinaKeypoint> points)
        {
            // Aliases matching the names used in the original OpenCV FREAK code.
            const int CV_FREAK_SMALLEST_KP_SIZE = FastRetinaKeypointPattern.Size;
            const int CV_FREAK_NB_SCALES = FastRetinaKeypointPattern.Scales;
            const int CV_FREAK_NB_ORIENTATION = FastRetinaKeypointPattern.Orientations;

            var patternSizes = pattern.patternSizes;
            var pointsValues = pattern.pointsValues;
            var orientationPairs = pattern.orientationPairs;
            var descriptionPairs = pattern.descriptionPairs;
            var step = pattern.step;


            // used to save pattern scale index corresponding to each keypoint
            var scaleIndex = new List<int>(points.Count);
            for (int i = 0; i < points.Count; i++)
                scaleIndex.Add(0);


            // 1. Compute the scale index corresponding to the keypoint
            //  size and remove keypoints which are close to the border
            //
            if (IsScaleNormal)
            {
                // Iterate backwards so RemoveAt does not shift unvisited items.
                for (int k = points.Count - 1; k >= 0; k--)
                {
                    // Quantize the keypoint size (relative to the smallest
                    // pattern size, on a log scale) into one of the available
                    // pattern scale indices.
                    double ratio = points[k].Scale / CV_FREAK_SMALLEST_KP_SIZE;
                    scaleIndex[k] = Math.Max((int)(Math.Log(ratio) * step + 0.5), 0);

                    if (scaleIndex[k] >= CV_FREAK_NB_SCALES)
                        scaleIndex[k] = CV_FREAK_NB_SCALES - 1;

                    // Check if the description at this position and scale fits inside the image
                    if (points[k].X <= patternSizes[scaleIndex[k]] ||
                         points[k].Y <= patternSizes[scaleIndex[k]] ||
                         points[k].X >= Image.Width - patternSizes[scaleIndex[k]] ||
                         points[k].Y >= Image.Height - patternSizes[scaleIndex[k]])
                    {
                        points.RemoveAt(k);  // No, it doesn't. Remove the point.
                        scaleIndex.RemoveAt(k);
                    }
                }
            }

            else // if (!IsScaleNormal)
            {
                // Without scale normalization, every keypoint shares the same
                // fixed scale index.
                int scale = Math.Max((int)(Constants.Log3 * step + 0.5), 0);

                for (int k = points.Count - 1; k >= 0; k--)
                {
                    // equivalent to the formulae when the scale is normalized with 
                    // a constant size of keypoints[k].size = 3 * SMALLEST_KP_SIZE

                    scaleIndex[k] = scale;
                    if (scaleIndex[k] >= CV_FREAK_NB_SCALES)
                        scaleIndex[k] = CV_FREAK_NB_SCALES - 1;

                    // Same border check as above: discard points whose pattern
                    // would sample outside the image.
                    if (points[k].X <= patternSizes[scaleIndex[k]] ||
                        points[k].Y <= patternSizes[scaleIndex[k]] ||
                        points[k].X >= Image.Width - patternSizes[scaleIndex[k]] ||
                        points[k].Y >= Image.Height - patternSizes[scaleIndex[k]])
                    {
                        points.RemoveAt(k);
                        scaleIndex.RemoveAt(k);
                    }
                }
            }


            // 2. Allocate descriptor memory, estimate 
            //    orientations, and extract descriptors
            //

            // For each interest (key/corners) point
            for (int k = 0; k < points.Count; k++)
            {
                int thetaIndex = 0;

                // Estimate orientation
                if (!IsOrientationNormal)
                {
                    // Orientation is not normalized, assign 0.
                    points[k].Orientation = thetaIndex = 0;
                }

                else // if (IsOrientationNormal)
                {
                    // Get intensity values in the unrotated patch
                    for (int i = 0; i < pointsValues.Length; i++)
                        pointsValues[i] = mean(points[k].X, points[k].Y, scaleIndex[k], 0, i);

                    // Accumulate a local gradient estimate (a, b) over the
                    // orientation pairs. NOTE(review): the / 2048 suggests the
                    // pair weights are fixed-point values scaled by 2^11 —
                    // confirm against FastRetinaKeypointPattern.
                    int a = 0, b = 0;
                    for (int m = 0; m < orientationPairs.Length; m++)
                    {
                        var p = orientationPairs[m];
                        int delta = (pointsValues[p.i] - pointsValues[p.j]);
                        a += delta * (p.weight_dx) / 2048;
                        b += delta * (p.weight_dy) / 2048;
                    }

                    // Orientation in degrees (Atan2 yields (-180, 180]); then
                    // quantize into one of the precomputed orientation bins.
                    points[k].Orientation = Math.Atan2(b, a) * (180.0 / Math.PI);
                    thetaIndex = (int)(CV_FREAK_NB_ORIENTATION * points[k].Orientation * (1 / 360.0) + 0.5);

                    if (thetaIndex < 0) // bound in interval
                        thetaIndex += CV_FREAK_NB_ORIENTATION;
                    if (thetaIndex >= CV_FREAK_NB_ORIENTATION)
                        thetaIndex -= CV_FREAK_NB_ORIENTATION;
                }

                // Extract descriptor at the computed orientation
                for (int i = 0; i < pointsValues.Length; i++)
                    pointsValues[i] = mean(points[k].X, points[k].Y, scaleIndex[k], thetaIndex, i);


                // Extract either the standard descriptors of 512-bits (64 bytes)
                //   or the extended descriptors of 1024-bits (128 bytes) length.
                //
                if (!Extended)
                {
                    points[k].Descriptor = new byte[64];
                    for (int m = 0; m < descriptionPairs.Length; m++)
                    {
                        var p = descriptionPairs[m];
                        byte[] descriptor = points[k].Descriptor;

                        // Bit m is set when the first point of the pair is
                        // brighter than the second.
                        unchecked
                        {
                            if (pointsValues[p.i] > pointsValues[p.j])
                                descriptor[m / 8] |= (byte)(1 << m % 8);
                            else descriptor[m / 8] &= (byte)~(1 << m % 8);
                        }

                    }
                }

                else // if (Extended)
                {
                    // Extended mode compares every pair (i, j) with i > j
                    // instead of only the preselected description pairs.
                    points[k].Descriptor = new byte[128];
                    for (int i = 1, m = 0; i < pointsValues.Length; i++)
                    {
                        for (int j = 0; j < i; j++, m++)
                        {
                            byte[] descriptor = points[k].Descriptor;

                            unchecked
                            {
                                if (pointsValues[i] > pointsValues[j])
                                    descriptor[m / 8] |= (byte)(1 << m % 8);
                                else descriptor[m / 8] &= (byte)~(1 << m % 8);
                            }
                        }
                    }
                }
            }
        }

Usage Example

Code Example #1
0
        /// <summary>
        ///   Process image looking for interest points.
        /// </summary>
        ///
        /// <param name="image">Source image data to process.</param>
        ///
        /// <returns>Returns list of found interest points.</returns>
        ///
        /// <summary>
        ///   Process image looking for interest points.
        /// </summary>
        ///
        /// <param name="image">Source image data to process.</param>
        ///
        /// <returns>Returns list of found interest points.</returns>
        ///
        public List <FastRetinaKeypoint> ProcessImage(UnmanagedImage image)
        {
            // Only grayscale and the common RGB formats are supported.
            bool supportedFormat =
                image.PixelFormat == PixelFormat.Format8bppIndexed ||
                image.PixelFormat == PixelFormat.Format24bppRgb ||
                image.PixelFormat == PixelFormat.Format32bppRgb ||
                image.PixelFormat == PixelFormat.Format32bppArgb;

            if (!supportedFormat)
                throw new UnsupportedImageFormatException("Unsupported pixel format of the source image.");

            // Work on a grayscale view of the input, converting when needed.
            grayImage = (image.PixelFormat == PixelFormat.Format8bppIndexed)
                ? image
                : Grayscale.CommonAlgorithms.BT709.Apply(image);

            // 1. Extract corner points from the image and wrap them as keypoints.
            var features = new List <FastRetinaKeypoint>();
            foreach (IntPoint corner in Detector.ProcessImage(grayImage))
                features.Add(new FastRetinaKeypoint(corner.X, corner.Y));

            // 2. Compute the integral image used when describing the keypoints.
            integral = IntegralImage.FromBitmap(grayImage);

            // 3. Compute feature descriptors, if a descriptor type was requested.
            descriptor = null;
            if (featureType != FastRetinaKeypointDescriptorType.None)
            {
                descriptor = GetDescriptor();
                descriptor.Compute(features);
            }

            return features;
        }
All Usage Examples Of Accord.Imaging.FastRetinaKeypointDescriptor::Compute