/// <summary>
/// Runs face detection on the given texture and returns facial landmarks for the
/// largest face found on this frame (or the last known face if nothing qualifies).
/// </summary>
/// <param name="texture">Input Unity texture</param>
/// <param name="texParams">Texture parameters (flipped, rotated etc.)</param>
/// <returns>68-point landmark array of the biggest detected face, or the previously cached face when no new face is detected</returns>
public virtual Point[] ProcessTexture(T texture, Unity.TextureConversionParams texParams)
{
// convert Unity texture to OpenCv::Mat
ImportTexture(texture, texParams);
DataStabilizer.ThresholdFactor = 1;
// convert to grayscale and smooth; Mat wraps native memory (IDisposable),
// so dispose it deterministically instead of leaking it each frame
using (Mat gray = new Mat())
{
Cv2.CvtColor(processingImage, gray, ColorConversionCodes.BGR2GRAY);
Cv2.Blur(gray, gray, new Size(10, 10));
// fix shadows
// Cv2.EqualizeHist(gray, gray);
// detect matching regions (faces bounding)
Rect[] rawFaces = cascadeFaces.DetectMultiScale(gray, 1.2, 6);
// drop tracked faces when the detection count changes so slots re-sync below
if (Faces.Count != rawFaces.Length)
{
Faces.Clear();
}
// per detected face: refresh/create its tracking object and detect landmarks,
// keeping the largest face seen this frame
Point[] maxFace = lastFace;
double maxFaceSize = 0;
for (int i = 0; i < rawFaces.Length; ++i)
{
Rect faceRect = rawFaces[i];
using (Mat grayFace = new Mat(gray, faceRect))
{
// another trick: confirm the face with eye detector, will cut some false positives
if (cutFalsePositivesWithEyesSearch && null != cascadeEyes)
{
Rect[] eyes = cascadeEyes.DetectMultiScale(grayFace);
if (eyes.Length == 0 || eyes.Length > 2)
{
// NOTE(review): this 'continue' skips slot i, so a later Faces[i]
// may pair a detection with a stale tracking object — confirm intended
continue;
}
}
// get the tracking object for this slot, creating one if needed
Demo.DetectedFace face = null;
if (Faces.Count < i + 1)
{
face = new Demo.DetectedFace(DataStabilizer, faceRect);
Faces.Add(face);
}
else
{
face = Faces[i];
face.SetRegion(faceRect);
}
// landmarks
if (null != shapeFaces)
{
Point[] marks = shapeFaces.DetectLandmarks(gray, faceRect);
// we have 68-point predictor: points 0 and 16 are the jawline extremes,
// so their squared distance approximates face width
if (marks.Length == 68)
{
double size = Point.DistancePow2(marks[0], marks[16]);
if (size > maxFaceSize)
{
maxFaceSize = size;
maxFace = marks;
}
}
}
}
}
// cache the biggest face so frames with no detection can reuse it
if (maxFaceSize != 0)
{
lastFace = maxFace;
}
return maxFace;
}
}