开发者

Eye Detection Using Opencv

I am working on object detection (specifically, eyes) using OpenCV. The code below fails to identify an eye object — the detected rectangle is not even close to the actual eye position. Can anyone help me find the problem?

if(imageView.image) {
  cvSetErrMode(CV_ErrModeParent);

  // Convert the UIImage into an OpenCV IplImage (full resolution).
  IplImage *image = [self CreateIplImageFromUIImage:imageView.image];

  // Scale down by 2x so face detection runs faster on device.
  IplImage *small_image = cvCreateImage(cvSize(image->width/2, image->height/2), IPL_DEPTH_8U, 3);
  cvPyrDown(image, small_image, CV_GAUSSIAN_5x5);
  int scale = 2; // detections made on small_image must be scaled back up by this factor

  // Load the bundled Haar cascades: one for frontal faces, one for eyes.
  // (Original code had scraping garbage injected into this selector name —
  // it must be cStringUsingEncoding:.)
  NSString *eyeCascadePath  = [[NSBundle mainBundle] pathForResource:@"haarcascade_eye" ofType:@"xml"];
  NSString *faceCascadePath = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_default" ofType:@"xml"];
  CvHaarClassifierCascade *cascade  = (CvHaarClassifierCascade*)cvLoad([faceCascadePath cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL, NULL);
  CvHaarClassifierCascade *cascade1 = (CvHaarClassifierCascade*)cvLoad([eyeCascadePath cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL, NULL);
  CvMemStorage *storage = cvCreateMemStorage(0);

  // Detect faces on the half-size image.
  CvSeq *faces = cvHaarDetectObjects(small_image, cascade, storage, 1.2f, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(20, 20));
  cvReleaseImage(&small_image);

  // Create a bitmap canvas at full image size to draw the results on.
  CGImageRef imageRef = imageView.image.CGImage;
  CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
  CGContextRef contextRef = CGBitmapContextCreate(NULL, imageView.image.size.width, imageView.image.size.height,
              8, imageView.image.size.width * 4,
              colorSpace, kCGImageAlphaPremultipliedLast|kCGBitmapByteOrderDefault);
  CGContextDrawImage(contextRef, CGRectMake(0, 0, imageView.image.size.width, imageView.image.size.height), imageRef);

  CGContextSetLineWidth(contextRef, 4);
  CGContextSetRGBStrokeColor(contextRef, 0.0, 0.0, 1.0, 0.5);

  // Draw the detected faces on the image. Face coordinates come from the
  // half-size image, so scale them back up to full-image coordinates.
  for(int i = 0; i < faces->total; i++)
  {
   NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];

   CvRect cvrect = *(CvRect*)cvGetSeqElem(faces, i);
   CGRect face_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake(cvrect.x * scale, cvrect.y * scale, cvrect.width * scale, cvrect.height * scale));

   if(overlayImage)
   {
    CGContextDrawImage(contextRef, face_rect, overlayImage.CGImage);
   }
   else
   {
    CGContextStrokeRect(contextRef, face_rect);
   }
   [pool release];
  }

  cvClearMemStorage(storage);

  // Restrict eye detection to a region of the FULL-resolution image.
  // BUG FIX: while an ROI is set, detection results are relative to the ROI
  // origin, and the full-size image needs no `scale` factor. The original
  // code both multiplied by `scale` and ignored the ROI offset, so the eye
  // rectangles were drawn in the wrong place (or off-image entirely).
  CvRect eyeROI = cvRect(80, 100, 300, 300);
  cvSetImageROI(image, eyeROI);
  CvSeq *eyes = cvHaarDetectObjects(image, cascade1, storage, 1.15, 3, 0, cvSize(25, 15));
  for(int i = 0; i < eyes->total; i++)
  {
   NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
   CvRect cvrect = *(CvRect*)cvGetSeqElem(eyes, i);
   // Translate from ROI-relative to full-image coordinates (no scaling).
   CGRect eyes_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake(eyeROI.x + cvrect.x, eyeROI.y + cvrect.y, cvrect.width, cvrect.height));
   if(overlayImage) {
    CGContextDrawImage(contextRef, eyes_rect, overlayImage.CGImage);
   }
   else
   {
    CGContextStrokeRect(contextRef, eyes_rect);
   }
   [pool release];
  }
  cvResetImageROI(image);

  // LEAK FIX: UIImage retains the CGImage, so release our reference;
  // the original passed CGBitmapContextCreateImage() inline and leaked it.
  CGImageRef resultImageRef = CGBitmapContextCreateImage(contextRef);
  imageView.image = [UIImage imageWithCGImage:resultImageRef];
  CGImageRelease(resultImageRef);
  CGContextRelease(contextRef);
  CGColorSpaceRelease(colorSpace);

  // LEAK FIX: the full-size IplImage and the eye cascade were never freed.
  cvReleaseImage(&image);
  cvReleaseMemStorage(&storage);
  cvReleaseHaarClassifierCascade(&cascade);
  cvReleaseHaarClassifierCascade(&cascade1);

  [self hideProgressIndicator];
 }

}


This seems to be based on the facedetect sample code included with OpenCV 2.1. The XML file you are using is included in the data directory of the distribution.

I am not certain how scale invariant it is. I suggest you change the scale to get it to work for your input image scale.

Have a look at the python sample code facedetect.py for some clues


Set the face_rect frame while passing parameters to setImageROI:

// Holds the last face rectangle found, reused below as the eye-search ROI.
CGRect face_rect;

// Draw results on the image
for(int i = 0; i < faces->total; i++) {
    NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];

    // Calc the rect of faces (coordinates are in the half-size image;
    // multiplied by `scale` to map back to the full-size canvas).
    CvRect cvrect = *(CvRect*)cvGetSeqElem(faces, i);

     face_rect = CGContextConvertRectToDeviceSpace(contextRef, 
                            CGRectMake(cvrect.x * scale, cvrect.y * scale, cvrect.width * scale, cvrect.height * scale));
    CGContextStrokeRect(contextRef, face_rect);

    [pool release];


}

cvClearMemStorage(storage);

// NOTE(review): face_rect is in Core Graphics DEVICE space (and only holds
// the LAST face found), but cvSetImageROI expects pixel coordinates of the
// IplImage — these two coordinate spaces are mixed here; verify before use.
cvSetImageROI(image, cvRect(face_rect.origin.x,face_rect.origin.y,face_rect.size.width,face_rect.size.height));

// NOTE(review): this runs the FACE cascade (`cascade`) on `small_image`,
// not the eye cascade on `image` — the ROI set above on `image` has no
// effect on this call. Presumably `cascade1`/`image` were intended.
CvSeq *eye  =   cvHaarDetectObjects(small_image, cascade, storage,1.1, 3, 0, cvSize(10, 10));

for(int i=0;i<eye->total;i++)
{
    NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
    CvRect  cvrect= *(CvRect*)cvGetSeqElem(eye, i);

    CGRect eyes_rect;

    // Stroke in a different color so eye boxes are distinguishable.
    CGContextSetRGBStrokeColor(contextRef, 1.1, 1.0, 0.0, 0.5);
        // Left eye: a hand-tuned sub-rectangle of the detected box.
        // NOTE(review): the +20 / -30 / 40 offsets are magic numbers tuned
        // for one image size — confirm they hold for your inputs.
        eyes_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake((cvrect.x *scale + 20), (cvrect.y * scale)+(cvrect.height - 30 ) , cvrect.width - 30, 40 )); 

        CGContextStrokeRect(contextRef, eyes_rect);

        // Right eye: same heuristic, shifted right by the box width.
    CGRect eyes_Right   =   CGContextConvertRectToDeviceSpace(contextRef, CGRectMake((cvrect.x *scale + cvrect.width+ 10), (cvrect.y * scale)+(cvrect.height - 30 ) , cvrect.width - 30, 40 ));

        CGContextStrokeRect(contextRef, eyes_Right);    

    [pool release];
}
0

上一篇:

下一篇:

精彩评论

暂无评论...
验证码 换一张
取 消

最新问答

问答排行榜