使用 Opencv 进行眼睛检测
我正在使用 OpenCV 进行物体检测(眼睛)。以下是代码,因为它无法识别眼睛物体(准确或附近)。谁能帮我解决这个问题吗?
if(imageView.image) {
cvSetErrMode(CV_ErrModeParent);
IplImage *image = [self CreateIplImageFromUIImage:imageView.image];
// Scaling down
IplImage *small_image = cvCreateImage(cvSize(image->width/2,image->height/2), IPL_DEPTH_8U, 3);
cvPyrDown(image, small_image, CV_GAUSSIAN_5x5);
int scale = 2;
// Load XML
NSString *path1=[[NSBundle mainBundle] pathForResource:@"haarcascade_eye" ofType:@"xml"];
NSString *path = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_default" ofType:@"xml"];
CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*)cvLoad([path cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL, NULL);
CvHaarClassifierCascade* cascade1= (CvHaarClassifierCascade*)cvLoad([path1 cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL,NULL);
CvMemStorage* storage = cvCreateMemStorage(0);
// Detect faces and draw rectangle on them
CvSeq* faces = cvHaarDetectObjects(small_image, cascade, storage, 1.2f, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(20, 20));
cvReleaseImage(&small_image);
// Create canvas to show the results
CGImageRef imageRef = imageView.image.CGImage;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef contextRef = CGBitmapContextCreate(NULL, imageView.image.size.width, imageView.image.size.height,
8, imageView.image.size.width * 4,
colorSpace, kCGImageAlphaPremultipliedLast|kCGBitmapByteOrderDefault);
CGContextDrawImage(contextRef, CGRectMake(0, 0, imageView.image.size.width, imageView.image.size.height), imageRef);
CGContextSetLineWidth(contextRef, 4);
CGContextSetRGBStrokeColor(contextRef, 0.0, 0.0, 1.0, 0.5);
CvRect cvrect;
// Draw results on the iamge
for(int i = 0; i < faces->total; i++)
{
NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
// Calc the rect of faces
cvrect = *(CvRect*)cvGetSeqElem(faces, i);
CGRect face_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake(cvrect.x * scale, cvrect.y * scale, cvrect.width * scale, cvrect.height * scale));
if(overlayImage)
{
CGContextDrawImage(contextRef, face_rect, overlayImage.CGImage);
}
else
{
CGContextStrokeRect(contextRef, face_rect);
}
[pool release];
}
cvClearMemStorage(storage);
// cvSetImageROI(image,cvRect((cvrect.x * scale),(cvrect.y * (scale +((cvrect.height * scale)/5.5))), (cvrect.width * scale), (cvrect.height * scale)/3.0));
cvSetImageROI(image, cvRect(80,100,300,300));
CvSeq* eyes=cvHaarDetectObjects(image, cascade1, storage, 1.15, 3, 0, cvSize(25, 15));
for(int i=0;i<eyes->total;i++)
{
NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
CvRect cvrect= *(CvRect*)cvGetSeqElem(eyes, i);
// cvRectangle(img,cvPoint(cvrect.x * scale, cvrect.y * scale),cvPoint(cvrect.x * scale + cvrect.width * scale, cvrect.y * scale+cvrect.height * scale);
CGRect eyes_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake(cvrect.x * scale, cvrect.y * scale, cvrect.width * scale, cvrect.height * scale));
if(overlayImage) {
CGContextDrawImage(contextRef, eyes_rect, overlayImage.CGImage);
}
else
{
CGContextStrokeRect(contextRef, eyes_rect);
}
[pool release];
}
cvResetImageROI(image);
imageView.image = [UIImage imageWithCGImage:CGBitmapContextCreateImage(contextRef)];
CGContextRelease(contextRef);
CGColorSpaceRelease(colorSpace);
cvReleaseMemStorage(&storage);
cvReleaseHaarClassifierCascade(&cascade);
//int i;
[self hideProgressIndicator];
}
}
I am working on object detection (eyes) using OpenCV. Following is the code as it is not able to identify an eye object (exactly or near by). Can anyone help me solve this problem?
if(imageView.image) {
cvSetErrMode(CV_ErrModeParent);
IplImage *image = [self CreateIplImageFromUIImage:imageView.image];
// Scaling down
IplImage *small_image = cvCreateImage(cvSize(image->width/2,image->height/2), IPL_DEPTH_8U, 3);
cvPyrDown(image, small_image, CV_GAUSSIAN_5x5);
int scale = 2;
// Load XML
NSString *path1=[[NSBundle mainBundle] pathForResource:@"haarcascade_eye" ofType:@"xml"];
NSString *path = [[NSBundle mainBundle] pathForResource:@"haarcascade_frontalface_default" ofType:@"xml"];
CvHaarClassifierCascade* cascade = (CvHaarClassifierCascade*)cvLoad([path cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL, NULL);
CvHaarClassifierCascade* cascade1= (CvHaarClassifierCascade*)cvLoad([path1 cStringUsingEncoding:NSASCIIStringEncoding], NULL, NULL,NULL);
CvMemStorage* storage = cvCreateMemStorage(0);
// Detect faces and draw rectangle on them
CvSeq* faces = cvHaarDetectObjects(small_image, cascade, storage, 1.2f, 2, CV_HAAR_DO_CANNY_PRUNING, cvSize(20, 20));
cvReleaseImage(&small_image);
// Create canvas to show the results
CGImageRef imageRef = imageView.image.CGImage;
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
CGContextRef contextRef = CGBitmapContextCreate(NULL, imageView.image.size.width, imageView.image.size.height,
8, imageView.image.size.width * 4,
colorSpace, kCGImageAlphaPremultipliedLast|kCGBitmapByteOrderDefault);
CGContextDrawImage(contextRef, CGRectMake(0, 0, imageView.image.size.width, imageView.image.size.height), imageRef);
CGContextSetLineWidth(contextRef, 4);
CGContextSetRGBStrokeColor(contextRef, 0.0, 0.0, 1.0, 0.5);
CvRect cvrect;
// Draw results on the iamge
for(int i = 0; i < faces->total; i++)
{
NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
// Calc the rect of faces
cvrect = *(CvRect*)cvGetSeqElem(faces, i);
CGRect face_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake(cvrect.x * scale, cvrect.y * scale, cvrect.width * scale, cvrect.height * scale));
if(overlayImage)
{
CGContextDrawImage(contextRef, face_rect, overlayImage.CGImage);
}
else
{
CGContextStrokeRect(contextRef, face_rect);
}
[pool release];
}
cvClearMemStorage(storage);
// cvSetImageROI(image,cvRect((cvrect.x * scale),(cvrect.y * (scale +((cvrect.height * scale)/5.5))), (cvrect.width * scale), (cvrect.height * scale)/3.0));
cvSetImageROI(image, cvRect(80,100,300,300));
CvSeq* eyes=cvHaarDetectObjects(image, cascade1, storage, 1.15, 3, 0, cvSize(25, 15));
for(int i=0;i<eyes->total;i++)
{
NSAutoreleasePool * pool = [[NSAutoreleasePool alloc] init];
CvRect cvrect= *(CvRect*)cvGetSeqElem(eyes, i);
// cvRectangle(img,cvPoint(cvrect.x * scale, cvrect.y * scale),cvPoint(cvrect.x * scale + cvrect.width * scale, cvrect.y * scale+cvrect.height * scale);
CGRect eyes_rect = CGContextConvertRectToDeviceSpace(contextRef, CGRectMake(cvrect.x * scale, cvrect.y * scale, cvrect.width * scale, cvrect.height * scale));
if(overlayImage) {
CGContextDrawImage(contextRef, eyes_rect, overlayImage.CGImage);
}
else
{
CGContextStrokeRect(contextRef, eyes_rect);
}
[pool release];
}
cvResetImageROI(image);
imageView.image = [UIImage imageWithCGImage:CGBitmapContextCreateImage(contextRef)];
CGContextRelease(contextRef);
CGColorSpaceRelease(colorSpace);
cvReleaseMemStorage(&storage);
cvReleaseHaarClassifierCascade(&cascade);
//int i;
[self hideProgressIndicator];
}
}
如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。
绑定邮箱获取回复消息
由于您还没有绑定你的真实邮箱,如果其他用户或者作者回复了您的评论,将不能在第一时间通知您!
发布评论
评论(2)
这似乎基于 opencv 2.1 中包含的 FaceDetect 示例代码
您正在使用的 xml 文件包含在发行版的数据目录中。
我不确定它的尺度不变性如何。我建议您更改比例以使其适合您的输入图像比例。
查看 python 示例代码faceDetect.py以获取一些线索
this seems based on facedetect sample code included with opencv 2.1
The xml file you are using is included in the data directory of the distro.
I am not certain how scale invariant it is. I suggest you change the scale to get it to work for your input image scale.
Have a look at the python sample code facedetect.py for some clues
设置face_rect框架,同时为setImageRIO
CGRect传递参数face_rect;
cvClearMemStorage(存储);
set face_rect frame while passing parameters for setImageRIO
CGRect face_rect;
cvClearMemStorage(storage);