Image Stabilization


Hey, I'm working on a project to stabilize a video sequence using the optical-flow method. The optical flow itself works well so far, but I see two ways to proceed:

1 - After computing the optical flow, I found the average image displacement and then subtracted that average from the features of the second frame. My question is: what do I do next?

2 - Or I could use the OpenCV functions to stabilize the image: I computed the transformation matrix and then called cvPerspectiveTransform followed by cvWarpPerspective, but I get a "bad flag" error.

You can see the code below. What I want to know is how to stabilize the image. Any solution you can provide would be welcome.

#include <stdio.h>
#include <stdlib.h>    
//#include "/usr/include/opencv/cv.h"    
#include <cv.h>    
#include <cvaux.h>    
#include <highgui.h>    
#include <math.h>    
#include <iostream>

#define PI 3.1415926535898

double rads(double degs)
{
    return (PI/180 * degs);
}

CvCapture *cap;

IplImage *img;    
IplImage *frame;     
IplImage *frame1;    
IplImage *frame3;    
IplImage *frame2;    
IplImage *temp_image1;    
IplImage *temp_image2;    
IplImage *frame1_1C;     
IplImage *frame2_1C;     
IplImage *eig_image;     
IplImage *temp_image;     
IplImage *pyramid1 = NULL;    
IplImage *pyramid2 = NULL;

char * mapx;
char * mapy;

int h;
int corner_count;
CvMat* M = cvCreateMat(3,3,CV_32FC1);
CvPoint p,q,l,s;
double hypotenuse;
double angle;

int line_thickness = 1, line_valid = 1, pos = 0;
CvScalar line_color;
CvScalar target_color[4] = { // in BGR order
        {{   0,   0, 255,   0 }},  // red    
        {{   0, 255,   0,   0 }},  // green    
        {{ 255,   0,   0,   0 }}, // blue    
        {{   0, 255, 255,   0 }}   // yellow    
};

inline static double square(int a)
{
    return a * a;
}

char* IntToChar(int num)
{
    char* retstr = static_cast<char*>(calloc(12, sizeof(char)));

    if (sprintf(retstr, "%i", num) > 0)
    {
        return retstr;
    }
    else
    {
        free(retstr);
        return NULL;
    }
}

inline static void allocateOnDemand( IplImage **img, CvSize size, int depth, int channels )
{
    if ( *img != NULL ) 
         return;

    *img = cvCreateImage( size, depth, channels );

    if ( *img == NULL )
    {
        fprintf(stderr, "Error: Couldn't allocate image.  Out of memory?\n");
        exit(-1);
    }
}

void clearImage (IplImage *img)
{ 
    for (int i=0; i<img->imageSize; i++)    
        img->imageData[i] = (char) 0;    
}

int main()
{
    cap = cvCaptureFromCAM(0);    
    //cap = cvCaptureFromAVI("/home/saif/Desktop/NAO.. the project/jj/Test3.avi");

    CvSize frame_size;

    // Reading the video's frame size
    frame_size.height = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_HEIGHT );
    frame_size.width  = (int) cvGetCaptureProperty( cap, CV_CAP_PROP_FRAME_WIDTH );    
    cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);

    while(true)    
    {
        frame = cvQueryFrame( cap );

        if (frame == NULL)
        {    
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;    
        }

        // Allocating another image if it is not allocated already.     
        allocateOnDemand( &frame1_1C, frame_size, IPL_DEPTH_8U, 1 );    
        cvConvertImage(frame, frame1_1C, 0);    
        allocateOnDemand( &frame1, frame_size, IPL_DEPTH_8U, 3 );    
        cvConvertImage(frame, frame1, 0);

        //Get the second frame of video.    
        frame = cvQueryFrame( cap );

        if (frame == NULL)    
        {
            fprintf(stderr, "Error: Hmm. The end came sooner than we thought.\n");
            return -1;
        }


        allocateOnDemand( &frame2_1C, frame_size, IPL_DEPTH_8U, 1 );   
        cvConvertImage(frame, frame2_1C, 0);    
        allocateOnDemand( &frame2, frame_size, IPL_DEPTH_8U, 3 );    
        cvConvertImage(frame, frame2, 0);

        CvSize optical_flow_window = cvSize(5,5);    
        allocateOnDemand( &eig_image,  frame_size, IPL_DEPTH_32F, 1 );  // avoid re-allocating (and leaking) every iteration
        allocateOnDemand( &temp_image, frame_size, IPL_DEPTH_32F, 1 );

        CvTermCriteria optical_flow_termination_criteria = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, .3 );

        // Feature tracking 
        CvPoint2D32f frame1_features[4];
        CvPoint2D32f frame2_features[4];

        //cvCornerEigenValsAndVecs(eig_image, temp_image, 1 );    
        corner_count = 4;

        // Find up to 4 strong corners in the first frame, then refine them to
        // sub-pixel accuracy (zero_zone = (-1,-1) disables the dead zone).
        cvGoodFeaturesToTrack( frame1_1C, eig_image, temp_image, frame1_features, &corner_count, 0.1, .01, NULL, 5, 1 );
        cvFindCornerSubPix( frame1_1C, frame1_features, corner_count, cvSize(5, 5), cvSize(-1, -1), optical_flow_termination_criteria );

        if ( corner_count <= 0 )    
            printf( "\nNo features detected.\n" );    
        else    
            printf( "\nNumber of features found = %d\n", corner_count );

        // Lucas-Kanade method.
        char optical_flow_found_feature[20];    
        float optical_flow_feature_error[20];

        allocateOnDemand( &pyramid1, frame_size, IPL_DEPTH_8U, 1 );    
        allocateOnDemand( &pyramid2, frame_size, IPL_DEPTH_8U, 1 );
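
        // Pyramidal Lucas-Kanade: track frame1_features from frame1_1C into
        // frame2_1C; a per-feature success flag lands in optical_flow_found_feature.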

        cvCalcOpticalFlowPyrLK( frame1_1C, frame2_1C, pyramid1, pyramid2,
                                frame1_features, frame2_features, corner_count,
                                optical_flow_window, 5, optical_flow_found_feature,
                                optical_flow_feature_error,
                                optical_flow_termination_criteria, 0 );

    /*
    double sumOfDistancesX = 0;    
    double sumOfDistancesY = 0;

    int debug = 0;

     CvFont font1, font2;    
     CvScalar red, green, blue;    
     IplImage* seg_in = NULL;    
     IplImage *seg_out = NULL;

     allocateOnDemand( &seg_in,  frame_size, IPL_DEPTH_8U, 3 );    
     allocateOnDemand( &seg_out, frame_size, IPL_DEPTH_8U, 3 );

     clearImage(seg_in);    
     clearImage(seg_in);    

     for( int i=0; i <corner_count; i++ )
     {

         if ( optical_flow_found_feature[i] == 0 )  
             continue;    
         p.x = (int) frame1_features[i].x;    
         p.y = (int) frame1_features[i].y;    
         q.x = (int) frame2_features[i].x;    
         q.y = (int) frame2_features[i].y;
         angle = atan2( (double) p.y - q.y, (double) p.x - q.x );

          sumOfDistancesX += q.x - p.x;     
          sumOfDistancesY += q.y - p.y;

          //cvRemap(frame2,frame1,averageDistanceX , averageDistanceY,CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));    
      }
      */

      /*    
      int averageDistanceX = sumOfDistancesX / corner_count;    
      int averageDistanceY = sumOfDistancesY / corner_count;    
      l.x = averageDistanceX - q.x;    
      s.y = averageDistanceY - q.y;
      */


       //CvMat* N = cvCreateMat(3,3,CV_32FC1);
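
       // Note: the reported "bad flag" error almost certainly comes from the
       // two calls below. cvGetPerspectiveTransform() itself is fine (it takes
       // raw arrays of exactly 4 CvPoint2D32f), but cvPerspectiveTransform()
       // and cvWarpPerspective() expect CvArr* headers (IplImage/CvMat), so
       // passing bare feature arrays makes cvGetMat fail at runtime.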

       cvGetPerspectiveTransform(frame2_features, frame1_features, M);
       cvPerspectiveTransform(frame1_features, frame2_features, M);    
       cvWarpPerspective( frame2_features, frame1_features, M,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS,cvScalarAll(0) );

        cvShowImage("Optical Flow", frame1);    
        if (cvWaitKey(50) == 27)   // press Esc to exit the loop so cleanup below runs
            break;
    }

    cvReleaseCapture(&cap);    
    cvReleaseMat(&M);    

    return 0;    
}



1 Answer

澜川若宁 2024-10-11 03:48:24


You don't want to subtract the average displacement from the second image; you want to transform (move) the second image by the average displacement so that it "matches" the first. Which "displacement" you use depends on your situation.

  • If your camera is shaking but otherwise stationary, use the average displacement between two consecutive frames as the transformation vector for the second frame. With each new frame you compute the displacement between the transformed first frame and the new frame, and transform the new frame (see the first sketch below).
  • If your camera both moves and shakes (e.g. a helmet-mounted camera on a mountain biker), first find the average displacement between frames over a few frames, then transform each individual frame in the sequence by the difference between that average displacement and the frame's displacement from its predecessor (a sketch for this case follows the EDIT below).
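
For the first case, here's a minimal sketch of the idea in the same legacy C API as your code. It assumes the variables from your loop (frame1_features, frame2_features, corner_count, optical_flow_found_feature, frame2, frame_size, allocateOnDemand); stabilized, avgDx and avgDy are names I made up:

static IplImage *stabilized = NULL;   // persists across loop iterations

// Average the per-feature displacement measured by cvCalcOpticalFlowPyrLK.
double sumDx = 0, sumDy = 0;
int used = 0;
for (int i = 0; i < corner_count; i++)
{
    if (optical_flow_found_feature[i] == 0)
        continue;
    sumDx += frame2_features[i].x - frame1_features[i].x;
    sumDy += frame2_features[i].y - frame1_features[i].y;
    used++;
}

if (used > 0)
{
    double avgDx = sumDx / used;
    double avgDy = sumDy / used;

    // 2x3 affine matrix encoding a pure translation by (-avgDx, -avgDy).
    CvMat* T = cvCreateMat(2, 3, CV_32FC1);
    cvmSet(T, 0, 0, 1.0); cvmSet(T, 0, 1, 0.0); cvmSet(T, 0, 2, -avgDx);
    cvmSet(T, 1, 0, 0.0); cvmSet(T, 1, 1, 1.0); cvmSet(T, 1, 2, -avgDy);

    // Warp the image (not the feature arrays!) back by the average motion.
    allocateOnDemand(&stabilized, frame_size, IPL_DEPTH_8U, 3);
    cvWarpAffine(frame2, stabilized, T,
                 CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    cvShowImage("Optical Flow", stabilized);
    cvReleaseMat(&T);
}

A translation-only cvWarpAffine is enough for pure shake; your cvGetPerspectiveTransform route only pays off if the motion also includes rotation or perspective change, and it needs exactly 4 well-tracked point pairs.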

EDIT
What you basically need to do for option 2 is keep a running average of the movement between frames over the last few frames. You could do this in any number of ways, but I'd suggest something like a Kalman filter. Then, for each new frame, you compute the movement between it and the (corrected) previous frame, subtract the running average from that movement, and shift the new frame by the difference.
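
A rough sketch of that, with a simple exponential moving average standing in for the Kalman filter (alpha, smoothDx/smoothDy and corrDx/corrDy are names I made up; avgDx/avgDy and stabilized come from the previous sketch):

// Running estimate of the intended camera motion. The correction applied to
// each frame is only the deviation of the measured motion from this estimate.
static double smoothDx = 0, smoothDy = 0;
const double alpha = 0.9;   // higher = smoother, but slower to follow real pans

smoothDx = alpha * smoothDx + (1.0 - alpha) * avgDx;
smoothDy = alpha * smoothDy + (1.0 - alpha) * avgDy;

double corrDx = avgDx - smoothDx;   // the shake component of this frame's motion
double corrDy = avgDy - smoothDy;

// Shift the new frame back by the shake component only, so deliberate camera
// movement is preserved while the jitter is cancelled.
CvMat* T = cvCreateMat(2, 3, CV_32FC1);
cvmSet(T, 0, 0, 1.0); cvmSet(T, 0, 1, 0.0); cvmSet(T, 0, 2, -corrDx);
cvmSet(T, 1, 0, 0.0); cvmSet(T, 1, 1, 1.0); cvmSet(T, 1, 2, -corrDy);
cvWarpAffine(frame2, stabilized, T,
             CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
cvReleaseMat(&T);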
