How do I call LSD (LineSegmentDetector) from a C program?

Posted on 2024-11-16 14:40:01

I'm using LSD to detect straight lines in an image. The code I downloaded contains a minimal example of calling LSD, but it is static (i.e. it only outputs the values for the image built inside the main function). I want to apply the code to a video. Here is the minimal example that outputs the static results:

#include <stdio.h>
#include <cv.h>      /* OpenCV headers, needed for IplImage / cvLoadImage below */
#include <highgui.h>
#include "lsd.h"

int main(void)
{
  image_double image;
  ntuple_list out;
  unsigned int x,y,i,j;
  unsigned int X = 512;  /* x image size */
  unsigned int Y = 512;  /* y image size */

  /* create a simple image: left half black, right half gray */
  image = new_image_double(X,Y);
  for(x=0;x<X;x++)
    for(y=0;y<Y;y++)
      image->data[ x + y * image->xsize ] = x<X/2 ? 0.0 : 64.0; /* image(x,y) */

  /* load an image from disk with OpenCV (this image is never used below) */
  IplImage* imgInTmp = cvLoadImage("C:\\Documents and Settings\\Eslam farag\\My Documents\\Visual Studio 2008\\Projects\\line\\hand.JPEG", 0);

  /* call LSD */

  out = lsd(image);

  /* print output */
  printf("%u line segments found:\n",out->size);
  for(i=0;i<out->size;i++)
    {
      for(j=0;j<out->dim;j++)
        printf("%f ",out->values[ i * out->dim + j ]);
      printf("\n");
    }

  /* free memory */
  free_image_double(image);
  free_ntuple_list(out);

  return 0;
}

I would be glad if anyone could help me apply this code to a video. Thanks. Best regards,

Comments (2)

北音执念 2024-11-23 14:40:01

Since I couldn't find a complete example, I'm sharing code I wrote that uses OpenCV to load a video file from disk and perform some image processing on it.

The application takes a filename as input (on the command line) and converts each frame of the video to its grayscale equivalent using the OpenCV built-in function cvCvtColor().

I added some comments to the code to help you understand the basic tasks.

read_video.cpp

#include <stdio.h>
#include <highgui.h>
#include <cv.h>

int main(int argc, char* argv[])
{
    cvNamedWindow("video", CV_WINDOW_AUTOSIZE);

    CvCapture *capture = cvCaptureFromAVI(argv[1]);
    if(!capture)
    {
        printf("!!! cvCaptureFromAVI failed (file not found?)\n");
        return -1;
    }

    IplImage* frame;
    char key = 0;
    while (key != 'q') // Loop for querying video frames. Pressing Q will quit
    {
        frame = cvQueryFrame( capture );
        if( !frame )
        {
            printf("!!! cvQueryFrame failed\n");
            break;
        }

        /* Let's do a grayscale conversion just 4 fun */

        // A grayscale image has only one channel, while the video frames most
        // probably have 3 channels (OpenCV stores them as BGR). So, for the
        // conversion to work, we need to allocate an image with only 1 channel
        // to store the result of this operation.
        IplImage* gray_frame = 0;
        gray_frame = cvCreateImage(cvSize(frame->width, frame->height), frame->depth, 1);
        if (!gray_frame)
        {
            printf("!!! cvCreateImage failed!\n" );
            return -1;
        }

        cvCvtColor(frame, gray_frame, CV_BGR2GRAY); // The conversion itself

        // Display processed frame on window
        cvShowImage("video", gray_frame);

        // Release allocated resources
        cvReleaseImage(&gray_frame);

        key = cvWaitKey(33);
    }

    cvReleaseCapture(&capture);
    cvDestroyWindow("video");
}

Compiled with:

g++ read_video.cpp -o read `pkg-config --cflags --libs opencv`
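
You can then run it on a video file, for example (the filename here is just a placeholder):

./read some_video.avi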

If you want to know how to iterate over the pixels of a frame to do your own custom processing, check the following answer, since it shows how to do a manual grayscale conversion: OpenCV cvSet2D.....what does this do
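
For reference, here is a minimal sketch of that idea: a manual grayscale conversion that visits every pixel with cvGet2D/cvSet2D. The function name manual_grayscale and the BGR weights are my own choices for illustration, not taken from the linked answer.

#include <cv.h>

/* Sketch: convert a 3-channel BGR frame to grayscale pixel by pixel.
   "src" must be a 3-channel image and "dst" a 1-channel image of the
   same size, both allocated by the caller. */
void manual_grayscale(IplImage* src, IplImage* dst)
{
    for (int y = 0; y < src->height; y++)
    {
        for (int x = 0; x < src->width; x++)
        {
            CvScalar s = cvGet2D(src, y, x);          /* s.val[0..2] = B, G, R */
            double gray = 0.114 * s.val[0] + 0.587 * s.val[1] + 0.299 * s.val[2];
            cvSet2D(dst, y, x, cvRealScalar(gray));   /* write the single-channel value */
        }
    }
}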

天煞孤星 2024-11-23 14:40:01

Here is an example of code using LSD with OpenCV:

#include "lsd.h"

void Test_LSD(IplImage* img)
{
    IplImage* grey = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    cvCvtColor(img, grey, CV_BGR2GRAY);
    image_double image;
    ntuple_list out;
    unsigned int x,y,i,j;
    image = new_image_double(img->width,img->height);
    for(x=0;x<grey->width;x++)
    for(y=0;y<grey->height;y++)
    {
      CvScalar s= cvGet2D(grey,y,x);
      double pix= s.val[0];
      image->data[ x + y * image->xsize ]= pix; /* image(x,y) */
    }

    /* call LSD */
    out = lsd(image);
    //out= lsd_scale(image,1);

    /* print output */
    printf("%u line segments found:\n",out->size);
    vector<Line> vec;
    for(i=0;i<out->size;i++)
    {
      //for(j=0;j<out->dim;j++)
      {
        //printf("%f ",out->values[ i * out->dim + j ]);
          Line line;
          line.x1= out->values[ i * out->dim + 0];
          line.y1= out->values[ i * out->dim + 1];
          line.x2= out->values[ i * out->dim + 2];
          line.y2= out->values[ i * out->dim + 3];
          vec.push_back(line);
      }
      //printf("\n");
    }

    IplImage* black= cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 3);
    cvZero(black);
    draw_lines(vec,black);  /* draw_lines() is a helper not shown here; it draws each segment, like the cvLine loop in get_lines() below */
    /*cvNamedWindow("img", 0);
    cvShowImage("img", img);*/
    cvSaveImage("lines_detect.png",black/*img*/);
    /* free memory */
    cvReleaseImage(&grey);
    cvReleaseImage(&black);
    free_image_double(image);
    free_ntuple_list(out);
}

Or this way:

IplImage* get_lines(IplImage* img,vector<Line>& vec_lines)
{
    //to grey
    //IplImage* grey = cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    //cvCvtColor(img, grey, CV_BGR2GRAY);

    image_double image;
    ntuple_list out;
    unsigned int x,y,i,j;
    image = new_image_double(img->width,img->height);
    for(x=0;x</*grey*/img->width;x++)
    for(y=0;y</*grey*/img->height;y++)
    {
      CvScalar s= cvGet2D(/*grey*/img,y,x);
      double pix= s.val[0];
      image->data[ x + y * image->xsize ]= pix;
    }

    /* call LSD */
    out = lsd(image);
    //out= lsd_scale(image,1);

    /* print output */
    //printf("%u line segments found:\n",out->size);
    //vector<Line> vec;
    for(i=0;i<out->size;i++)
    {
      //for(j=0;j<out->dim;j++)
      {
        //printf("%f ",out->values[ i * out->dim + j ]);
          Line line;
          line.x1= out->values[ i * out->dim + 0];
          line.y1= out->values[ i * out->dim + 1];
          line.x2= out->values[ i * out->dim + 2];
          line.y2= out->values[ i * out->dim + 3];
          /*vec*/vec_lines.push_back(line);
      }
      //printf("\n");
    }

    IplImage* black= cvCreateImage(cvGetSize(img), IPL_DEPTH_8U, 1);
    cvZero(black);
    for(int i=0;i<vec_lines.size();++i)
    {
        //if(vec[i].x1==vec[i].x2||vec[i].y1==vec[i].y2)
        cvLine(black,cvPoint(vec_lines[i].x1,vec_lines[i].y1),cvPoint(vec_lines[i].x2,vec_lines[i].y2),CV_RGB(255,255,255),1, CV_AA);
    }
    /*cvNamedWindow("img", 0);
    cvShowImage("img", img);*/
    //cvSaveImage("lines_detect.png",black/*img*/);
    /* free memory */
    //cvReleaseImage(&grey);
    free_image_double(image);
    free_ntuple_list(out);

    return black;
}
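
Putting the two answers together, below is a minimal sketch (my own assembly, not taken verbatim from either answer) of how this could be applied to a video: the capture loop from the first answer feeds each frame into get_lines() from above. It assumes this main() is placed in the same file, after get_lines(), so that Line and get_lines() are in scope; the window name and the 33 ms delay are arbitrary choices.

#include <stdio.h>
#include <vector>
#include <cv.h>
#include <highgui.h>
#include "lsd.h"

int main(int argc, char* argv[])
{
    if (argc < 2)
    {
        printf("Usage: %s <video file>\n", argv[0]);
        return -1;
    }

    /* open the video file given on the command line */
    CvCapture* capture = cvCaptureFromAVI(argv[1]);
    if (!capture)
    {
        printf("!!! cvCaptureFromAVI failed (file not found?)\n");
        return -1;
    }

    cvNamedWindow("lsd", CV_WINDOW_AUTOSIZE);

    char key = 0;
    while (key != 'q')
    {
        /* frame is owned by the capture, so it must not be released here */
        IplImage* frame = cvQueryFrame(capture);
        if (!frame)
            break;

        /* LSD expects a single-channel image, so convert the frame first */
        IplImage* gray = cvCreateImage(cvGetSize(frame), IPL_DEPTH_8U, 1);
        cvCvtColor(frame, gray, CV_BGR2GRAY);

        /* detect segments in this frame; get_lines() returns them drawn on a black image */
        std::vector<Line> segments;
        IplImage* lines_img = get_lines(gray, segments);

        cvShowImage("lsd", lines_img);

        cvReleaseImage(&gray);
        cvReleaseImage(&lines_img);
        key = cvWaitKey(33);
    }

    cvReleaseCapture(&capture);
    cvDestroyWindow("lsd");
    return 0;
}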
