Converting from IplImage to QImage in Qt


I would like to display an image in a Qt window, so I used QLabel->setPixmap,

but how can I convert from an IplImage to a QImage to display it in the label?

I found the following function to convert it, but I do not know how to call it:

QImage *IplImageToQImage(const IplImage * iplImage, uchar **data, double mini, double maxi)
{
    uchar *qImageBuffer = NULL;
    int width = iplImage->width;
    int widthStep = iplImage->widthStep;
    int height = iplImage->height;
    switch (iplImage->depth)
    {
        case IPL_DEPTH_8U:
        if (iplImage->nChannels == 1)
        {
        // OpenCV image is stored with one byte grey pixel. We convert it
       // to an 8 bit depth QImage.
        //

        qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
        uchar *QImagePtr = qImageBuffer;
        const uchar *iplImagePtr = (const uchar *) iplImage->imageData;

        for (int y = 0; y < height; y++)
        {
            // Copy line by line
            memcpy(QImagePtr, iplImagePtr, width);
            QImagePtr += width;
            iplImagePtr += widthStep;
        }

        }
        else if (iplImage->nChannels == 3)
        {
            /* OpenCV image is stored with 3 byte color pixels (3 channels).
            We convert it to a 32 bit depth QImage.
            */
            qImageBuffer = (uchar *) malloc(width*height*4*sizeof(uchar));
            uchar *QImagePtr = qImageBuffer;
            const uchar *iplImagePtr = (const uchar *) iplImage->imageData;
            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                    // We cannot help but copy manually.
                    QImagePtr[0] = iplImagePtr[0];
                    QImagePtr[1] = iplImagePtr[1];
                    QImagePtr[2] = iplImagePtr[2];
                    QImagePtr[3] = 0;

                    QImagePtr += 4;
                    iplImagePtr += 3;
                }
            iplImagePtr += widthStep-3*width;
            }

        }
        else
        {
            qDebug("IplImageToQImage: image format is not supported : depth=8U and %d channels\n", iplImage->nChannels);
        }
        break;
        case IPL_DEPTH_16U:
        if (iplImage->nChannels == 1)
        {
        /* OpenCV image is stored with 2 bytes grey pixel. We convert it
        to an 8 bit depth QImage.
        */
            qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
            uchar *QImagePtr = qImageBuffer;
            //const uint16_t *iplImagePtr = (const uint16_t *);
            const unsigned int *iplImagePtr = (const unsigned int *)iplImage->imageData;
            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                // We take only the highest part of the 16 bit value. It is
                //similar to dividing by 256.
                *QImagePtr++ = ((*iplImagePtr++) >> 8);
                }
                iplImagePtr += widthStep/sizeof(unsigned int)-width;
            }
        }
        else
        {
            qDebug("IplImageToQImage: image format is not supported : depth=16U and %d channels\n", iplImage->nChannels);

        }
        break;
        case IPL_DEPTH_32F:
         if (iplImage->nChannels == 1)
         {
        /* OpenCV image is stored with float (4 bytes) grey pixel. We
        convert it to an 8 bit depth QImage.
        */
             qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
             uchar *QImagePtr = qImageBuffer;
             const float *iplImagePtr = (const float *) iplImage->imageData;
             for (int y = 0; y < height; y++)
             {
                 for (int x = 0; x < width; x++)
                 {
                     uchar p;
                     float pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);
                     if (pf < 0) p = 0;
                     else if (pf > 255) p = 255;
                     else p = (uchar) pf;

                     *QImagePtr++ = p;
                  }
             iplImagePtr += widthStep/sizeof(float)-width;
             }
         }
         else
         {
             qDebug("IplImageToQImage: image format is not supported : depth=32F and %d channels\n", iplImage->nChannels);
         }
       break;
       case IPL_DEPTH_64F:
         if (iplImage->nChannels == 1)
         {
            /* OpenCV image is stored with double (8 bytes) grey pixel. We
            convert it to an 8 bit depth QImage.
            */
            qImageBuffer = (uchar *) malloc(width*height*sizeof(uchar));
            uchar *QImagePtr = qImageBuffer;
            const double *iplImagePtr = (const double *) iplImage->imageData;
            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                    uchar p;
                    double pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);

                    if (pf < 0) p = 0;
                    else if (pf > 255) p = 255;
                    else p = (uchar) pf;

                    *QImagePtr++ = p;
                }
            }
        }
        else
        {
            qDebug("IplImageToQImage: image format is not supported : depth=64F and %d channels\n", iplImage->nChannels);
        }
        break;
        default:
        qDebug("IplImageToQImage: image format is not supported : depth=%d and %d channels\n", iplImage->depth, iplImage->nChannels);
    }
    QImage *qImage;
    QVector<QRgb> vcolorTable;
    if (iplImage->nChannels == 1)
    {
        // We should check who is going to destroy this allocation.
        QRgb *colorTable = new QRgb[256];
        for (int i = 0; i < 256; i++)
        {
           colorTable[i] = qRgb(i, i, i);
           vcolorTable[i] = colorTable[i];
        }
        qImage = new QImage(qImageBuffer, width, height, QImage::Format_Indexed8);
        qImage->setColorTable(vcolorTable);
    }
    else
    {
        qImage = new QImage(qImageBuffer, width, height, QImage::Format_RGB32);
    }
    *data = qImageBuffer;
    return qImage;
}

The parameters are:
const IplImage *iplImage, uchar **data, double mini, double maxi

What are data, mini and maxi? How can I get them from my IplImage to use in the call?

Thanks a lot :)

3 Answers

不忘初心 2024-11-17 07:00:14

mini and maxi are used for converting the floating point values that certain image formats use into integer values in the range 0-255. data, despite first appearances, is used: at the very end the function stores the pointer to the pixel buffer it allocates in *data, so the caller can keep it around and free it later. So pass the address of a uchar* variable rather than NULL, otherwise the final *data assignment will crash.

mini and maxi really depend on the image data: the code computes 255 * (value - mini) / (maxi - mini), which maps the range [mini, maxi] onto 0-255, so set them to the smallest and largest pixel values you expect. If your IplImage is not stored as floating point values, these two parameters make no difference, because they are only used in the 32F and 64F branches.
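
A minimal call-site sketch using the function as posted above (assuming an OpenCV C-API image and a QLabel named label; all the names, the file path and the 0/255 range are placeholders):

#include <QImage>
#include <QLabel>
#include <QPixmap>
#include <opencv/highgui.h>   // cvLoadImage / cvReleaseImage (adjust to your OpenCV version)

void showOnLabel(QLabel *label)
{
    IplImage *ipl = cvLoadImage("frame.png", CV_LOAD_IMAGE_UNCHANGED);
    if (!ipl)
        return;

    uchar *buf = NULL;                                   // filled in by IplImageToQImage
    QImage *qimg = IplImageToQImage(ipl, &buf, 0, 255);  // 0/255 is just an assumed range

    label->setPixmap(QPixmap::fromImage(*qimg));         // fromImage() copies the pixels

    delete qimg;            // safe after the copy above
    free(buf);              // the buffer was malloc'd inside IplImageToQImage
    cvReleaseImage(&ipl);
}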

夏有森光若流苏 2024-11-17 07:00:14

You can simply create a QImage where the data is owned by something else (e.g. the IplImage) using the QImage(data, width, height, format) constructor, where data is the IplImage data pointer, as long as the format is the same in both the QImage and the IplImage (e.g. RGB888 = 8U_C3).
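
For example, a rough sketch of that approach (assuming an 8U_C3 image; note that OpenCV stores channels in BGR order, and that the wrapped buffer must stay valid for as long as the QImage uses it):

#include <QImage>
#include <opencv/cxcore.h>   // IplImage (adjust the header to your OpenCV version)

// Wrap an 8U_C3 IplImage without copying the pixels, then swap BGR -> RGB.
QImage iplToQImageRgb888(const IplImage *ipl)
{
    QImage wrapped(reinterpret_cast<const uchar *>(ipl->imageData),
                   ipl->width, ipl->height,
                   ipl->widthStep,                 // rows may be padded, so pass the real stride
                   QImage::Format_RGB888);
    // rgbSwapped() returns a copy with R and B exchanged, which also
    // detaches the result from the IplImage buffer.
    return wrapped.rgbSwapped();
}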

つ可否回来 2024-11-17 07:00:14

I have found some bugs in the code. Maybe there are still more in it, but for now it looks fine to me. A QImage with Format_Indexed8 sometimes needs 2 extra bytes at the end of each row (depending on the image width): QImage aligns every scanline to a 4-byte boundary, so rows get padded whenever the width is not a multiple of 4. That is why the adapted code below writes through qImage->scanLine(y) for every row instead of assuming a tightly packed buffer.
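
A quick way to observe this padding (a throwaway sketch; the 10x2 size is arbitrary):

#include <QImage>
#include <QtDebug>

int main()
{
    // Width 10 is not a multiple of 4, so each Indexed8 scanline gets padded.
    QImage img(10, 2, QImage::Format_Indexed8);
    qDebug() << "width:" << img.width()
             << "bytesPerLine:" << img.bytesPerLine();  // typically prints 10 and 12
    return 0;
}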
Here is the new adapted code

QImage *IplImageToQImage(const IplImage * iplImage, uchar **data, double mini, double maxi)
{
    uchar *qImageBuffer = NULL;
    int width = iplImage->width;
    int widthStep = iplImage->widthStep;
    int height = iplImage->height;
    QImage *qImage;
    switch (iplImage->depth)
    {
        case IPL_DEPTH_8U:
        if (iplImage->nChannels == 1)
        {
        // OpenCV image is stored with one byte grey pixel. We convert it
       // to an 8 bit depth QImage.
        qImage = new QImage(width,height,QImage::Format_Indexed8);
        uchar *QImagePtr = qImage->scanLine(0);
        qImageBuffer = qImage->scanLine(0);

        const uchar *iplImagePtr = (const uchar *) iplImage->imageData;

        for (int y = 0; y < height; y++)
        {
            // Copy line by line
            QImagePtr = qImage->scanLine(y);            
            memcpy(QImagePtr, iplImagePtr, width);
            iplImagePtr += widthStep;

        }
        /*
            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                // We take only the highest part of the 16 bit value. It is
                //similar to dividing by 256.
                //*QImagePtr++ = ((*iplImagePtr++) >> 8);
                *QImagePtr = *iplImagePtr;
                QImagePtr++;
                iplImagePtr++;
                }

                iplImagePtr += widthStep/sizeof(uchar)-width;
            }*/
        }
        else if (iplImage->nChannels == 3)
            {
            /* OpenCV image is stored with 3 byte color pixels (3 channels).
            We convert it to a 32 bit depth QImage.
            */
            qImageBuffer = (uchar *) malloc(width*height*4*sizeof(uchar));
            uchar *QImagePtr = qImageBuffer;
            const uchar *iplImagePtr = (const uchar *) iplImage->imageData;
            for (int y = 0; y < height; y++)
            {
                for (int x = 0; x < width; x++)
                {
                    // We cannot help but copy manually.
                    QImagePtr[0] = iplImagePtr[0];
                    QImagePtr[1] = iplImagePtr[1];
                    QImagePtr[2] = iplImagePtr[2];
                    QImagePtr[3] = 0;
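                    // Note: Format_RGB32 treats every pixel as opaque, so this
                    // byte is effectively ignored (Qt documents 0xff as its
                    // canonical value).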

                    QImagePtr += 4;
                    iplImagePtr += 3;
                }
            iplImagePtr += widthStep-3*width;
            }

        }
        else
        {
            qDebug("IplImageToQImage: image format is not supported : depth=8U and %d channels\n", iplImage->nChannels);
        }
        break;
        case IPL_DEPTH_16U:
        if (iplImage->nChannels == 1)
        {
        /* OpenCV image is stored with 2 bytes grey pixel. We convert it
        to an 8 bit depth QImage.
        */
            qImage = new QImage(width,height,QImage::Format_Indexed8);
            uchar *QImagePtr = qImage->scanLine(0);
            qImageBuffer = qImage->scanLine(0);

            //const uint16_t *iplImagePtr = (const uint16_t *);
            const unsigned short *iplImagePtr = (const unsigned short *)iplImage->imageData;


            for (int y = 0; y < height; y++)
            {
                QImagePtr = qImage->scanLine(y);
                for (int x = 0; x < width; x++)
                {
                // Changed from the original ">> 8": a 16-bit image can cover
                // any value range, so scale it into 0-255 with the
                // caller-supplied mini/maxi instead of just taking the high byte.
                //*QImagePtr++ = ((*iplImagePtr++) >> 8);
                *QImagePtr = 255*(((*iplImagePtr) - mini) / (maxi - mini));
                QImagePtr++;
                iplImagePtr++;
                }

                iplImagePtr += widthStep/sizeof(unsigned short)-width;
            }

        }
        else
        {
            qDebug("IplImageToQImage: image format is not supported : depth=16U and %d channels\n", iplImage->nChannels);

        }
        break;
        case IPL_DEPTH_32F:
         if (iplImage->nChannels == 1)
         {
        /* OpenCV image is stored with float (4 bytes) grey pixel. We
        convert it to an 8 bit depth QImage.
        */
             qImage = new QImage(width,height,QImage::Format_Indexed8);
             uchar *QImagePtr = qImage->scanLine(0);
             qImageBuffer = qImage->scanLine(0);

             const float *iplImagePtr = (const float *) iplImage->imageData;
             for (int y = 0; y < height; y++)
             {
             QImagePtr = qImage->scanLine(y);
                 for (int x = 0; x < width; x++)
                 {
                     uchar p;
                     float pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);
                     if (pf < 0) p = 0;
                     else if (pf > 255) p = 255;
                     else p = (uchar) pf;

                     *QImagePtr++ = p;
                  }

             iplImagePtr += widthStep/sizeof(float)-width;
             }
         }
         else
         {
             qDebug("IplImageToQImage: image format is not supported : depth=32F and %d channels\n", iplImage->nChannels);
         }
       break;
       case IPL_DEPTH_64F:
         if (iplImage->nChannels == 1)
         {
            /* OpenCV image is stored with double (8 bytes) grey pixel. We
            convert it to an 8 bit depth QImage.
            */
             qImage = new QImage(width,height,QImage::Format_Indexed8);
             uchar *QImagePtr = qImage->scanLine(0);
             qImageBuffer = qImage->scanLine(0);

            const double *iplImagePtr = (const double *) iplImage->imageData;
            for (int y = 0; y < height; y++)
            {
             QImagePtr = qImage->scanLine(y);
                for (int x = 0; x < width; x++)
                {
                    uchar p;
                    double pf = 255 * ((*iplImagePtr++) - mini) / (maxi - mini);

                    if (pf < 0) p = 0;
                    else if (pf > 255) p = 255;
                    else p = (uchar) pf;

                    *QImagePtr++ = p;
                }

                // Skip any row padding, mirroring the other branches
                // (for 8-byte doubles this is normally zero).
                iplImagePtr += widthStep / sizeof(double) - width;
            }

        }
        else
        {
            qDebug("IplImageToQImage: image format is not supported : depth=64F and %d channels\n", iplImage->nChannels);
        }
        break;
        default:
        qDebug("IplImageToQImage: image format is not supported : depth=%d and %d channels\n", iplImage->depth, iplImage->nChannels);
    }

    QVector<QRgb> vcolorTable;
    if (iplImage->nChannels == 1)
    {
        // The QVector owns the colour table, so there is nothing extra to free here.
        vcolorTable.resize(256);
        for (int i = 0; i < 256; i++)
        {
           vcolorTable[i] = qRgb(i, i, i);
        }
        // The QImage was already allocated with the plain Qt constructor in the
        // switch above (Format_Indexed8 pads each scanline to a 4-byte boundary),
        // so we no longer wrap a raw malloc'd buffer here.
        //qImage = new QImage(tmpImg->scanLine(0), width, height, QImage::Format_Indexed8);
        qImage->setColorTable(vcolorTable);
    }
    else
    {
        qImage = new QImage(qImageBuffer, width, height, QImage::Format_RGB32);
    }
    *data = qImageBuffer;
    return qImage;
}

I don't know if the 3-channel path has the same bug, but I hope not.
