使用片段着色器的块过滤器

发布于 2024-09-01 00:59:22 字数 2975 浏览 4 评论 0原文

我使用 Apple 的 OpenGL Shader Builder(一个类似于 Nvidia 的 FX Composer、但更简单的工具)来学习本教程。

我可以轻松应用过滤器,但我不明白它们是否工作正常(如果是,我该如何改进输出)。例如模糊滤镜:OpenGL 本身对纹理进行一些图像处理,因此如果它们以比原始图像更高的分辨率显示,则它们已经被 OpenGL 模糊了。其次,模糊部分比未处理的部分更亮,我认为这没有意义,因为它只需要来自直接邻域的像素。这是由

float step_w = (1.0/width);

我不太明白的定义的:像素是使用浮点值索引的?

模糊图像 http://img218.imageshack.us/img218/6468/blurzt.png

编辑:我忘记附上我使用的确切代码:

片段着色器

// Originally taken from: http://www.ozone3d.net/tutorials/image_filtering_p2.php#part_2
//
// 3x3 convolution filter demo: the left half of the textured quad is
// filtered, the right half shows the unprocessed texture, and a thin
// red stripe separates the two.

#define KERNEL_SIZE 9

// Convolution weights, filled in main().
float kernel[KERNEL_SIZE];

uniform sampler2D colorMap;  // input texture
uniform float width;         // texture width in texels
uniform float height;        // texture height in texels

// Texture-coordinate offsets of the 3x3 neighbourhood, filled in main().
vec2 offset[KERNEL_SIZE];

void main(void)
{
   // One texel expressed in normalized [0,1] texture coordinates.
   // FIX: computed inside main() instead of at global scope. GLSL
   // requires constant expressions for global initializers, so
   // "float step_w = (1.0/width);" at file scope (width is a uniform)
   // is invalid/undefined behavior on many implementations.
   float step_w = 1.0 / width;
   float step_h = 1.0 / height;

   int i = 0;
   vec4 sum = vec4(0.0);

   offset[0] = vec2(-step_w, -step_h);  // south west
   offset[1] = vec2(0.0, -step_h);      // south
   offset[2] = vec2(step_w, -step_h);   // south east

   offset[3] = vec2(-step_w, 0.0);      // west
   offset[4] = vec2(0.0, 0.0);          // center
   offset[5] = vec2(step_w, 0.0);       // east

   offset[6] = vec2(-step_w, step_h);   // north west
   offset[7] = vec2(0.0, step_h);       // north
   offset[8] = vec2(step_w, step_h);    // north east


// Gaussian kernel
// 1 2 1
// 2 4 2
// 1 2 1


   kernel[0] = 1.0;  kernel[1] = 2.0;  kernel[2] = 1.0;
   kernel[3] = 2.0;  kernel[4] = 4.0;  kernel[5] = 2.0;
   kernel[6] = 1.0;  kernel[7] = 2.0;  kernel[8] = 1.0;


// TODO make grayscale first
// Laplacian Filter
// 0   1   0
// 1  -4   1
// 0   1   0

/*
kernel[0] = 0.0;    kernel[1] = 1.0;    kernel[2] = 0.0;
kernel[3] = 1.0;    kernel[4] = -4.0;   kernel[5] = 1.0;
kernel[6] = 0.0;    kernel[7] = 1.0;    kernel[8] = 0.0;    // FIX: kernel[7] was 2.0 — the Laplacian stencil is symmetric
*/

// Mean Filter
// 1  1  1
// 1  1  1
// 1  1  1

/*
kernel[0] = 1.0;    kernel[1] = 1.0;    kernel[2] = 1.0;
kernel[3] = 1.0;    kernel[4] = 1.0;    kernel[5] = 1.0;
kernel[6] = 1.0;    kernel[7] = 1.0;    kernel[8] = 1.0;
*/

   if(gl_TexCoord[0].s < 0.5)
   {
       // Convolve: sample the 3x3 neighbourhood around this fragment
       // and accumulate the weighted colors.
       float weightSum = 0.0;
       for( i = 0; i < KERNEL_SIZE; i++ )
       {
            // select the pixel with the concerning offset
            vec4 tmp = texture2D(colorMap, gl_TexCoord[0].st + offset[i]);
            sum += tmp * kernel[i];
            weightSum += kernel[i];
       }

       // FIX: normalize by the actual sum of the kernel weights instead
       // of the hard-coded 16.0. This keeps brightness unchanged for any
       // low-pass kernel enabled above (16 for the Gaussian, 9 for the
       // mean filter) and fixes the "blurred part is brighter" symptom
       // when kernel and divisor disagree. Zero-sum kernels (e.g. the
       // Laplacian edge detector) are left unscaled.
       if (weightSum != 0.0)
            sum /= weightSum;
   }
   else if( gl_TexCoord[0].s > 0.51 )
   {
        // Right half: pass the texture through untouched for comparison.
        sum = texture2D(colorMap, gl_TexCoord[0].xy);
   }
   else // Draw a red line
   {
        sum = vec4(1.0, 0.0, 0.0, 1.0);
   }

   gl_FragColor = sum;
}

顶点着色器

// Pass-through vertex shader: applies the fixed-function transform and
// forwards the texture coordinate of texture unit 0 to the fragment stage.
void main(void)
{
    gl_Position = ftransform();          // fixed-function modelview-projection transform
    gl_TexCoord[0] = gl_MultiTexCoord0;  // hand the texcoord to the fragment shader
}

I was following this tutorial using Apple's OpenGL Shader Builder (tool similar to Nvidia's fx composer, but simpler).

I could easily apply the filters, but I don't understand if they worked correct (and if so how can I improve the output). For example the blur filter: OpenGL itself does some image processing on the textures, so if they are displayed in a higher resolution than the original image, they are blurred already by OpenGL. Second the blurred part is brighter then the part not processed, I think this does not make sense, since it just takes pixels from the direct neighborhood. This is defined by

float step_w = (1.0/width);

Which I don't quite understand: The pixels are indexed using floating point values??

Blurred Image http://img218.imageshack.us/img218/6468/blurzt.png

Edit: I forgot to attach the exact code I used:

Fragment Shader

// Originally taken from: http://www.ozone3d.net/tutorials/image_filtering_p2.php#part_2
//
// 3x3 convolution filter demo: the left half of the textured quad is
// filtered, the right half shows the unprocessed texture, and a thin
// red stripe separates the two.

#define KERNEL_SIZE 9

// Convolution weights, filled in main().
float kernel[KERNEL_SIZE];

uniform sampler2D colorMap;  // input texture
uniform float width;         // texture width in texels
uniform float height;        // texture height in texels

// Texture-coordinate offsets of the 3x3 neighbourhood, filled in main().
vec2 offset[KERNEL_SIZE];

void main(void)
{
   // One texel expressed in normalized [0,1] texture coordinates.
   // FIX: computed inside main() instead of at global scope. GLSL
   // requires constant expressions for global initializers, so
   // "float step_w = (1.0/width);" at file scope (width is a uniform)
   // is invalid/undefined behavior on many implementations.
   float step_w = 1.0 / width;
   float step_h = 1.0 / height;

   int i = 0;
   vec4 sum = vec4(0.0);

   offset[0] = vec2(-step_w, -step_h);  // south west
   offset[1] = vec2(0.0, -step_h);      // south
   offset[2] = vec2(step_w, -step_h);   // south east

   offset[3] = vec2(-step_w, 0.0);      // west
   offset[4] = vec2(0.0, 0.0);          // center
   offset[5] = vec2(step_w, 0.0);       // east

   offset[6] = vec2(-step_w, step_h);   // north west
   offset[7] = vec2(0.0, step_h);       // north
   offset[8] = vec2(step_w, step_h);    // north east


// Gaussian kernel
// 1 2 1
// 2 4 2
// 1 2 1


   kernel[0] = 1.0;  kernel[1] = 2.0;  kernel[2] = 1.0;
   kernel[3] = 2.0;  kernel[4] = 4.0;  kernel[5] = 2.0;
   kernel[6] = 1.0;  kernel[7] = 2.0;  kernel[8] = 1.0;


// TODO make grayscale first
// Laplacian Filter
// 0   1   0
// 1  -4   1
// 0   1   0

/*
kernel[0] = 0.0;    kernel[1] = 1.0;    kernel[2] = 0.0;
kernel[3] = 1.0;    kernel[4] = -4.0;   kernel[5] = 1.0;
kernel[6] = 0.0;    kernel[7] = 1.0;    kernel[8] = 0.0;    // FIX: kernel[7] was 2.0 — the Laplacian stencil is symmetric
*/

// Mean Filter
// 1  1  1
// 1  1  1
// 1  1  1

/*
kernel[0] = 1.0;    kernel[1] = 1.0;    kernel[2] = 1.0;
kernel[3] = 1.0;    kernel[4] = 1.0;    kernel[5] = 1.0;
kernel[6] = 1.0;    kernel[7] = 1.0;    kernel[8] = 1.0;
*/

   if(gl_TexCoord[0].s < 0.5)
   {
       // Convolve: sample the 3x3 neighbourhood around this fragment
       // and accumulate the weighted colors.
       float weightSum = 0.0;
       for( i = 0; i < KERNEL_SIZE; i++ )
       {
            // select the pixel with the concerning offset
            vec4 tmp = texture2D(colorMap, gl_TexCoord[0].st + offset[i]);
            sum += tmp * kernel[i];
            weightSum += kernel[i];
       }

       // FIX: normalize by the actual sum of the kernel weights instead
       // of the hard-coded 16.0. This keeps brightness unchanged for any
       // low-pass kernel enabled above (16 for the Gaussian, 9 for the
       // mean filter) and fixes the "blurred part is brighter" symptom
       // when kernel and divisor disagree. Zero-sum kernels (e.g. the
       // Laplacian edge detector) are left unscaled.
       if (weightSum != 0.0)
            sum /= weightSum;
   }
   else if( gl_TexCoord[0].s > 0.51 )
   {
        // Right half: pass the texture through untouched for comparison.
        sum = texture2D(colorMap, gl_TexCoord[0].xy);
   }
   else // Draw a red line
   {
        sum = vec4(1.0, 0.0, 0.0, 1.0);
   }

   gl_FragColor = sum;
}

Vertex Shader

// Pass-through vertex shader: applies the fixed-function transform and
// forwards the texture coordinate of texture unit 0 to the fragment stage.
void main(void)
{
    gl_Position = ftransform();          // fixed-function modelview-projection transform
    gl_TexCoord[0] = gl_MultiTexCoord0;  // hand the texcoord to the fragment shader
}

如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。

扫码二维码加入Web技术交流群

发布评论

需要 登录 才能够评论, 你可以免费 注册 一个本站的账号。

评论(2)

那片花海 2024-09-08 00:59:22

纹理坐标通常从 (0,0) (左下)到 (1,1) (右上角),所以实际上,它们是浮点数。

因此,如果您有纹理坐标 (u,v),则“原始”坐标由 (u*textureWidth, v*textureHeight) 计算。

如果结果值不是整数,可能有不同的处理方法:

  • 只需对结果取 floor 或 ceil,使其成为整数
  • 在相邻纹素之间进行插值

然而,我认为每种着色语言都有一种通过“原始”(即整数)索引访问纹理的方法。

Texture coordinates conventionally reach from (0,0) (bottom left) to (1,1) (top right), so in fact, they are floats.

So if you have texturecoordinates (u,v), the "original" coordinates are computed by (u*textureWidth, v*textureHeight).

If the resulting values are not integral numbers, there may be different ways to handle that:

  • just take floor or ceil of the result in order to make the number integral
  • interpolate between the neighbouring texels

However I think every shading language has a method to access a texture by their "original", i.e. integral index.

划一舟意中人 2024-09-08 00:59:22

@Nils,感谢您发布此代码。一段时间以来,我一直在尝试找出一种在 GPU 上进行卷积的简单方法。
我尝试了你的代码,我自己也遇到了同样的调光问题。我是这样解决的。

  • 您必须小心步长:要使用纹理宽度,而不是图像宽度。在 OpenGL 中绑定纹理时,
    纹理通常会被调整为 2 的幂大小。
  • 您还必须确保对内核进行归一化:把内核中的所有值求和,再除以该总和。
  • 另外,如果您单独对 R、G、B 进行卷积,而不包含照明分量
    (采样值的第四个分量),会有帮助。

这是一个不存在变暗问题的解决方案,而且对于 3x3 内核也不再需要偏移数组。

我已经包含了 8 个对我有用且无需调暗的内核。

uniform sampler2D colorMap;  // input texture
uniform float width;         // texture width in texels (beware: may be padded to a power of two)
uniform float height;        // texture height in texels


// 3x3 convolution kernels. The low-pass kernels (SimpleBlur,
// GaussianBlur) are pre-normalized so the filtered image keeps its
// brightness; the edge kernels intentionally sum to zero.
const mat3 SobelVert= mat3( 1.0, 2.0, 1.0, 0.0, 0.0, 0.0, -1.0, -2.0, -1.0 );
const mat3 SobelHorz= mat3( 1.0, 0.0, -1.0, 2.0, 0.0, -2.0, 1.0, 0.0, -1.0 );
const mat3 SimpleBlur= (1.0/9.0)*mat3( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 );
const mat3 Sharpen= mat3( 0.0, -1.0, 0.0, -1.0, 5.0, -1.0, 0.0, -1.0, 0.0 );
const mat3 GaussianBlur= (1.0/16.0)*mat3( 1.0, 2.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 1.0 );
const mat3 SimpleHorzEdge= mat3( 0.0, 0.0, 0.0, -3.0, 3.0, 0.0, 0.0, 0.0, 0.0 );
const mat3 SimpleVertEdge= mat3( 0.0, -3.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 0.0 );
const mat3 ClearNone= mat3( 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 );

void main(void)
{
   vec4 sum = vec4(0.0);
   if(gl_TexCoord[0].x < 0.5)
   {
      // One mat3 per color channel, holding the 3x3 neighbourhood of
      // the current fragment.
      // FIX: the unused intensity matrix I (length of the RGB vector)
      // is no longer computed — it cost a sqrt per texel and its value
      // was never read.
      mat3 R, G, B;

      // Fetch the 3x3 neighbourhood, one channel per matrix.
      // FIX: the fetched color was renamed "sample" -> "texel" because
      // "sample" is a reserved keyword in GLSL 4.00+, and the
      // int->float conversion is now explicit (implicit int->float is
      // illegal in GLSL ES).
      for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
          vec3 texel = texture2D(colorMap, gl_TexCoord[0].xy + vec2(float(i - 1), float(j - 1)) / vec2(width, height)).rgb;
          R[i][j] = texel.r;
          G[i][j] = texel.g;
          B[i][j] = texel.b;
        }
      }

      // Apply the kernel: component-wise multiply weight * neighbour...
      mat3 convolvedMatR = matrixCompMult( SimpleBlur, R );
      mat3 convolvedMatG = matrixCompMult( SimpleBlur, G );
      mat3 convolvedMatB = matrixCompMult( SimpleBlur, B );
      float convR = 0.0;
      float convG = 0.0;
      float convB = 0.0;
      // ...then sum the nine products per channel.
      for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
          convR += convolvedMatR[i][j];
          convG += convolvedMatG[i][j];
          convB += convolvedMatB[i][j];
        }
      }
      sum = vec4(vec3(convR, convG, convB), 1.0);

  }
   else if( gl_TexCoord[0].x > 0.51 )
   {
        // Right half: unfiltered texture for comparison.
        sum = texture2D(colorMap, gl_TexCoord[0].xy );
   }
   else // Draw a red line
   {
        sum = vec4(1.0, 0.0, 0.0, 1.0);
   }

   gl_FragColor = sum;
}

@Nils, thanks for posting this code. I've been trying to figure out a simple way to do a convolution on the GPU for some time now.
I tried your code out and ran into the same dimming problem myself. Here's how I solved it.

  • You have to be careful with your step size to use texture width not
    image width. It usually gets re-sized to a power of 2 when the
    texture is bound in opengl.
  • You must also be sure to normalize your
    kernel by summing up all values in your kernel and dividing by that.
  • Also it helps if you convolve R G and B separately without the
    illumination, (the fourth component of the sample).

Here's a solution that doesn't have the dimming issue and that also bypasses the need for an offset array for 3x3 kernels .

I've included 8 Kernels that worked for me without dimming.

uniform sampler2D colorMap;  // input texture
uniform float width;         // texture width in texels (beware: may be padded to a power of two)
uniform float height;        // texture height in texels


// 3x3 convolution kernels. The low-pass kernels (SimpleBlur,
// GaussianBlur) are pre-normalized so the filtered image keeps its
// brightness; the edge kernels intentionally sum to zero.
const mat3 SobelVert= mat3( 1.0, 2.0, 1.0, 0.0, 0.0, 0.0, -1.0, -2.0, -1.0 );
const mat3 SobelHorz= mat3( 1.0, 0.0, -1.0, 2.0, 0.0, -2.0, 1.0, 0.0, -1.0 );
const mat3 SimpleBlur= (1.0/9.0)*mat3( 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0 );
const mat3 Sharpen= mat3( 0.0, -1.0, 0.0, -1.0, 5.0, -1.0, 0.0, -1.0, 0.0 );
const mat3 GaussianBlur= (1.0/16.0)*mat3( 1.0, 2.0, 1.0, 2.0, 4.0, 2.0, 1.0, 2.0, 1.0 );
const mat3 SimpleHorzEdge= mat3( 0.0, 0.0, 0.0, -3.0, 3.0, 0.0, 0.0, 0.0, 0.0 );
const mat3 SimpleVertEdge= mat3( 0.0, -3.0, 0.0, 0.0, 3.0, 0.0, 0.0, 0.0, 0.0 );
const mat3 ClearNone= mat3( 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0 );

void main(void)
{
   vec4 sum = vec4(0.0);
   if(gl_TexCoord[0].x < 0.5)
   {
      // One mat3 per color channel, holding the 3x3 neighbourhood of
      // the current fragment.
      // FIX: the unused intensity matrix I (length of the RGB vector)
      // is no longer computed — it cost a sqrt per texel and its value
      // was never read.
      mat3 R, G, B;

      // Fetch the 3x3 neighbourhood, one channel per matrix.
      // FIX: the fetched color was renamed "sample" -> "texel" because
      // "sample" is a reserved keyword in GLSL 4.00+, and the
      // int->float conversion is now explicit (implicit int->float is
      // illegal in GLSL ES).
      for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
          vec3 texel = texture2D(colorMap, gl_TexCoord[0].xy + vec2(float(i - 1), float(j - 1)) / vec2(width, height)).rgb;
          R[i][j] = texel.r;
          G[i][j] = texel.g;
          B[i][j] = texel.b;
        }
      }

      // Apply the kernel: component-wise multiply weight * neighbour...
      mat3 convolvedMatR = matrixCompMult( SimpleBlur, R );
      mat3 convolvedMatG = matrixCompMult( SimpleBlur, G );
      mat3 convolvedMatB = matrixCompMult( SimpleBlur, B );
      float convR = 0.0;
      float convG = 0.0;
      float convB = 0.0;
      // ...then sum the nine products per channel.
      for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
          convR += convolvedMatR[i][j];
          convG += convolvedMatG[i][j];
          convB += convolvedMatB[i][j];
        }
      }
      sum = vec4(vec3(convR, convG, convB), 1.0);

  }
   else if( gl_TexCoord[0].x > 0.51 )
   {
        // Right half: unfiltered texture for comparison.
        sum = texture2D(colorMap, gl_TexCoord[0].xy );
   }
   else // Draw a red line
   {
        sum = vec4(1.0, 0.0, 0.0, 1.0);
   }

   gl_FragColor = sum;
}
~没有更多了~
我们使用 Cookies 和其他技术来定制您的体验包括您的登录状态等。通过阅读我们的 隐私政策 了解更多相关信息。 单击 接受 或继续使用网站,即表示您同意使用 Cookies 和您的相关数据。
原文