OpenGL 3.1 光照混乱,使用 phong 着色
经过几个小时的痛苦尝试找出为什么我的灯光混乱后,我仍然不知所措。
OpenGL 法线是正确的(背面剔除不会导致任何三角形消失)
我计算法线以便插值照明,同一面上的所有三角形也具有相同的法线。
如果有人有任何想法,将不胜感激。
我对 OpenGL 绝对是新手,所以这在我的代码中有点明显。
这是我的着色器:
顶点着色器
#version 330 core layout(location = 0) in vec3 Position; layout(location = 1) in vec3 vertexColor; in vec3 vNormal; out vec3 fragmentColor; // Output data ; will be interpolated for each fragment. uniform mat4 MVP; uniform mat4 transformMatrix; uniform vec4 LightPosition; // output values that will be interpretated per-fragment out vec3 fN; out vec3 fE; out vec3 fL; void main() { fN = vNormal; fE = Position.xyz; fL = LightPosition.xyz; if( LightPosition.w != 0.0 ) { fL = LightPosition.xyz - Position.xyz; } // Output position of the vertex, in clip space : MVP * position vec4 v = vec4(Position,1); // Transform in homoneneous 4D vector gl_Position = MVP * v; //gl_Position = MVP * v; // The color of each vertex will be interpolated // to produce the color of each fragment //fragmentColor = vertexColor; // take out at some point }
和fragmentShader,使用phong着色
#version 330 //out vec3 color; // per-fragment interpolated values from the vertex shader in vec3 fN; in vec3 fL; in vec3 fE; out vec4 fColor; uniform vec4 AmbientProduct, DiffuseProduct, SpecularProduct; uniform mat4 ModelView; uniform vec4 LightPosition; uniform float Shininess; in vec3 fragmentColor; // Interpolated values from the vertex shaders void main() { // Normalize the input lighting vectors vec3 N = normalize(fN); vec3 E = normalize(fE); vec3 L = normalize(fL); vec3 H = normalize( L + E ); vec4 ambient = AmbientProduct; float Kd = max(dot(L, N), 0.0); vec4 diffuse = Kd*DiffuseProduct; float Ks = pow(max(dot(N, H), 0.0), Shininess); vec4 specular = Ks*SpecularProduct; // discard the specular highlight if the light's behind the vertex if( dot(L, N) < 0.0 ) { specular = vec4(0.0, 0.0, 0.0, 1.0); } fColor = ambient + diffuse + specular; fColor.a = 1.0; //color = vec3(1,0,0); // Output color = color specified in the vertex shader, // interpolated between all 3 surrounding vertices //color = fragmentColor; } void setMatrices() { GLfloat FoV = 45; // the zoom of the camera glm::vec3 cameraPosition(4,3,3), // the position of your camera, in world space // change to see what happends cameraTarget(0,0,0), // where you want to look at, in world space upVector(0,-1,0); // Projection matrix : 45° Field of View, 4:3 ratio, display range : 0.1 unit <-> 100 units glm::mat4 Projection = glm::perspective(FoV, 3.0f / 3.0f, 0.001f, 100.0f); // ratio needs to change here when the screen size/ratio changes // Camera matrix glm::mat4 View = glm::lookAt( cameraPosition, // Camera is at (4,3,3), in World Space cameraTarget, // and looks at the origin upVector // Head is up (set to 0,-1,0 to look upside-down) ); // Model matrix : an identity matrix (model will be at the origin) glm::mat4 Model = glm::mat4(1.0f); // Changes for each model !
// Our ModelViewProjection : multiplication of our 3 matrices glm::mat4 MVP = Projection * View * Model * transformMatrix; //matrix multiplication is the other way around // Get a handle for our "MVP" uniform. // Only at initialisation time. GLuint MatrixID = glGetUniformLocation(programID, "MVP"); // Send our transformation to the currently bound shader, // in the "MVP" uniform // For each model you render, since the MVP will be different (at least the M part) glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]); RotationID = glGetUniformLocation(programID,"transformMatrix"); //lighting cubeNormal = glGetAttribLocation( programID, "vNormal" ); } void setBuffers() { // Get a vertex array object GLuint VAO; glGenVertexArrays(1, &VAO); glBindVertexArray(VAO); glUseProgram(programID); // cube buffer objects glGenBuffers(1, &CubeVertexbuffer); glBindBuffer(GL_ARRAY_BUFFER, CubeVertexbuffer); glBufferData(GL_ARRAY_BUFFER, sizeof(CubeBufferData), CubeBufferData, GL_STATIC_DRAW); // cube normal objects glGenBuffers(1, &CubeNormalbuffer); glBindBuffer(GL_ARRAY_BUFFER, CubeNormalbuffer); glBufferData(GL_ARRAY_BUFFER, sizeof(CubeNormalBufferData), CubeNormalBufferData, GL_STATIC_DRAW); //octahedron buffer objects glGenBuffers(1, &OctaVertexbuffer); glBindBuffer(GL_ARRAY_BUFFER, OctaVertexbuffer); glBufferData(GL_ARRAY_BUFFER, sizeof(octahedronBufData), octahedronBufData, GL_STATIC_DRAW); //tetrahedron buffer objects glGenBuffers(1, &TetraVertexbuffer); glBindBuffer(GL_ARRAY_BUFFER, TetraVertexbuffer); glBufferData(GL_ARRAY_BUFFER, sizeof(tetrahedronBufData), tetrahedronBufData, GL_STATIC_DRAW); //dodecahedron buffer objects glGenBuffers(1, &DodecaVertexbuffer); glBindBuffer(GL_ARRAY_BUFFER, DodecaVertexbuffer); glBufferData(GL_ARRAY_BUFFER, sizeof(dodecahedronBufData), dodecahedronBufData, GL_STATIC_DRAW); //icosahedron buffer objects glGenBuffers(1, &icosaVertexbuffer); glBindBuffer(GL_ARRAY_BUFFER, icosaVertexbuffer); glBufferData(GL_ARRAY_BUFFER, sizeof(icosahedronBufData), icosahedronBufData, GL_STATIC_DRAW); //sphere buffer objects glGenBuffers(1, &sphereVertexbuffer); glBindBuffer(GL_ARRAY_BUFFER, sphereVertexbuffer); glBufferData(GL_ARRAY_BUFFER, sizeof(sphereBufData), sphereBufData, GL_STATIC_DRAW);
glGenBuffers(1, &colorbuffer); glBindBuffer(GL_ARRAY_BUFFER, colorbuffer); glBufferData(GL_ARRAY_BUFFER, sizeof(g_color_buffer_data), g_color_buffer_data, GL_STATIC_DRAW); // lighting stuff // Initialize shader lighting parameters point4 light_position= { 0.0, 20.0, -10.0, 0.0 }; color4 light_ambient ={ 0.2, 0.2, 0.2, 1.0 }; color4 light_diffuse ={ 1.0, 1.0, 1.0, 1.0 }; color4 light_specular ={ 1.0, 1.0, 1.0, 1.0 }; color4 material_ambient ={ 1.0, 0.0, 1.0, 1.0 }; color4 material_diffuse ={ 1.0, 0.8, 0.0, 1.0 }; color4 material_specular ={ 1.0, 0.8, 0.0, 1.0 }; float material_shininess = 20.0; color4 ambient_product; color4 diffuse_product; color4 specular_product; int i; for (i = 0; i < 3; i++) { ambient_product[i] = light_ambient[i] * material_ambient[i]; diffuse_product[i] = light_diffuse[i] * material_diffuse[i]; specular_product[i] = light_specular[i] * material_specular[i]; } //printColor("diffuse", diffuse_product); //printColor("specular", specular_product); glUniform4fv( glGetUniformLocation(programID, "AmbientProduct"), 1, ambient_product ); glUniform4fv( glGetUniformLocation(programID, "DiffuseProduct"), 1, diffuse_product ); glUniform4fv( glGetUniformLocation(programID, "SpecularProduct"), 1, specular_product ); glUniform4fv( glGetUniformLocation(programID, "LightPosition"), 1, light_position ); glUniform1f( glGetUniformLocation(programID, "Shininess"), material_shininess ); }
等等......
void display()
{
setMatrices(); // initilize Matrices
// Use our shader
//glUseProgram(programID);
glClearColor(0.0f, 0.0f, 0.3f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// 2nd attribute buffer : colors
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, colorbuffer);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glEnableVertexAttribArray(0); // 1rst attribute buffer : vertices
// enum platosShapes{tet, cube, octah, dodec, icos};
switch(shapeInUse)
{
case tet:
{
glBindBuffer(GL_ARRAY_BUFFER, TetraVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 4*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case cube:
{
//GLuint cubeNormal = glGetAttribLocation( programID, "vNormal" );
glEnableVertexAttribArray( cubeNormal );
glVertexAttribPointer( cubeNormal, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid *) (sizeof(CubeNormalBufferData)) );
//glDisableVertexAttribArray( cubeNormal );
glBindBuffer(GL_ARRAY_BUFFER, CubeVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 12*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case octah:
{
glBindBuffer(GL_ARRAY_BUFFER, OctaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 8*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case dodec:
{
glBindBuffer(GL_ARRAY_BUFFER, DodecaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLE_FAN, 0, 5 * 6); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
glDrawArrays(GL_TRIANGLE_FAN, (5 * 6) + 1, 30);
//glutSolidDodecahedron();
//glDrawArrays(GL_TRIANGLE_STRIP,0,5*12);
}
break;
case icos:
{
glBindBuffer(GL_ARRAY_BUFFER, icosaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 3*20); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case sphere:
{
glBindBuffer(GL_ARRAY_BUFFER, sphereVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
//glDrawElements(GL_TRIANGLES, cnt2, GL_UNSIGNED_INT, 0)
glDrawArrays(GL_TRIANGLE_FAN, 0, 100);
}
}
glDisableVertexAttribArray(0);
glFlush();
}
还有更多......
/// Computes one face normal per triangle of a flat vertex buffer.
/// bufData holds `size` floats laid out as consecutive triangles
/// (9 floats = 3 vertices each); for every complete triangle the
/// normal is written into normBufData via calculateCross().
void calculateNormals(GLfloat bufData[], GLfloat normBufData[], int size)
{
    GLfloat triangle[9]; // scratch copy of the current triangle
    int filled = 0;      // how many floats of `triangle` are valid

    for (int i = 0; i < size; ++i)
    {
        triangle[filled++] = bufData[i];
        if (filled == 9) // a full triangle has been gathered
        {
            calculateCross(triangle, normBufData);
            filled = 0;
        }
    }

    printNormals(normBufData, size);
}
/// Computes the face normal of one triangle (9 floats: v0,v1,v2 in
/// bufData) and writes it into normBufData once per vertex, so all
/// three vertices of a face share the same normal (flat shading).
/// NOTE(review): `counter` is static and never reset, so this only
/// works for the first buffer processed per program run — reset it
/// (or pass an offset) before reusing for another mesh.
void calculateCross(GLfloat bufData[], GLfloat normBufData[])
{
    static int counter = 0; // running write position across calls

    // Edge vectors (v0 - v1) and (v0 - v2). Since (-a) x (-b) == a x b,
    // their cross product points the same way as the conventional
    // (v1 - v0) x (v2 - v0).
    glm::vec3 C1(bufData[0] - bufData[3],
                 bufData[1] - bufData[4],
                 bufData[2] - bufData[5]);
    glm::vec3 C2(bufData[0] - bufData[6],
                 bufData[1] - bufData[7],
                 bufData[2] - bufData[8]);

    // Unnormalized face normal; the fragment shader normalizes anyway.
    glm::vec3 normal = glm::cross(C1, C2);

    // One copy of the normal per triangle vertex. The original code had
    // a redundant inner loop that rewrote the same three floats three
    // times per vertex; removed with no behavioral change.
    for (int v = 0; v < 3; v++)
    {
        normBufData[counter]     = normal.x;
        normBufData[counter + 1] = normal.y;
        normBufData[counter + 2] = normal.z;
        counter += 3;
    }
}
和main......
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
glutInitWindowSize(700, 700); // Window Size
glutCreateWindow("Michael - Lab 3");
glutDisplayFunc(display);
glutTimerFunc(10, timeFucn, 10);
glutIdleFunc(Idle);
glutKeyboardFunc(keyboard);
glewExperimental = GL_TRUE;
glewInit();
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST); // Enable depth test
glDepthFunc(GL_LESS); // Accept fragment if it closer to the camera than the former one
GenerateSphere(); // this function generates points for the sphere
programID = LoadShader( "VertexShader.glsl", "FragmentShader.glsl" ); // Create and compile our GLSL program from the shaders
setBuffers(); // initilize buffers
calculateNormals(CubeBufferData,CubeNormalBufferData,108); // calculate norms
//printNormals(CubeNormalBufferData);
glutMainLoop();
}
After many painful hours of attempting to figure out why my lighting is messed up I am still at a loss.
The OpenGL normals are correct (backface culling does not cause any of my triangles to disappear)
I calculate my normals in order to interpolate for lighting, all the triangles on the same faces also have the same normals.
If any one has any thoughts that would be appreciated.
I am definitely new to OpenGL, so that is a bit obvious in my code.
here are my shaders:
vertex shader
#version 330 core layout(location = 0) in vec3 Position; layout(location = 1) in vec3 vertexColor; in vec3 vNormal; out vec3 fragmentColor; // Output data ; will be interpolated for each fragment. uniform mat4 MVP; uniform mat4 transformMatrix; uniform vec4 LightPosition; // output values that will be interpretated per-fragment out vec3 fN; out vec3 fE; out vec3 fL; void main() { fN = vNormal; fE = Position.xyz; fL = LightPosition.xyz; if( LightPosition.w != 0.0 ) { fL = LightPosition.xyz - Position.xyz; } // Output position of the vertex, in clip space : MVP * position vec4 v = vec4(Position,1); // Transform in homoneneous 4D vector gl_Position = MVP * v; //gl_Position = MVP * v; // The color of each vertex will be interpolated // to produce the color of each fragment //fragmentColor = vertexColor; // take out at some point }
and the fragmentShader, using phong shading
#version 330 //out vec3 color; // per-fragment interpolated values from the vertex shader in vec3 fN; in vec3 fL; in vec3 fE; out vec4 fColor; uniform vec4 AmbientProduct, DiffuseProduct, SpecularProduct; uniform mat4 ModelView; uniform vec4 LightPosition; uniform float Shininess; in vec3 fragmentColor; // Interpolated values from the vertex shaders void main() { // Normalize the input lighting vectors vec3 N = normalize(fN); vec3 E = normalize(fE); vec3 L = normalize(fL); vec3 H = normalize( L + E ); vec4 ambient = AmbientProduct; float Kd = max(dot(L, N), 0.0); vec4 diffuse = Kd*DiffuseProduct; float Ks = pow(max(dot(N, H), 0.0), Shininess); vec4 specular = Ks*SpecularProduct; // discard the specular highlight if the light's behind the vertex if( dot(L, N) < 0.0 ) { specular = vec4(0.0, 0.0, 0.0, 1.0); } fColor = ambient + diffuse + specular; fColor.a = 1.0; //color = vec3(1,0,0); // Output color = color specified in the vertex shader, // interpolated between all 3 surrounding vertices //color = fragmentColor; } void setMatrices() { GLfloat FoV = 45; // the zoom of the camera glm::vec3 cameraPosition(4,3,3), // the position of your camera, in world space // change to see what happends cameraTarget(0,0,0), // where you want to look at, in world space upVector(0,-1,0); // Projection matrix : 45° Field of View, 4:3 ratio, display range : 0.1 unit <-> 100 units glm::mat4 Projection = glm::perspective(FoV, 3.0f / 3.0f, 0.001f, 100.0f); // ratio needs to change here when the screen size/ratio changes // Camera matrix glm::mat4 View = glm::lookAt( cameraPosition, // Camera is at (4,3,3), in World Space cameraTarget, // and looks at the origin upVector // Head is up (set to 0,-1,0 to look upside-down) ); // Model matrix : an identity matrix (model will be at the origin) glm::mat4 Model = glm::mat4(1.0f); // Changes for each model ! 
// Our ModelViewProjection : multiplication of our 3 matrices glm::mat4 MVP = Projection * View * Model * transformMatrix; //matrix multiplication is the other way around // Get a handle for our "MVP" uniform. // Only at initialisation time. GLuint MatrixID = glGetUniformLocation(programID, "MVP"); // Send our transformation to the currently bound shader, // in the "MVP" uniform // For each model you render, since the MVP will be different (at least the M part) glUniformMatrix4fv(MatrixID, 1, GL_FALSE, &MVP[0][0]); RotationID = glGetUniformLocation(programID,"transformMatrix"); //lighting cubeNormal = glGetAttribLocation( programID, "vNormal" ); } void setBuffers() { // Get a vertex array object GLuint VAO; glGenVertexArrays(1, &VAO); glBindVertexArray(VAO); glUseProgram(programID); // cube buffer objects glGenBuffers(1, &CubeVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer glBindBuffer(GL_ARRAY_BUFFER, CubeVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer glBufferData(GL_ARRAY_BUFFER, sizeof(CubeBufferData), CubeBufferData, GL_STATIC_DRAW); // Give our vertices to OpenGL. // cube normal objects glGenBuffers(1, &CubeNormalbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer glBindBuffer(GL_ARRAY_BUFFER, CubeNormalbuffer); // The following commands will talk about our 'vertexbuffer' buffer glBufferData(GL_ARRAY_BUFFER, sizeof(CubeNormalBufferData), CubeNormalBufferData, GL_STATIC_DRAW); // Give our vertices to OpenGL. //octahedron buffer objects glGenBuffers(1, &OctaVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer glBindBuffer(GL_ARRAY_BUFFER, OctaVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer glBufferData(GL_ARRAY_BUFFER, sizeof(octahedronBufData), octahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL. 
//tetrahedron buffer objects glGenBuffers(1, &TetraVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer glBindBuffer(GL_ARRAY_BUFFER, TetraVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer glBufferData(GL_ARRAY_BUFFER, sizeof(tetrahedronBufData), tetrahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL. //dodecahedron buffer objects glGenBuffers(1, &DodecaVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer glBindBuffer(GL_ARRAY_BUFFER, DodecaVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer glBufferData(GL_ARRAY_BUFFER, sizeof(dodecahedronBufData), dodecahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL. //icosahedron buffer objects glGenBuffers(1, &icosaVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer glBindBuffer(GL_ARRAY_BUFFER, icosaVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer glBufferData(GL_ARRAY_BUFFER, sizeof(icosahedronBufData), icosahedronBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL. //sphere buffer objects glGenBuffers(1, &sphereVertexbuffer); // Generate 1 buffer, put the resulting identifier in vertexbuffer glBindBuffer(GL_ARRAY_BUFFER, sphereVertexbuffer); // The following commands will talk about our 'vertexbuffer' buffer glBufferData(GL_ARRAY_BUFFER, sizeof(sphereBufData), sphereBufData, GL_STATIC_DRAW); // Give our vertices to OpenGL. 
glGenBuffers(1, &colorbuffer); glBindBuffer(GL_ARRAY_BUFFER, colorbuffer); glBufferData(GL_ARRAY_BUFFER, sizeof(g_color_buffer_data), g_color_buffer_data, GL_STATIC_DRAW); // lighting stuff // Initialize shader lighting parameters point4 light_position= { 0.0, 20.0, -10.0, 0.0 }; color4 light_ambient ={ 0.2, 0.2, 0.2, 1.0 }; color4 light_diffuse ={ 1.0, 1.0, 1.0, 1.0 }; color4 light_specular ={ 1.0, 1.0, 1.0, 1.0 }; color4 material_ambient ={ 1.0, 0.0, 1.0, 1.0 }; color4 material_diffuse ={ 1.0, 0.8, 0.0, 1.0 }; color4 material_specular ={ 1.0, 0.8, 0.0, 1.0 }; float material_shininess = 20.0; color4 ambient_product; color4 diffuse_product; color4 specular_product; int i; for (i = 0; i < 3; i++) { ambient_product[i] = light_ambient[i] * material_ambient[i]; diffuse_product[i] = light_diffuse[i] * material_diffuse[i]; specular_product[i] = light_specular[i] * material_specular[i]; } //printColor("diffuse", diffuse_product); //printColor("specular", specular_product); glUniform4fv( glGetUniformLocation(programID, "AmbientProduct"), 1, ambient_product ); glUniform4fv( glGetUniformLocation(programID, "DiffuseProduct"), 1, diffuse_product ); glUniform4fv( glGetUniformLocation(programID, "SpecularProduct"), 1, specular_product ); glUniform4fv( glGetUniformLocation(programID, "LightPosition"), 1, light_position ); glUniform1f( glGetUniformLocation(programID, "Shininess"), material_shininess ); }
and some more....
void display()
{
setMatrices(); // initilize Matrices
// Use our shader
//glUseProgram(programID);
glClearColor(0.0f, 0.0f, 0.3f, 0.0f);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// 2nd attribute buffer : colors
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, colorbuffer);
glVertexAttribPointer(
1, // attribute. No particular reason for 1, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glEnableVertexAttribArray(0); // 1rst attribute buffer : vertices
// enum platosShapes{tet, cube, octah, dodec, icos};
switch(shapeInUse)
{
case tet:
{
glBindBuffer(GL_ARRAY_BUFFER, TetraVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 4*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case cube:
{
//GLuint cubeNormal = glGetAttribLocation( programID, "vNormal" );
glEnableVertexAttribArray( cubeNormal );
glVertexAttribPointer( cubeNormal, 3, GL_FLOAT, GL_FALSE, 0,
(const GLvoid *) (sizeof(CubeNormalBufferData)) );
//glDisableVertexAttribArray( cubeNormal );
glBindBuffer(GL_ARRAY_BUFFER, CubeVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 12*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case octah:
{
glBindBuffer(GL_ARRAY_BUFFER, OctaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 8*3); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case dodec:
{
glBindBuffer(GL_ARRAY_BUFFER, DodecaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLE_FAN, 0, 5 * 6); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
glDrawArrays(GL_TRIANGLE_FAN, (5 * 6) + 1, 30);
//glutSolidDodecahedron();
//glDrawArrays(GL_TRIANGLE_STRIP,0,5*12);
}
break;
case icos:
{
glBindBuffer(GL_ARRAY_BUFFER, icosaVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
glDrawArrays(GL_TRIANGLES, 0, 3*20); // Starting from vertex 0; 3 vertices total -> 1 triangle // need to know amount of vertices here // and change to triangle strips accordingly
}
break;
case sphere:
{
glBindBuffer(GL_ARRAY_BUFFER, sphereVertexbuffer);
glVertexAttribPointer(
0, // attribute 0. No particular reason for 0, but must match the layout in the shader.
3, // size
GL_FLOAT, // type
GL_FALSE, // normalized?
0, // stride
(void*)0 // array buffer offset
);
//glDrawElements(GL_TRIANGLES, cnt2, GL_UNSIGNED_INT, 0)
glDrawArrays(GL_TRIANGLE_FAN, 0, 100);
}
}
glDisableVertexAttribArray(0);
glFlush();
}
and some more........
/// Computes one face normal per triangle of a flat vertex buffer.
/// bufData holds `size` floats laid out as consecutive triangles
/// (9 floats = 3 vertices each); for every complete triangle the
/// normal is written into normBufData via calculateCross().
void calculateNormals(GLfloat bufData[], GLfloat normBufData[], int size)
{
    GLfloat triangle[9]; // scratch copy of the current triangle
    int filled = 0;      // how many floats of `triangle` are valid

    for (int i = 0; i < size; ++i)
    {
        triangle[filled++] = bufData[i];
        if (filled == 9) // a full triangle has been gathered
        {
            calculateCross(triangle, normBufData);
            filled = 0;
        }
    }

    printNormals(normBufData, size);
}
/// Computes the face normal of one triangle (9 floats: v0,v1,v2 in
/// bufData) and writes it into normBufData once per vertex, so all
/// three vertices of a face share the same normal (flat shading).
/// NOTE(review): `counter` is static and never reset, so this only
/// works for the first buffer processed per program run — reset it
/// (or pass an offset) before reusing for another mesh.
void calculateCross(GLfloat bufData[], GLfloat normBufData[])
{
    static int counter = 0; // running write position across calls

    // Edge vectors (v0 - v1) and (v0 - v2). Since (-a) x (-b) == a x b,
    // their cross product points the same way as the conventional
    // (v1 - v0) x (v2 - v0).
    glm::vec3 C1(bufData[0] - bufData[3],
                 bufData[1] - bufData[4],
                 bufData[2] - bufData[5]);
    glm::vec3 C2(bufData[0] - bufData[6],
                 bufData[1] - bufData[7],
                 bufData[2] - bufData[8]);

    // Unnormalized face normal; the fragment shader normalizes anyway.
    glm::vec3 normal = glm::cross(C1, C2);

    // One copy of the normal per triangle vertex. The original code had
    // a redundant inner loop that rewrote the same three floats three
    // times per vertex; removed with no behavioral change.
    for (int v = 0; v < 3; v++)
    {
        normBufData[counter]     = normal.x;
        normBufData[counter + 1] = normal.y;
        normBufData[counter + 2] = normal.z;
        counter += 3;
    }
}
and main.....
int main(int argc, char **argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB);
glutInitWindowSize(700, 700); // Window Size
glutCreateWindow("Michael - Lab 3");
glutDisplayFunc(display);
glutTimerFunc(10, timeFucn, 10);
glutIdleFunc(Idle);
glutKeyboardFunc(keyboard);
glewExperimental = GL_TRUE;
glewInit();
glEnable(GL_CULL_FACE);
glEnable(GL_DEPTH_TEST); // Enable depth test
glDepthFunc(GL_LESS); // Accept fragment if it closer to the camera than the former one
GenerateSphere(); // this function generates points for the sphere
programID = LoadShader( "VertexShader.glsl", "FragmentShader.glsl" ); // Create and compile our GLSL program from the shaders
setBuffers(); // initilize buffers
calculateNormals(CubeBufferData,CubeNormalBufferData,108); // calculate norms
//printNormals(CubeNormalBufferData);
glutMainLoop();
}
如果你对这篇内容有疑问,欢迎到本站社区发帖提问 参与讨论,获取更多帮助,或者扫码二维码加入 Web 技术交流群。
绑定邮箱获取回复消息
由于您还没有绑定你的真实邮箱,如果其他用户或者作者回复了您的评论,将不能在第一时间通知您!
发布评论
评论(3)
您忘记在调用
glVertexAttribPointer(cubeNormal, 3,....);
之前将缓冲区对象与法线绑定。因此,法线的实际数据是从颜色缓冲区中获取的,这导致了最奇怪的 Phong 评估结果。顺便说一句,很好的编码风格:)
You forgot to bind the buffer object with normals before calling
glVertexAttribPointer( cubeNormal, 3,....);
. Therefore, the actual data for normals is taken from the color buffer, which causes weirdest Phong evaluation result.BTW, nice coding style :)
Phong 和 Gouraud 阴影不适用于所有平面的物体,例如立方体。
Phong and Gouraud shadings are not applicable to objects with all planar surfaces, e.g. a cube.
我遇到了出现在这个立方体中的问题,当我发送其正常值时,我注意到我发送的偏移值不正确并修复了问题。
I had the problem of appearing in this cube, and when I sent its normal values, I noticed that I sent the offset value incorrectly and fixed the problem.