Summer Semester 2022
This repository contains the exercise submissions for the GPU Programming lecture at Ulm University. All exercises below were submitted separately during the semester.
Vertex shader vertexShader.glsl, including the vertex transformation:
#version 450
uniform mat4 worldViewProjMatrix;
in vec4 sPos;
in vec3 sNormal;
out vec3 normal;
void main() {
normal = sNormal;
// Model space -> world space -> view space -> projection space
gl_Position = worldViewProjMatrix * sPos;
}

The lecture example computed a color for each vertex by scaling the input color with the dot product of the light direction and the vertex normal. In the exercise, this calculation is moved to the pixel shader. This can produce a different result: the pixel shader inputs are interpolated between multiple vertex shader outputs, and interpolating the normal and lighting per pixel does not yield the same shading as interpolating a color that was computed per vertex.
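For comparison, a minimal sketch of the per-vertex variant described above; the names lightDir and sColor are assumptions and not taken from the exercise framework:

#version 450
uniform mat4 worldViewProjMatrix;
uniform vec3 lightDir; // assumed: light direction in model space
in vec4 sPos;
in vec3 sNormal;
in vec4 sColor;        // assumed: per-vertex input color
out vec4 color;
void main() {
    // The lit color (not the normal) is interpolated across the triangle
    color = dot(normalize(lightDir), normalize(sNormal)) * sColor;
    gl_Position = worldViewProjMatrix * sPos;
}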
In which coordinate system is the light defined?
The light direction is specified in model space, since it is used in a dot product with the vertex normal, which is also given in model space.
Will this implementation work if we have multiple instances of the same model?
Yes. If a separate worldViewProjMatrix is supplied for each instance, multiple instances can be displayed at different locations. The lighting calculations are not affected, as they operate in model space.
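Sketched below is one way this could look with instanced rendering; the matrix array name and MAX_INSTANCES are assumptions, and the host application is assumed to upload one matrix per instance and to draw with glDrawArraysInstanced:

#version 450
#define MAX_INSTANCES 16 // assumed limit

// Assumed: one matrix per instance, uploaded by the host application
uniform mat4 worldViewProjMatrices[MAX_INSTANCES];
in vec4 sPos;
in vec3 sNormal;
out vec3 normal;
void main() {
    // Lighting operates in model space, so only the matrix differs per instance
    normal = sNormal;
    gl_Position = worldViewProjMatrices[gl_InstanceID] * sPos;
}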
An improved result is achieved by moving the lighting calculation from the vertex shader to the fragment shader, interpolating the normal and position instead of the color.
Vertex Shader:
#version 150
uniform mat4 worldViewMatrix;
uniform mat4 ProjMatrix;
in vec4 sPos;
in vec3 sNormal;
out vec3 position;
out vec3 normal;
void main() {
position = sPos.xyz;
normal = sNormal;
gl_Position = ProjMatrix * worldViewMatrix * sPos;
}

Fragment Shader:
#version 150
uniform vec3 lightPosition;
uniform vec4 inObjColor;
uniform vec4 inLightColor;
uniform vec3 camPos;
in vec3 position;
in vec3 normal;
out vec4 outColor;
void main() {
vec3 normalizedLightDir = normalize(lightPosition - position);
vec3 normalizedNormal = normalize(normal);
vec4 ambient = vec4(0.1, 0.1, 0.1, 1.0);
// Clamp the Lambert term to avoid negative diffuse contributions
vec4 diffuse = max(dot(normalizedLightDir, normalizedNormal), 0.0) *
               inObjColor * inLightColor;
vec3 hVec = normalize(normalizedLightDir + normalize(camPos.xyz - position));
// max() avoids pow() with a negative base, which is undefined in GLSL
vec4 specular = pow(max(dot(hVec, normalizedNormal), 0.0), 64.0) * vec4(1.0);
outColor = ambient + diffuse + specular;
}

A falloff term was introduced with the required scaling factor. The falloff is multiplied with the existing Lambertian reflection term in the diffuse lighting calculation:
#version 150
uniform vec3 lightPosition;
uniform vec4 inObjColor;
uniform vec4 inLightColor;
uniform vec3 camPos;
in vec3 position;
in vec3 normal;
out vec4 outColor;
void main() {
vec3 normalizedLightDir = normalize(lightPosition - position);
float dist = length(lightPosition - position);
vec3 normalizedNormal = normalize(normal);
vec4 ambient = vec4(0.1, 0.1, 0.1, 1.0);
float At_e = 0.15;
float falloff = exp(-At_e * dist); // == 1 / exp(At_e * dist)
vec4 diffuse = max(dot(normalizedLightDir, normalizedNormal), 0.0) *
               inObjColor * inLightColor * falloff;
vec3 hVec = normalize(normalizedLightDir + normalize(camPos.xyz - position));
vec4 specular = pow(max(dot(hVec, normalizedNormal), 0.0), 64.0) * vec4(1.0);
outColor = ambient + diffuse + specular;
}

At the edge of the model the normal is approximately orthogonal to the view direction. This is checked by testing if the dot product between view direction and normal is close to zero:
vec3 viewDirection = normalize(camPos.xyz - position);
if (dot(viewDirection, normalizedNormal) < 0.4) {
outColor = vec4(0, 0, 0, 0);
}

The color is set to zero if the test determines the pixel belongs to the silhouette:
#version 150
uniform vec3 lightPosition;
uniform vec4 inObjColor;
uniform vec4 inLightColor;
uniform vec3 camPos;
in vec3 position;
in vec3 normal;
out vec4 outColor;
void main() {
vec3 normalizedLightDir = normalize(lightPosition - position);
float dist = length(lightPosition - position);
vec3 normalizedNormal = normalize(normal);
vec4 ambient = vec4(0.1, 0.1, 0.1, 1.0);
float At_e = 0.15;
float falloff = exp(-At_e * dist); // == 1 / exp(At_e * dist)
vec4 diffuse = max(dot(normalizedLightDir, normalizedNormal), 0.0) *
               inObjColor * inLightColor * falloff;
vec3 hVec = normalize(normalizedLightDir + normalize(camPos.xyz - position));
vec4 specular = pow(max(dot(hVec, normalizedNormal), 0.0), 64.0) * vec4(1.0);
outColor = ambient + diffuse + specular;
vec3 viewDirection = normalize(camPos.xyz - position);
if (dot(viewDirection, normalizedNormal) < 0.4) {
outColor = vec4(0, 0, 0, 0);
}
}

By calculating the partial derivatives of the world space position of the fragment, we obtain two vectors tangential to the model surface. Using the cross product, the surface normal is calculated:
// Compute the normal
vec3 v1 = dFdx(position);
vec3 v2 = dFdy(position);
vec3 normalizedNormal = normalize(cross(v1, v2));

First, the light direction is transformed to tangent space, with z=up instead of y=up:
// Compute the transformation matrix to go from world space to tangent space.
// y and z cols/rows are switched compared to lecture, we want z=up
mat3 transMat = mat3(tangent.x, binormal.x, normal.x,
tangent.z, binormal.z, normal.z,
tangent.y, binormal.y, normal.y);

Then, a linear search is performed along the light ray for geometry obstructing the path between light source and current position:
// Compute the occlusion
float occlusion = 1.0;
for(int i = 0; i < MAX_ITERS; i++){
if(curTexCoords.z < HEIGHT_SCALE * texture(displTexture, curTexCoords.xy).r) {
// Point below surface!
occlusion = 0.1;
break;
}
curTexCoords += transLightDir * DISPLACEMENT;
}

A utility function is implemented to check if a point along the ray is below the surface:
bool belowSurface(float currH, vec2 stin, vec2 stout) {
vec2 newTexCoords = stin * currH + stout * (1.0 - currH);
float height = texture(dispTexture, newTexCoords).r;
return currH <= height;
}

This is then used to implement the binary search algorithm:
float hMin = 0.0;
float hMax = 1.0;
for (int i = 0; i < MAX_ITER; i++) {
float middle = hMin + 0.5 * (hMax - hMin);
if (belowSurface(middle, stin, stout)) {
hMin = middle;
} else {
hMax = middle;
}
}
intersection = hMin + 0.5 * (hMax - hMin);

Combining the linear and the binary search yields both good performance and high accuracy. Separate iteration limits are defined:
#define MAX_ITER_LINEAR 50
#define MAX_ITER_BINARY 10

The linear search runs first and determines a rough interval around the intersection:
// Binary search interval
float hMin = 1.0;
float hMax = 0.0;
// Linear search from h=1 (stin) towards h=0 (stout)
for (int i = 0; i < MAX_ITER_LINEAR; ++i) {
currH -= 1.0 / float(MAX_ITER_LINEAR);
if (belowSurface(currH, stin, stout)) {
// First point below surface -> Binary search
hMin = currH;
break;
}
hMax = currH;
}

Then, binary search refines the solution within this interval:
for (int i = 0; i < MAX_ITER_BINARY; i++) {
float middle = hMin + 0.5 * (hMax - hMin);
if (belowSurface(middle, stin, stout)) {
hMin = middle;
} else {
hMax = middle;
}
}
intersection = hMin + 0.5 * (hMax - hMin);

The horizon angle is computed in multiple directions, as defined by currentScreenVect:
for (int i = 0; i < NUM_ROTATIONS; i++) {
// 2D ray in screen space
vec2 currentScreenVect = rotationMatrix(i * augment) * initScreenVect;

Along this ray (in screen space), the world space position is sampled using the provided input texture:
for (int j = 1; j <= NUM_SAMPLES_DIRECTION; j++) {
// Get the current position along the ray.
vec4 auxPosition = texture(posTex, uv + j * DISPLACEMENT * currentScreenVect);

The angle between the normal and the direction to the sampled point along the ray is computed:
// Compute the angle between the normal and the displacement vector
// to the position we are checking.
float angle = acos(dot(normalPos, normalize(auxPosition - currPos)));

The minimum angle along each direction determines the occlusion in that direction; the occlusion values of all directions are averaged.
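Putting these fragments together, a minimal sketch of the accumulation could look as follows; the identifiers follow the snippets above (M_PI is assumed to be defined, normalPos and currPos are assumed to hold the center fragment's normal and world space position), and mapping the minimum angle to an occlusion value via cos() is a simplification:

float occlusion = 0.0;
for (int i = 0; i < NUM_ROTATIONS; i++) {
    vec2 currentScreenVect = rotationMatrix(i * augment) * initScreenVect;
    float minAngle = M_PI / 2.0; // horizon angle of a flat, unoccluded surface
    for (int j = 1; j <= NUM_SAMPLES_DIRECTION; j++) {
        vec4 auxPosition = texture(posTex, uv + j * DISPLACEMENT * currentScreenVect);
        // Angle between the normal and the direction to the sampled point
        float angle = acos(clamp(dot(normalPos.xyz,
                                     normalize(auxPosition.xyz - currPos.xyz)),
                                 -1.0, 1.0));
        minAngle = min(minAngle, angle);
    }
    // A smaller angle to the normal means more of the hemisphere is blocked
    occlusion += cos(minAngle);
}
occlusion /= float(NUM_ROTATIONS);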
If the distance to the point along the ray exceeds a set maximum, it is clamped; samples at the maximum distance then contribute nothing to the occlusion of the current ray:
// Compute the distance between the center point (currPos)
// and the position we are checking.
float distance = length(auxPosition - currPos);
distance = min(MAX_EFFECT_DISTANCE, distance);

Since the cylinder SDF is already provided, it is sufficient to call it in f():
float f(vec3 samplePoint) {
// Slowly spin the whole scene
samplePoint = rotateY(iTime / 2.0) * samplePoint;
return cylinderSDF(samplePoint, 4.0, 0.5);
}

To achieve a deformation similar to the one displayed in the example image, the y-coordinate of the sample point is shifted. The amount of displacement is animated using the current time and z-position (which is the position along the axis of the cylinder):
float f(vec3 samplePoint) {
// Slowly spin the whole scene
samplePoint = rotateY(iTime / 2.0) * samplePoint;
// Deform and animate the cylinder
samplePoint.y += sin(iTime - samplePoint.z * 1.5);
return cylinderSDF(samplePoint, 4.0, 0.5);
}

The cylinder SDF is evaluated twice, and the results are combined using unionSDF. The sample point for the second cylinder is rotated by 90°:
#define M_PI 3.1415926535897932384626433832795
float f(vec3 samplePoint) {
// Slowly spin the whole scene
vec3 samplePoint1 = rotateY(iTime / 2.0) * samplePoint;
// Second cylinder is rotated another 90deg
vec3 samplePoint2 = rotateY(M_PI / 2.0) * samplePoint1;
// Deform and animate the cylinder
samplePoint1.y += sin(iTime - samplePoint1.z * 1.5);
// Second cylinder is rotated -> animate along x
samplePoint2.y += sin(iTime - samplePoint2.z * 1.5);
float cylinder1 = cylinderSDF(samplePoint1, 4.0, 0.5);
float cylinder2 = cylinderSDF(samplePoint2, 4.0, 0.5);
return unionSDF(cylinder1, cylinder2);
}

When using the output of our SDF f(samplePoint) directly as the step size, artifacts appear. This is caused by the transformations we apply to the sample point in f: f(p) no longer returns the correct distance between the object and p, but the distance between the object and the transformed, non-uniformly scaled samplePoint. This is not a problem for rendering, since the sign is still correct. It is important, however, not to overestimate the step size, since this could lead to missing geometry during ray casting. Scaling f(p) down ensures the distance is never overestimated and the step size is never too large.
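A minimal sketch of such a conservative step inside a standard sphere tracing loop; only f() is taken from above, while the function name, loop structure, constants, and STEP_SCALE are assumptions:

#define STEP_SCALE 0.5 // assumed: < 1.0 compensates for the deformation in f()

float marchRay(vec3 eye, vec3 dir, float start, float end) {
    float depth = start;
    for (int i = 0; i < 256; i++) {
        float dist = f(eye + depth * dir);
        if (dist < 0.001) {
            return depth; // close enough to the surface: report a hit
        }
        // Never advance the full reported distance, since f() may
        // overestimate it after the non-uniform deformation
        depth += STEP_SCALE * dist;
        if (depth >= end) {
            break;
        }
    }
    return end; // no hit within [start, end]
}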
To move all the triangles along the normal direction, the
displacement vector is added to both the gl_Position
(before projection) and the gsposition outputs:
#version 430 core
layout(triangles) in;
layout(triangle_strip, max_vertices = 3) out;
uniform mat4 worldViewProjMatrix;
out vec3 gsposition;
out vec3 gsnormal;
void main() {
vec3 vect1 = normalize(gl_in[1].gl_Position.xyz - gl_in[0].gl_Position.xyz);
vec3 vect2 = normalize(gl_in[2].gl_Position.xyz - gl_in[0].gl_Position.xyz);
vec3 normal = normalize(cross(vect1, vect2));
vec4 disp = 0.5 * vec4(normal.x, normal.y, normal.z, 0);
gl_Position = worldViewProjMatrix * (gl_in[0].gl_Position + disp);
gsposition = gl_in[0].gl_Position.xyz + disp.xyz;
gsnormal = normal;
EmitVertex();
gl_Position = worldViewProjMatrix * (gl_in[1].gl_Position + disp);
gsposition = gl_in[1].gl_Position.xyz + disp.xyz;
gsnormal = normal;
EmitVertex();
gl_Position = worldViewProjMatrix * (gl_in[2].gl_Position + disp);
gsposition = gl_in[2].gl_Position.xyz + disp.xyz;
gsnormal = normal;
EmitVertex();
EndPrimitive();
}

For subdivision of the triangle, the max_vertices limit is increased to 5: in addition to the new vertex, the first vertex has to be duplicated in order to complete the triangle strip.
#version 430 core
layout(triangles) in;
layout(triangle_strip, max_vertices = 5) out;
uniform mat4 worldViewProjMatrix;
out vec3 gsposition;
out vec3 gsnormal;
void main() {
vec3 vect1 = normalize(gl_in[1].gl_Position.xyz - gl_in[0].gl_Position.xyz);
vec3 vect2 = normalize(gl_in[2].gl_Position.xyz - gl_in[0].gl_Position.xyz);
vec3 normal = normalize(cross(vect1, vect2));
gl_Position = worldViewProjMatrix * gl_in[0].gl_Position;
gsposition = gl_in[0].gl_Position.xyz;
gsnormal = normal;
EmitVertex();
gl_Position = worldViewProjMatrix * gl_in[1].gl_Position;
gsposition = gl_in[1].gl_Position.xyz;
gsnormal = normal;
EmitVertex();
vec4 disp = 0.5 * vec4(normal.x, normal.y, normal.z, 0);
vec4 center =
(gl_in[0].gl_Position + gl_in[1].gl_Position + gl_in[2].gl_Position) / 3 +
disp;
gl_Position = worldViewProjMatrix * center;
gsposition = center.xyz;
gsnormal = normal;
EmitVertex();
gl_Position = worldViewProjMatrix * gl_in[2].gl_Position;
gsposition = gl_in[2].gl_Position.xyz;
gsnormal = normal;
EmitVertex();
// Emit first vertex again, to complete triangle strip
gl_Position = worldViewProjMatrix * gl_in[0].gl_Position;
gsposition = gl_in[0].gl_Position.xyz;
gsnormal = normal;
EmitVertex();
EndPrimitive();
}

In order to assign the same normal to each vertex of the three new sides, max_vertices is set to 9. Three separate primitives of three vertices (with the same normal) each are output:
#version 430 core
layout(triangles) in;
layout(triangle_strip, max_vertices = 9) out;
uniform mat4 worldViewProjMatrix;
out vec3 gsposition;
out vec3 gsnormal;
void main() {
// Input triangle vertices
vec4 v0 = gl_in[0].gl_Position;
vec4 v1 = gl_in[1].gl_Position;
vec4 v2 = gl_in[2].gl_Position;
// Triangle sides
vec3 vect1 = normalize(v1.xyz - v0.xyz);
vec3 vect2 = normalize(v2.xyz - v0.xyz);
vec3 vect3 = normalize(v2.xyz - v1.xyz);
// Normal of the input triangle
vec3 origNormal = normalize(cross(vect1, vect2));
vec4 disp = 0.5 * vec4(origNormal.x, origNormal.y, origNormal.z, 0);
vec4 center = (v0 + v1 + v2) / 3 + disp;
// Output triangle normals
vec3 n1 = normalize(cross(vect1, normalize(center.xyz - v0.xyz)));
vec3 n2 = normalize(cross(vect3, normalize(center.xyz - v1.xyz)));
// Triangle 3 is (v2, v0, center): edge v2->v0 crossed with the direction
// v2->center, following the same pattern as n1 and n2
vec3 n3 =
    normalize(cross(normalize(v0.xyz - v2.xyz), normalize(center.xyz - v2.xyz)));
// Triangle 1
gl_Position = worldViewProjMatrix * gl_in[0].gl_Position;
gsposition = gl_in[0].gl_Position.xyz;
gsnormal = n1;
EmitVertex();
gl_Position = worldViewProjMatrix * gl_in[1].gl_Position;
gsposition = gl_in[1].gl_Position.xyz;
gsnormal = n1;
EmitVertex();
gl_Position = worldViewProjMatrix * center;
gsposition = center.xyz;
gsnormal = n1;
EmitVertex();
EndPrimitive();
// Triangle 2
gl_Position = worldViewProjMatrix * gl_in[1].gl_Position;
gsposition = gl_in[1].gl_Position.xyz;
gsnormal = n2;
EmitVertex();
gl_Position = worldViewProjMatrix * gl_in[2].gl_Position;
gsposition = gl_in[2].gl_Position.xyz;
gsnormal = n2;
EmitVertex();
gl_Position = worldViewProjMatrix * center;
gsposition = center.xyz;
gsnormal = n2;
EmitVertex();
EndPrimitive();
// Triangle 3
gl_Position = worldViewProjMatrix * gl_in[2].gl_Position;
gsposition = gl_in[2].gl_Position.xyz;
gsnormal = n3;
EmitVertex();
gl_Position = worldViewProjMatrix * gl_in[0].gl_Position;
gsposition = gl_in[0].gl_Position.xyz;
gsnormal = n3;
EmitVertex();
gl_Position = worldViewProjMatrix * center;
gsposition = center.xyz;
gsnormal = n3;
EmitVertex();
EndPrimitive();
}