I am trying to write a simple program in C, using OpenGL, that would allow "drawing" a 2D C array (int **, 32-bit integers) according to a color palette.
For the moment (I am not there yet, far from it :) ) I'm learning how to send an array of 32-bit signed ints to the GPU and display it somehow.
I'm trying to do this in modern OpenGL.
My approach (bear with me as I just started learning these topics two days ago):
- Set up the vertex data (vertices) for a rectangle built from two triangles (defined by picking the vertices via indices (indices)). The vertex data is interleaved with 2D texture coordinates (for texture sampling in the shaders).
- Upload the C array to the GPU as a texture with glTexImage2D(), using an internal format of GL_R32I since my C array is of type int **. I am not so sure about the format and type parameters, but I've set them to GL_RED_INTEGER and GL_UNSIGNED_INT, respectively.
- In the fragment shader, sample the texture with texture(texture1, TexCoord).r, but probably this isn't right... I also tried casting that red component to float: (float) texture(texture1, TexCoord).r, but that does not work either.

Just to give you some reassurance that my code does some things right: leaving only FragColor = vec4(1.0f, 0.8f, 0.2f, 1.0f); in the fragment shader does show that colour, meaning I get a rectangle filling the window with that colour. It is only when I start fiddling with the texture that I get either a black screen or cyan, RGB: (0, 1.0, 1.0, 1.0).

Note: My C array is named plane, and right now it is filled with a left block of 0 values and a right block of 1s.
Right now, I'd be happy if I could hard code an if-statement inside the fragment shader that colored the 0s and 1s from the 32-bit plane into any two other colors. Then I think I could proceed to include a 1D texture with the color palette... as done here.
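Something along these lines is what I have in mind for that hard-coded fragment shader (untested sketch, reusing the texture1 sampler and TexCoord from the shaders below; the two output colours are arbitrary placeholders):

#version 330 core
out vec4 FragColor;
in vec2 TexCoord;
uniform isampler2D texture1;
void main()
{
    int value = texture(texture1, TexCoord).r;
    if (value == 0)
        FragColor = vec4(0.0, 0.0, 0.0, 1.0); // one colour for the 0s
    else
        FragColor = vec4(1.0, 1.0, 1.0, 1.0); // another colour for the 1s
}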
pixel.h
#ifndef PIXEL_H
#define PIXEL_H
/*
To make sure there will be no header conflicts, you can define
GLFW_INCLUDE_NONE before the GLFW header to explicitly disable
inclusion of the development environment header. This also allows
the two headers to be included in any order.
*/
#define GLFW_INCLUDE_NONE
#include <glad/glad.h>
#include <GLFW/glfw3.h>
#include <plane.h>
#include <utils.h>
#include <stdlib.h>
#include <stdio.h>
#endif
pixel.c
#include <pixel.h>
const char *vertexShaderSource = "#version 330 core\n"
"layout (location = 0) in vec3 aPos;\n"
"layout (location = 1) in vec2 aTexCoord;\n"
"out vec2 TexCoord;\n"
"void main()\n"
"{\n"
" gl_Position = vec4(aPos.x, aPos.y, aPos.z, 1.0);\n"
" TexCoord = vec2(aTexCoord.x, aTexCoord.y);\n"
"}\0";
const char *fragmentShaderSource = "#version 330 core\n"
"out vec4 FragColor;\n"
"in vec2 TexCoord;\n"
"uniform isampler2D texture1;\n"
"void main()\n"
"{\n"
" FragColor = vec4(1.0f, 0.8f, 0.2f, 1.0f);\n"
" //FragColor = vec4(texture(texture1, TexCoord).r, 1.0f, 1.0f, 1.0f);\n"
"}\n\0";
int main(void)
{
// Window width and height.
const unsigned int width = 20;
const unsigned int height = 10;
// Before you can use most GLFW functions, the library must be initialized.
if (!glfwInit()) {
printf("Could not initialise GLFW library!");
exit(EXIT_FAILURE);
}
/*
* By default, the OpenGL context GLFW creates may have any version.
* You can require a minimum OpenGL version by setting the
* GLFW_CONTEXT_VERSION_MAJOR and GLFW_CONTEXT_VERSION_MINOR hints
* before creation. If the required minimum version is not supported
* on the machine, context (and window) creation fails.
*/
glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 3);
glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 2);
glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE);
// Create a GLFW window.
GLFWwindow* window = glfwCreateWindow(width, height, "pixel example", NULL, NULL);
if (!window)
{
printf("Window or OpenGL context creation failed!\n");
glfwTerminate();
exit(EXIT_FAILURE);
}
// Before you can use the OpenGL API, you must have a current OpenGL context.
glfwMakeContextCurrent(window);
/*
* If you are using an extension loader library to access modern OpenGL
* then this is when to initialize it, as the loader needs a current
* context to load from. This example uses glad, but the same rule applies
* to all such libraries.
*/
gladLoadGL();
/*
* Set a framebuffer size callback to update the viewport when
* the window size changes.
*/
glfwSetFramebufferSizeCallback(window, fb);
/*
*
* Data to be drawn.
*
*/
int **plane = NewPlane(width, height);
PLANE(width, height, if (i < width / 2) plane[i][j] = 0; else plane[i][j] = 1;)
//plane[width/2][height/2] = 1;
//PLANE(width, height, printf("%d %d %d\n", i, j, plane[i][j]);)
printf("size of int: %ld bytes\n", sizeof(int));
// build and compile our shader program
// ------------------------------------
// vertex shader
unsigned int vertexShader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertexShader, 1, &vertexShaderSource, NULL);
glCompileShader(vertexShader);
// check for shader compile errors
int success;
char infoLog[512];
glGetShaderiv(vertexShader, GL_COMPILE_STATUS, &success);
if (!success)
{
glGetShaderInfoLog(vertexShader, 512, NULL, infoLog);
printf("ERROR::SHADER::VERTEX::COMPILATION_FAILED\n%s\n", infoLog);
}
// fragment shader
unsigned int fragmentShader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragmentShader, 1, &fragmentShaderSource, NULL);
glCompileShader(fragmentShader);
// check for shader compile errors
glGetShaderiv(fragmentShader, GL_COMPILE_STATUS, &success);
if (!success)
{
glGetShaderInfoLog(fragmentShader, 512, NULL, infoLog);
printf("ERROR::SHADER::FRAGMENT::COMPILATION_FAILED\n%s\n", infoLog);
}
// link shaders
unsigned int shaderProgram = glCreateProgram();
glAttachShader(shaderProgram, vertexShader);
glAttachShader(shaderProgram, fragmentShader);
glLinkProgram(shaderProgram);
// check for linking errors
glGetProgramiv(shaderProgram, GL_LINK_STATUS, &success);
if (!success) {
glGetProgramInfoLog(shaderProgram, 512, NULL, infoLog);
printf("ERROR::SHADER::PROGRAM::LINKING_FAILED%s\n", infoLog);
}
glDeleteShader(vertexShader);
glDeleteShader(fragmentShader);
// float vertices[] = {
// 1.0f, 1.0f, 0.0f, // top right
// 1.0f, -1.0f, 0.0f, // bottom right
// -1.0f, -1.0f, 0.0f, // bottom left
// -1.0f, 1.0f, 0.0f // top left
// };
float vertices[] = {
// positions // texture coords
1.0f, 1.0f, 0.0f, 1.0f, 1.0f, // top right
1.0f, -1.0f, 0.0f, 1.0f, 0.0f, // bottom right
-1.0f, -1.0f, 0.0f, 0.0f, 0.0f, // bottom left
-1.0f, 1.0f, 0.0f, 0.0f, 1.0f // top left
};
unsigned int indices[] = {
// note that we start from 0!
0, 1, 3, // first triangle
1, 2, 3 // second triangle
};
unsigned int VBO, VAO, EBO;
glGenVertexArrays(1, &VAO);
printf("VAO: %d\n", VAO);
glGenBuffers(1, &VBO);
printf("VBO: %d\n", VBO);
glGenBuffers(1, &EBO);
printf("EBO: %d\n", EBO);
// bind the Vertex Array Object first, then bind and set vertex buffer(s), and then configure vertex attributes(s).
glBindVertexArray(VAO);
glBindBuffer(GL_ARRAY_BUFFER, VBO);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, EBO);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
// glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
// glEnableVertexAttribArray(0);
// position attribute
glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)0);
glEnableVertexAttribArray(0);
// texture coord attribute
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 5 * sizeof(float), (void*)(3 * sizeof(float)));
glEnableVertexAttribArray(1);
// note that this is allowed, the call to glVertexAttribPointer registered VBO as the vertex attribute's bound vertex buffer object so afterwards we can safely unbind
glBindBuffer(GL_ARRAY_BUFFER, 0);
// remember: do NOT unbind the EBO while a VAO is active as the bound element buffer object IS stored in the VAO; keep the EBO bound.
//glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
// You can unbind the VAO afterwards so other VAO calls won't accidentally modify this VAO, but this rarely happens. Modifying other
// VAOs requires a call to glBindVertexArray anyways so we generally don't unbind VAOs (nor VBOs) when it's not directly necessary.
glBindVertexArray(0);
// uncomment this call to draw in wireframe polygons.
//glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
unsigned int texture;
glGenTextures(1, &texture);
glBindTexture(GL_TEXTURE_2D, texture);
if (plane) {
glTexImage2D(GL_TEXTURE_2D, 0, GL_R32I, width, height, 0, GL_RED_INTEGER, GL_UNSIGNED_INT, plane);
}
/*
*
* Main loop
*
*/
while (!glfwWindowShouldClose(window))
{
// Check if Escape is pressed and signal to close the window.
input(window);
// The glClearColor function is a state-setting function
glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
// The glClear is a state-using function in that it uses the
// current state to retrieve the clearing color from.
glClear(GL_COLOR_BUFFER_BIT);
// Rendering goes here.
glUseProgram(shaderProgram);
glBindVertexArray(VAO); // seeing as we only have a single VAO there's no need to bind it every time, but we'll do so to keep things a bit more organized
//glDrawArrays(GL_TRIANGLES, 0, 6); // not needed: the rectangle is drawn indexed via glDrawElements below
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
glfwSwapBuffers(window);
glfwPollEvents();
}
glfwDestroyWindow(window);
glfwTerminate();
exit(EXIT_SUCCESS);
}
plane.h
#ifndef PLANE_H
#define PLANE_H
#include <stdlib.h>
#include <stdio.h>
#define PLANE(width, height, A) {int i,j,_ii,_jj;for(i=0,_ii=width;i<_ii;i++)for(j=0,_jj=height;j<_jj;j++){A};}
int **NewPlane(int, int);
#endif
plane.c
#include <plane.h>
int **NewPlane(int width,int height)
{
int **a;
int i,j;
a = (int **)calloc((size_t)(width),sizeof(int *));
if (a == NULL) {
fprintf(stderr,"NewPlane: error in memory allocation\n");
exit(EXIT_FAILURE);
}
a[0] = (int *)calloc((size_t)((width)*(height)),sizeof(int));
if (a[0] == NULL) {
fprintf(stderr,"NewPlane: error in memory allocation\n");
exit(EXIT_FAILURE);
}
for (i=1,j=width; i < j; i++)
a[i] = a[i-1] + height;
return a;
}
Since integral textures cannot be interpolated, the minifying and magnification filters need to be one of the "nearest" filters:
glBindTexture(GL_TEXTURE_2D, texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
texture is an overloaded function. If you look up an isampler2D with texture, the type of the return value is ivec4:
uniform isampler2D texture1;
void main()
{
int value = texture(texture1, TexCoord).r;
// [...]
}
Since the internal data type is GL_R32I, the returned value is in the range -2,147,483,648 to 2,147,483,647. The format of the default framebuffer is an unsigned normalized floating-point format, so the data in the buffer represents values in the range [0.0, 1.0]. Therefore, you need to scale the integral value (maxValue should be the highest value in the texture):
FragColor = vec4(float(value) / maxValue, 1.0f, 1.0f, 1.0f);
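If you do not want to hard-code maxValue, it can be supplied as a uniform from the C side. A minimal sketch, assuming the fragment shader declares uniform float maxValue; (the name is arbitrary):

// Sketch: set the scaling uniform once after linking the program.
// Assumes the fragment shader declares: uniform float maxValue;
glUseProgram(shaderProgram);
GLint maxValueLoc = glGetUniformLocation(shaderProgram, "maxValue");
glUniform1f(maxValueLoc, 1.0f); // highest value currently stored in plane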
One way to get colors from an integral texture is to create a 1-dimensional texture that serves as a table of colors, and to use the integral value from the texture to look up a color in the table:
uniform isampler2D indexTexture;
uniform sampler1D colorTable;
void main()
{
int index = texture(indexTexture, TexCoord).r;
vec4 color = texelFetch(colorTable, index, 0);
// [...]
}
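For completeness, a minimal sketch of how the 1D colour table could be created on the C side and how both textures could be wired to the samplers. The palette contents and the variable names are assumptions; the GL_NEAREST filters keep the 1D texture complete even though texelFetch bypasses filtering:

// Sketch: a 2-entry RGBA colour table (placeholder colours) uploaded as a 1D texture.
unsigned char palette[] = {
    0,   0,   0,   255,  // colour for index 0
    255, 255, 255, 255   // colour for index 1
};
unsigned int colorTable;
glGenTextures(1, &colorTable);
glBindTexture(GL_TEXTURE_1D, colorTable);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage1D(GL_TEXTURE_1D, 0, GL_RGBA8, 2, 0, GL_RGBA, GL_UNSIGNED_BYTE, palette);

// Bind the index texture and the colour table to different texture units
// and tell each sampler which unit to read from.
glUseProgram(shaderProgram);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, texture);      // isampler2D indexTexture -> unit 0
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_1D, colorTable);   // sampler1D colorTable -> unit 1
glUniform1i(glGetUniformLocation(shaderProgram, "indexTexture"), 0);
glUniform1i(glGetUniformLocation(shaderProgram, "colorTable"), 1);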