I'm working on a simulation of clouds (actual clouds) where the clouds are simulated by 3D points, then projected into a 2D heatmap about 640x480 units in size. The number of points is about 50k, which is as low as I can go without the simulation breaking, but I can't seem to find a way to perform this with any speed (it usually takes 3-5 seconds of runtime).
I suppose my question is: is it feasible for an average computer to do this yet? I usually underestimate how fast computers are nowadays, but I might be overestimating them in this case. I haven't optimized the simulation yet, but if it's flat-out not possible, it'd be good to know now and save the trouble.
If it is possible, is there any technique that might make the conversion from point data to heatmap fast enough to update 60 times a second? It really is just reading the point data and writing the transformed results into a 2D array, so I think it's mostly bound by memory lookups.
Yes, if your data is already in memory or you can compute it quickly.
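For scale: binning 50k points into a 640x480 grid is about 50k transform-and-write operations per frame, roughly 3 million per second at 60 FPS, which is tiny next to modern memory bandwidth. Here is a minimal sketch of that core loop, where points[], N_POINTS and project() are hypothetical stand-ins for your own data and transform:
/* Hypothetical sketch: scatter N_POINTS 3D points into a 640x480 heatmap.
 * points[] and project() stand in for your own data and projection. */
static uint32_t heat[480][640];
memset(heat, 0, sizeof(heat));            /* clear the previous frame */
for (size_t i = 0; i < N_POINTS; i++) {
    int px, py;
    project(&points[i], &px, &py);        /* your 3D -> 2D transform */
    if (px >= 0 && px < 640 && py >= 0 && py < 480)
        heat[py][px]++;                   /* one increment per point */
}
If a loop like this takes seconds, the time is almost certainly going into the transform itself (or into an accidentally quadratic step), not into the memory writes.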
Just try it out with SDL streaming textures (or OpenGL textures directly, which is what SDL typically uses under the hood):
heatmap.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <SDL2/SDL.h>
#define COLOR_MAX 255
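/* Wall-clock time in seconds. */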
double common_get_secs(void) {
struct timespec ts;
timespec_get(&ts, TIME_UTC);
return ts.tv_sec + (1e-9 * ts.tv_nsec);
}
const double COMMON_FPS_GRANULARITY_S = 0.5;
double common_fps_last_time_s;
unsigned int common_fps_nframes;
void common_fps_init() {
common_fps_nframes = 0;
common_fps_last_time_s = common_get_secs();
}
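/* Count frames and print the average FPS every COMMON_FPS_GRANULARITY_S seconds. */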
void common_fps_update_and_print() {
double dt, current_time_s;
current_time_s = common_get_secs();
common_fps_nframes++;
dt = current_time_s - common_fps_last_time_s;
if (dt > COMMON_FPS_GRANULARITY_S) {
printf("FPS = %f\n", common_fps_nframes / dt);
common_fps_last_time_s = current_time_s;
common_fps_nframes = 0;
}
}
int main(void) {
SDL_Event event;
SDL_Renderer *renderer = NULL;
SDL_Texture *texture = NULL;
SDL_Window *window = NULL;
Uint8 *base;
int pitch;
void *pixels = NULL;
const unsigned int
WINDOW_WIDTH = 500,
WINDOW_HEIGHT = WINDOW_WIDTH;
const double
SPEED = WINDOW_WIDTH / 10.0,
CENTER_X = WINDOW_WIDTH / 2.0,
CENTER_Y = WINDOW_HEIGHT / 2.0,
PERIOD = WINDOW_WIDTH / 10.0,
PI2 = 2.0 * acos(-1.0);
double dt, initial_time;
float z;
unsigned int x, y;
int xc, yc;
SDL_Init(SDL_INIT_TIMER | SDL_INIT_VIDEO);
SDL_CreateWindowAndRenderer(WINDOW_WIDTH, WINDOW_HEIGHT, 0, &window, &renderer);
texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_ARGB8888,
SDL_TEXTUREACCESS_STREAMING, WINDOW_WIDTH, WINDOW_HEIGHT);
initial_time = common_get_secs();
common_fps_init();
while (1) {
dt = common_get_secs() - initial_time;
SDL_LockTexture(texture, NULL, &pixels, &pitch);
for (y = 0; y < WINDOW_HEIGHT; y++) {
for (x = 0; x < WINDOW_WIDTH; x++) {
xc = CENTER_X - x;
yc = CENTER_Y - y;
/*z = COLOR_MAX * 0.5 * (1.0 + (sin(PI2 * (sqrt(xc*xc + yc*yc) - SPEED * dt) / PERIOD)));*/
z = (int)(x + y + SPEED * dt) % COLOR_MAX;
/* Address pixel (x, y) through the pitch SDL reports instead of assuming
 * tightly packed rows; bytes are B, G, R, A on little endian. */
base = ((Uint8 *)pixels) + (y * pitch) + (4 * x);
base[0] = 0;
base[1] = 0;
base[2] = z;
base[3] = COLOR_MAX;
}
}
SDL_UnlockTexture(texture);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
common_fps_update_and_print();
if (SDL_PollEvent(&event) && event.type == SDL_QUIT)
break;
}
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(window);
SDL_Quit();
return EXIT_SUCCESS;
}
Compile and run:
gcc -Wall -std=c11 -o heatmap.out heatmap.c -lSDL2 -lm
./heatmap.out
On Ubuntu 16.04, the simpler calculation:
z = (x + y + SPEED * dt) % COLOR_MAX
reaches 300 FPS on a Lenovo ThinkPad T430 with an Nvidia NVS 5400M (a mid-range laptop GPU from 2012).
So, of course, a precomputed result already sitting in memory would be even faster.
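For example, with frames precomputed into a buffer, the per-frame work reduces to a row-by-row copy into the locked texture. A sketch, assuming a hypothetical precomputed buffer holding tightly packed 4-byte-per-pixel rows:
/* Sketch: copy one precomputed frame into the locked texture, honoring the
 * pitch SDL reports (rows may be padded). `precomputed` is hypothetical. */
SDL_LockTexture(texture, NULL, &pixels, &pitch);
for (y = 0; y < WINDOW_HEIGHT; y++)
    memcpy((Uint8 *)pixels + y * pitch,
           precomputed + y * WINDOW_WIDTH * 4,
           WINDOW_WIDTH * 4);
SDL_UnlockTexture(texture);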
If the computation is a little more complicated, however:
z = COLOR_MAX * 0.5 * (1.0 + (sin(PI2 * (sqrt(xc*xc + yc*yc) - SPEED * dt) / PERIOD)))
the FPS drops to about 30, so the limiting factor quickly becomes the calculation itself.
If you cannot run the calculations fast enough, you will likely have to precompute frames and store them on disk so as not to overflow memory, and then it is all about benchmarking your disk and your compression method (i.e., video codecs).
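One common approach is to pipe raw frames into an encoder process and let it compress on the way to disk; a sketch using ffmpeg's rawvideo input (the ffmpeg flags are standard, the surrounding variable names are hypothetical):
/* Sketch: stream raw BGRA frames (ARGB8888 on little endian) to ffmpeg. */
FILE *enc = popen("ffmpeg -y -f rawvideo -pixel_format bgra"
                  " -video_size 640x480 -framerate 60 -i - out.mp4", "w");
/* ...once per frame: */
fwrite(frame, 4, 640 * 480, enc);
/* ...when finished: */
pclose(enc);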
Fragment shaders
If you can move your computation into the fragment shader, however, you can do much more complicated things in real time.
With the following code, the more complicated calculation runs at 3k FPS!
But it will be harder to implement, so make sure you need it.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <SDL2/SDL.h>
#define GLEW_STATIC
#include <GL/glew.h>
static const GLuint WIDTH = 500;
static const GLuint HEIGHT = 500;
static const GLchar* vertex_shader_source =
"#version 120\n"
"attribute vec2 coord2d;\n"
"void main(void) {\n"
" gl_Position = vec4(coord2d, 0.0, 1.0);\n"
"}\n";
static const GLchar* fragment_shader_source =
"#version 120\n"
"uniform float pi2;\n"
"uniform float time;\n"
"uniform float width;\n"
"uniform float height;\n"
"uniform float periods_x;\n"
"uniform float periods_y;\n"
"void main(void) {\n"
" float center_x = width / 2.0;"
" float center_y = height / 2.0;"
" float x = (gl_FragCoord.x - center_x) * periods_x / width;"
" float y = (gl_FragCoord.y - center_y) * periods_y / height;"
" gl_FragColor[0] = 0.5 * (1.0 + (sin((pi2 * (sqrt(x*x + y*y) - time)))));\n"
" gl_FragColor[1] = 0.0;\n"
" gl_FragColor[2] = 0.0;\n"
"}\n";
static const GLfloat vertices[] = {
-1.0, 1.0,
1.0, 1.0,
1.0, -1.0,
-1.0, -1.0,
};
static const GLuint indexes[] = {
0, 2, 1,
0, 3, 2,
};
double common_get_secs(void) {
struct timespec ts;
timespec_get(&ts, TIME_UTC);
return ts.tv_sec + (1e-9 * ts.tv_nsec);
}
const double COMMON_FPS_GRANULARITY_S = 0.5;
double common_fps_last_time_s;
unsigned int common_fps_nframes;
void common_fps_init() {
common_fps_nframes = 0;
common_fps_last_time_s = common_get_secs();
}
void common_fps_update_and_print() {
double dt, current_time_s;
current_time_s = common_get_secs();
common_fps_nframes++;
dt = current_time_s - common_fps_last_time_s;
if (dt > COMMON_FPS_GRANULARITY_S) {
printf("FPS = %f\n", common_fps_nframes / dt);
common_fps_last_time_s = current_time_s;
common_fps_nframes = 0;
}
}
/* Generic helper: compile and link a vertex + fragment shader program. */
GLint common_get_shader_program(
const char *vertex_shader_source,
const char *fragment_shader_source) {
GLchar *log = NULL;
GLint fragment_shader, log_length, program, success, vertex_shader;
/* Vertex shader */
vertex_shader = glCreateShader(GL_VERTEX_SHADER);
glShaderSource(vertex_shader, 1, &vertex_shader_source, NULL);
glCompileShader(vertex_shader);
glGetShaderiv(vertex_shader, GL_COMPILE_STATUS, &success);
glGetShaderiv(vertex_shader, GL_INFO_LOG_LENGTH, &log_length);
if (log_length > 0) {
log = malloc(log_length);
glGetShaderInfoLog(vertex_shader, log_length, NULL, log);
printf("vertex shader log:\n\n%s\n", log);
}
if (!success) {
printf("vertex shader compile error\n");
exit(EXIT_FAILURE);
}
/* Fragment shader */
fragment_shader = glCreateShader(GL_FRAGMENT_SHADER);
glShaderSource(fragment_shader, 1, &fragment_shader_source, NULL);
glCompileShader(fragment_shader);
glGetShaderiv(fragment_shader, GL_COMPILE_STATUS, &success);
glGetShaderiv(fragment_shader, GL_INFO_LOG_LENGTH, &log_length);
if (log_length > 0) {
log = realloc(log, log_length);
glGetShaderInfoLog(fragment_shader, log_length, NULL, log);
printf("fragment shader log:\n\n%s\n", log);
}
if (!success) {
printf("fragment shader compile error\n");
exit(EXIT_FAILURE);
}
/* Link shaders */
program = glCreateProgram();
glAttachShader(program, vertex_shader);
glAttachShader(program, fragment_shader);
glLinkProgram(program);
glGetProgramiv(program, GL_LINK_STATUS, &success);
glGetProgramiv(program, GL_INFO_LOG_LENGTH, &log_length);
if (log_length > 0) {
log = realloc(log, log_length);
glGetProgramInfoLog(program, log_length, NULL, log);
printf("shader link log:\n\n%s\n", log);
}
if (!success) {
printf("shader link error\n");
exit(EXIT_FAILURE);
}
free(log);
glDeleteShader(vertex_shader);
glDeleteShader(fragment_shader);
return program;
}
int main(void) {
/* SDL variables. */
SDL_Event event;
SDL_Window *window;
SDL_GLContext gl_context;
const unsigned int WINDOW_WIDTH = 500, WINDOW_HEIGHT = WINDOW_WIDTH;
double dt, initial_time;
/* OpenGL variables. */
GLint
attribute_coord2d,
ibo_size,
width_location,
height_location,
time_location,
periods_x_location,
periods_y_location,
pi2_location,
program
;
GLuint ibo, vbo;
const char *attribute_name = "coord2d";
const float
periods_x = 10.0,
periods_y = 10.0,
pi2 = 2.0 * acos(-1.0)
;
/* SDL init. */
SDL_Init(SDL_INIT_TIMER | SDL_INIT_VIDEO);
window = SDL_CreateWindow(__FILE__, 0, 0,
WINDOW_WIDTH, WINDOW_HEIGHT, SDL_WINDOW_OPENGL);
gl_context = SDL_GL_CreateContext(window);
glewInit();
/* OpenGL init. */
{
program = common_get_shader_program(vertex_shader_source, fragment_shader_source);
attribute_coord2d = glGetAttribLocation(program, attribute_name);
if (attribute_coord2d == -1) {
fprintf(stderr, "error: attribute_coord2d: %s\n", attribute_name);
return EXIT_FAILURE;
}
height_location = glGetUniformLocation(program, "height");
periods_x_location = glGetUniformLocation(program, "periods_x");
periods_y_location = glGetUniformLocation(program, "periods_y");
pi2_location = glGetUniformLocation(program, "pi2");
time_location = glGetUniformLocation(program, "time");
width_location = glGetUniformLocation(program, "width");
glClearColor(0.0f, 0.0f, 0.0f, 1.0f);
glUseProgram(program);
glViewport(0, 0, WIDTH, HEIGHT);
glGenBuffers(1, &vbo);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glBufferData(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
glGenBuffers(1, &ibo);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(indexes), indexes, GL_STATIC_DRAW);
glGetBufferParameteriv(GL_ELEMENT_ARRAY_BUFFER, GL_BUFFER_SIZE, &ibo_size);
glUniform1f(pi2_location, pi2);
glUniform1f(width_location, WIDTH);
glUniform1f(height_location, HEIGHT);
glUniform1f(periods_x_location, periods_x);
glUniform1f(periods_y_location, periods_y);
}
initial_time = common_get_secs();
common_fps_init();
while (1) {
dt = common_get_secs() - initial_time;
/* OpenGL draw. */
glClear(GL_COLOR_BUFFER_BIT);
glEnableVertexAttribArray(attribute_coord2d);
glBindBuffer(GL_ARRAY_BUFFER, vbo);
glVertexAttribPointer(attribute_coord2d, 2, GL_FLOAT, GL_FALSE, 0, 0);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
glUniform1f(time_location, dt);
glDrawElements(GL_TRIANGLES, ibo_size / sizeof(indexes[0]), GL_UNSIGNED_INT, 0);
glDisableVertexAttribArray(attribute_coord2d);
common_fps_update_and_print();
SDL_GL_SwapWindow(window);
if (SDL_PollEvent(&event) && event.type == SDL_QUIT)
break;
}
/* OpenGL cleanup. */
glDeleteBuffers(1, &ibo);
glDeleteBuffers(1, &vbo);
glDeleteProgram(program);
/* SDL cleanup. */
SDL_GL_DeleteContext(gl_context);
SDL_DestroyWindow(window);
SDL_Quit();
return EXIT_SUCCESS;
}
Then:
gcc -Wall -std=c11 -o a.out a.c -lSDL2 -lm -lGL -lGLEW
./a.out
(Screen capture made with recordmydesktop, with the FPS printed periodically to the terminal, converted to a GIF for upload.)
It is definitely feasible, probably even if the calculations are done on the CPU. Ideally, though, you should use the GPU. The relevant APIs are either OpenCL or, since you are rendering the results anyway, compute shaders.
Both techniques let you write a small program (a shader) that works on a single element (point). These all run in parallel on the GPU, which should allow them to run very fast.
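As a sketch of what the compute shader route could look like, here is a minimal GLSL 4.3 kernel that scatters already-projected 2D points into an integer heatmap image. The binding points, buffer layout, and r32ui image format here are assumptions, and a second pass would still be needed to normalize the counts into colors:
static const GLchar* compute_shader_source =
    "#version 430\n"
    "layout(local_size_x = 256) in;\n"
    "layout(std430, binding = 0) buffer Points { vec2 points[]; };\n"
    "layout(r32ui, binding = 0) uniform uimage2D heatmap;\n"
    "void main(void) {\n"
    "    uint i = gl_GlobalInvocationID.x;\n"
    "    if (i >= points.length()) return;\n"
    "    /* The 3D -> 2D projection could also be done right here. */\n"
    "    imageAtomicAdd(heatmap, ivec2(points[i]), 1u);\n"
    "}\n";
It would be dispatched once per frame with glDispatchCompute((n_points + 255) / 256, 1, 1), followed by glMemoryBarrier(GL_SHADER_IMAGE_ACCESS_BARRIER_BIT) before the image is read; note that this requires OpenGL 4.3 or the ARB_compute_shader extension.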