I am writing a rendering system in CUDA and want the results to be displayed quickly via OpenGL, without a round trip through main memory. I basically do the following:
Create and initialize an OpenGL texture, and register it with CUDA as a cudaGraphicsResource:
GLuint viewGLTexture;
cudaGraphicsResource_t viewCudaResource;

void initialize() {
    glEnable(GL_TEXTURE_2D);
    glGenTextures(1, &viewGLTexture);
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}
Whenever the view is resized, I resize the viewport and the texture image accordingly:
void resize() {
    glViewport(0, 0, view.getWidth(), view.getHeight());
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
}
Then, each frame, I map the graphics resource to a cudaArray, wrap it in a cudaSurfaceObject, call the rendering kernel on it, unmap and synchronize, and let OpenGL draw a fullscreen quad with the texture:
void renderFrame() {
    cudaGraphicsMapResources(1, &viewCudaResource);
    {
        cudaArray_t viewCudaArray;
        cudaGraphicsSubResourceGetMappedArray(&viewCudaArray, viewCudaResource, 0, 0);
        cudaResourceDesc viewCudaArrayResourceDesc = {};
        {
            viewCudaArrayResourceDesc.resType = cudaResourceTypeArray;
            viewCudaArrayResourceDesc.res.array.array = viewCudaArray;
        }
        cudaSurfaceObject_t viewCudaSurfaceObject;
        cudaCreateSurfaceObject(&viewCudaSurfaceObject, &viewCudaArrayResourceDesc);
        {
            invokeRenderingKernel(viewCudaSurfaceObject);
        }
        cudaDestroySurfaceObject(viewCudaSurfaceObject);
    }
    cudaGraphicsUnmapResources(1, &viewCudaResource);
    cudaStreamSynchronize(0);

    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glBegin(GL_QUADS);
        {
            glTexCoord2f(0.0f, 0.0f); glVertex2f(-1.0f, -1.0f);
            glTexCoord2f(1.0f, 0.0f); glVertex2f(+1.0f, -1.0f);
            glTexCoord2f(1.0f, 1.0f); glVertex2f(+1.0f, +1.0f);
            glTexCoord2f(0.0f, 1.0f); glVertex2f(-1.0f, +1.0f);
        }
        glEnd();
    }
    glBindTexture(GL_TEXTURE_2D, 0);

    glFinish();
}
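For reference, invokeRenderingKernel() is just a thin wrapper around a kernel that writes into the mapped surface with surf2Dwrite. A minimal sketch follows; the kernel body, the launch configuration and the color it writes are placeholders, not the actual renderer:

__global__ void renderKernel(cudaSurfaceObject_t surface, int width, int height) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    // Placeholder color; surface writes are byte-addressed in x, hence sizeof(uchar4).
    uchar4 color = make_uchar4(x % 256, y % 256, 128, 255);
    surf2Dwrite(color, surface, x * sizeof(uchar4), y);
}

void invokeRenderingKernel(cudaSurfaceObject_t surfaceObject) {
    dim3 block(16, 16);
    dim3 grid((view.getWidth() + block.x - 1) / block.x,
              (view.getHeight() + block.y - 1) / block.y);
    renderKernel<<<grid, block>>>(surfaceObject, view.getWidth(), view.getHeight());
}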
The problem: whenever the view is resized, all CUDA calls start returning "unknown error", and visually it looks like the texture is not actually resized, just stretched across the whole view. Why is this happening, and how do I fix it?
It seems CUDA/OpenGL interop requires the texture to be re-registered whenever it is resized: glTexImage2D reallocates the texture's storage, which invalidates the previously registered resource. The following works:
void resize() {
    glViewport(0, 0, view.getWidth(), view.getHeight());
    // unregister
    cudaGraphicsUnregisterResource(viewCudaResource);
    // resize
    glBindTexture(GL_TEXTURE_2D, viewGLTexture);
    {
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, view.getWidth(), view.getHeight(), 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    }
    glBindTexture(GL_TEXTURE_2D, 0);
    // register back
    cudaGraphicsGLRegisterImage(&viewCudaResource, viewGLTexture, GL_TEXTURE_2D, cudaGraphicsRegisterFlagsWriteDiscard);
}
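As an aside, wrapping the interop calls in an error check makes it much easier to see which call fails first after a resize. The helper below is only an illustration (the macro name is mine, not part of the code above):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort with file/line and the CUDA error string on any failed runtime call.
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err = (call);                                     \
        if (err != cudaSuccess) {                                     \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,        \
                    cudaGetErrorString(err));                         \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Usage:
// CUDA_CHECK(cudaGraphicsMapResources(1, &viewCudaResource));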