
device function pointers

Tags:

cuda

I need a device version of the following host code:

#include <stdio.h>
#include <stdlib.h>

double (**func)(double x);

double func1(double x)
{
    return x+1.;
}

double func2(double x)
{
    return x+2.;
}

double func3(double x)
{
    return x+3.;
}

void test(void)
{
    double x;

    for(int i=0;i<3;++i){
        x=func[i](2.0);
        printf("%g\n",x);
    }
}

int main(void)
{
    func=(double (**)(double))malloc(10*sizeof(double (*)(double)));

    /* fill the table before calling test() */
    func[0]=func1;
    func[1]=func2;
    func[2]=func3;

    test();

    return 0;
}

where func1, func2, func3 have to be __device__ functions and "test" has to be a (suitably modified) __global__ kernel.

I have an NVIDIA GeForce GTS 450 (compute capability 2.1).

Thank you in advance,
Michele

========================================================

A working solution

#include <cstdio>

#define REAL double

typedef REAL (*func)(REAL x);

__host__ __device__ REAL func1(REAL x)
{
    return x+1.0;
}

__host__ __device__ REAL func2(REAL x)
{
    return x+2.0;
}

__host__ __device__ REAL func3(REAL x)
{
    return x+3.0;
}

__device__ func func_list_d[3];
func func_list_h[3];

__global__ void assign_kernel(void)
{
    func_list_d[0]=func1;
    func_list_d[1]=func2;
    func_list_d[2]=func3;
}

void assign(void)
{
    func_list_h[0]=func1;
    func_list_h[1]=func2;
    func_list_h[2]=func3;
}


__global__ void test_kernel(void)
{
    REAL x;
    for(int i=0;i<3;++i){
        x=func_list_d[i](2.0);
        printf("%g\n",x);
    }
}

void test(void)
{
    REAL x;
    printf("=============\n");
    for(int i=0;i<3;++i){
        x=func_list_h[i](2.0);
        printf("%g\n",x);
    }
}

int main(void)
{
    assign_kernel<<<1,1>>>();
    test_kernel<<<1,1>>>();
    cudaDeviceSynchronize();

    assign();
    test();

    return 0;
}
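
Neither the device nor the host path above checks the CUDA calls for errors. A minimal helper along the following lines could make launch failures visible (this is only a sketch of mine; the name checkCuda is not part of the original code):

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Illustrative helper (not from the original code): abort with a message if a
// CUDA runtime call or a preceding kernel launch reported an error.
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s: %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Possible use in main() above:
//     assign_kernel<<<1,1>>>();
//     checkCuda(cudaGetLastError(), "assign_kernel launch");
//     test_kernel<<<1,1>>>();
//     checkCuda(cudaGetLastError(), "test_kernel launch");
//     checkCuda(cudaDeviceSynchronize(), "kernel execution");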
asked Jan 25 '12 by micheletuttafesta


1 Answer

Function pointers are allowed on Fermi. This is how you could do it:

#include <cstdio>

typedef double (*func)(double x);

__device__ double func1(double x)
{
  return x+1.0;
}

__device__ double func2(double x)
{
  return x+2.0;
}

__device__ double func3(double x)
{
  return x+3.0;
}

__device__ func pfunc1 = func1;
__device__ func pfunc2 = func2;
__device__ func pfunc3 = func3;

__global__ void test_kernel(func* f, int n)
{
  double x;

  for(int i=0;i<n;++i){
   x=f[i](2.0);
   printf("%g\n",x);
  }
}

int main(void)
{
  int N = 5;
  func* h_f;
  func* d_f;
  h_f = (func*)malloc(N*sizeof(func));
  cudaMalloc((void**)&d_f,N*sizeof(func));

  // Copy the device-side values of the function pointers into the host array.
  cudaMemcpyFromSymbol(&h_f[0], pfunc1, sizeof(func));
  cudaMemcpyFromSymbol(&h_f[1], pfunc1, sizeof(func));
  cudaMemcpyFromSymbol(&h_f[2], pfunc2, sizeof(func));
  cudaMemcpyFromSymbol(&h_f[3], pfunc3, sizeof(func));
  cudaMemcpyFromSymbol(&h_f[4], pfunc3, sizeof(func));

  cudaMemcpy(d_f,h_f,N*sizeof(func),cudaMemcpyHostToDevice);

  test_kernel<<<1,1>>>(d_f,N);
  cudaDeviceSynchronize();   // wait for the kernel so its printf output appears

  cudaFree(d_f);
  free(h_f);

  return 0;
}
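
As a variant of the same idea (a sketch of my own, not part of the answer above), the three pointers can be kept in a single __device__ array, so one cudaMemcpyFromSymbol call fills the whole host-side table; the snippet below assumes the typedef func, the __device__ functions and test_kernel defined above:

// Device-side table holding the addresses of func1..func3 (defined above).
__device__ func pfunc_table[3] = { func1, func2, func3 };

int main(void)
{
  const int N = 3;
  func h_f[3];
  func* d_f;

  // Fetch all three device-side pointer values with a single copy.
  cudaMemcpyFromSymbol(h_f, pfunc_table, N*sizeof(func));

  cudaMalloc((void**)&d_f, N*sizeof(func));
  cudaMemcpy(d_f, h_f, N*sizeof(func), cudaMemcpyHostToDevice);

  test_kernel<<<1,1>>>(d_f, N);
  cudaDeviceSynchronize();   // wait so the kernel's printf output is flushed

  cudaFree(d_f);
  return 0;
}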
answered by brano