Why is the CPU implementation of my custom op being selected?

In order to learn how to write custom TensorFlow ops, I followed the Adding a New Op tutorial and made an "add_b" op that adds a scalar b to every input value.

add_b_op.cc:

#define EIGEN_USE_THREADS

#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"

#include "tensorflow/core/framework/common_shape_fns.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"

using namespace tensorflow;

REGISTER_OP("AddB")
    .Attr("T: {float, double}")
    .Input("input: T")
    .Input("b: T")
    .Output("output: T")
    .SetShapeFn([] (shape_inference::InferenceContext* c) -> Status {
      shape_inference::ShapeHandle out;
      TF_RETURN_IF_ERROR(c->WithRank(c->input(1), 0, &out));
      return shape_inference::UnchangedShape(c);
    })
//----------------------------------------------------------------------
    .Doc(R"doc(
Adds `b` to each input.

input: The input values.
b: A number to add to each input value.
)doc");


template <typename T>
class AddBCpuOp : public OpKernel {
 public:
  explicit AddBCpuOp(OpKernelConstruction* context) : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input_tensor = context->input(0);
    const auto input = input_tensor.flat<T>();

    Tensor* output_tensor = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
                                                     &output_tensor));
    auto output = output_tensor->flat<T>();

    const Eigen::ThreadPoolDevice& d = context->eigen_device<Eigen::ThreadPoolDevice>();

    // Note: The mistake of adding 1 instead of `b` is intentional to be able to distinguish
    // the CPU and GPU implementations.
    output.device(d) = input + static_cast<T>(1);
  }
};

REGISTER_KERNEL_BUILDER(
    Name("AddB")
    .Device(DEVICE_CPU)
    .TypeConstraint<float>("T"),
    AddBCpuOp<float>);
REGISTER_KERNEL_BUILDER(
    Name("AddB")
    .Device(DEVICE_CPU)
    .TypeConstraint<double>("T"),
    AddBCpuOp<double>);


#if GOOGLE_CUDA

template <typename T>
bool LaunchAddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output);

template <typename T>
class AddBGpuOp : public OpKernel {
 public:
  explicit AddBGpuOp(OpKernelConstruction* context) : OpKernel(context) {}

  void Compute(OpKernelContext* context) override {
    const Tensor& input_tensor = context->input(0);
    const auto input = input_tensor.flat<T>();

    const Tensor& b_tensor = context->input(1);
    OP_REQUIRES(context, TensorShapeUtils::IsScalar(b_tensor.shape()),
                errors::InvalidArgument("add_b expects a scalar for `b`."));
    const auto b = b_tensor.scalar<T>();

    Tensor* output_tensor = nullptr;
    OP_REQUIRES_OK(context, context->allocate_output(0, input_tensor.shape(),
                                                     &output_tensor));
    auto output = output_tensor->flat<T>();

    OP_REQUIRES(context, LaunchAddBKernel(input.data(), input.dimension(0), b.data(), output.data()),
                errors::Internal("add_b: LaunchAddBKernel() failed."));
  }
};

REGISTER_KERNEL_BUILDER(
    Name("AddB")
    .Device(DEVICE_GPU)
    .TypeConstraint<float>("T"),
    AddBGpuOp<float>);
REGISTER_KERNEL_BUILDER(
    Name("AddB")
    .Device(DEVICE_GPU)
    .TypeConstraint<double>("T"),
    AddBGpuOp<double>);

#endif // if GOOGLE_CUDA

add_b_op.cu.cc:

template <typename T, int BLOCK_DIM_X>
__global__ void AddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output) {
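  // Each thread handles one element: add the scalar *d_b to d_input[i].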
  const int i = blockIdx.x * BLOCK_DIM_X + threadIdx.x;
  if (i < n) {
    d_output[i] = d_input[i] + *d_b;
  }
}

template <typename T>
bool LaunchAddBKernel(const T *__restrict__ d_input, int n, const T *__restrict__ d_b, T *__restrict__ d_output) {
  if (n <= 0) return true;

  constexpr int BLOCK_DIM_X = 256;
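  // Round the grid size up (ceiling division) so that all n elements are covered.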
  AddBKernel<T, BLOCK_DIM_X><<<n / BLOCK_DIM_X + (n % BLOCK_DIM_X != 0), BLOCK_DIM_X>>>(d_input, n, d_b, d_output);
  return true;
}

// Explicit instantiations.
template bool LaunchAddBKernel<float>(const float *__restrict__, int, const float *__restrict__, float *__restrict__);
template bool LaunchAddBKernel<double>(const double *__restrict__, int, const double *__restrict__, double *__restrict__);

I have intentionally introduced an error in the CPU implementation to be able to distinguish whether the CPU or GPU implementation is being used.

When I test out my custom op with:

from __future__ import print_function
import tensorflow as tf

module = tf.load_op_library('custom_ops.so')
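# log_device_placement=True logs the device to which each op is assigned.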
with tf.Session(config = tf.ConfigProto(log_device_placement = True)):
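  # The CPU kernel's intentional +1 bug makes the two implementations distinguishable:
  # [ 6.  5.  4.  3.  2.] means the CPU kernel ran; [ 13.  12.  11.  10.  9.] means the GPU kernel ran.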
  print(module.add_b([5., 4., 3., 2., 1.], 8.).eval())

I get the following output:

I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:892] OS X does not support NUMA - returning NUMA node zero
I tensorflow/core/common_runtime/gpu/gpu_device.cc:951] Found device 0 with properties: 
name: GeForce GT 750M
major: 3 minor: 0 memoryClockRate (GHz) 0.9255
pciBusID 0000:01:00.0
Total memory: 2.00GiB
Free memory: 1.80GiB
I tensorflow/core/common_runtime/gpu/gpu_device.cc:972] DMA: 0 
I tensorflow/core/common_runtime/gpu/gpu_device.cc:982] 0:   Y 
I tensorflow/core/common_runtime/gpu/gpu_device.cc:1041] Creating TensorFlow device (/gpu:0) -> (device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0)
Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0
I tensorflow/core/common_runtime/direct_session.cc:252] Device mapping:
/job:localhost/replica:0/task:0/gpu:0 -> device: 0, name: GeForce GT 750M, pci bus id: 0000:01:00.0

AddB: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB: /job:localhost/replica:0/task:0/gpu:0
AddB/b: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB/b: /job:localhost/replica:0/task:0/gpu:0
AddB/input: /job:localhost/replica:0/task:0/gpu:0
I tensorflow/core/common_runtime/simple_placer.cc:819] AddB/input: /job:localhost/replica:0/task:0/gpu:0
[ 6.  5.  4.  3.  2.]

The "device placement logs" appear to indicate that the op is being performed on the GPU, but the output indicates that the CPU implementation is being used.

When I comment out the two REGISTER_KERNEL_BUILDER() registrations for the DEVICE_CPU implementation, recompile, and re-test, I get the expected output of [ 13. 12. 11. 10. 9.], but there is an error:

E tensorflow/core/common_runtime/executor.cc:334] Executor failed to create kernel. Not found: No registered 'AddB' OpKernel for CPU devices compatible with node AddB = AddB[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](AddB/input, AddB/b)
    .  Registered:  device='GPU'; T in [DT_FLOAT]
  device='GPU'; T in [DT_DOUBLE]

     [[Node: AddB = AddB[T=DT_FLOAT, _device="/job:localhost/replica:0/task:0/gpu:0"](AddB/input, AddB/b)]]

That error message looks like a bug to me, because although the error says "Executor failed to create kernel", a kernel was apparently created to run the op on the GPU.

Why is the CPU implementation being used rather than the GPU implementation?

In case this is important, here are details about my development setup:

  • I am using a MacBook Pro with a built-in NVIDIA GeForce GT 750M (CUDA Compute Capability 3.0).
  • macOS Sierra Version 10.12.1 (16B2555)
  • cuda_8.0.47_mac, cudnn-8.0-osx-x64-v5.1
  • TensorFlow 0.11.0rc2 installed via: export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/mac/gpu/tensorflow-0.11.0rc2-py2-none-any.whl

UPDATE: I have found that whether the CPU or GPU implementation is selected depends on the size of the input. Using this test script:

from __future__ import print_function
import numpy as np
import tensorflow as tf
from time import time

NUM_VALUES = 1310720

input = np.arange(0, NUM_VALUES, dtype = float)

module = tf.load_op_library('custom_ops.so')
with tf.Session(config = tf.ConfigProto(log_device_placement = True)):
  start = time(); print(module.add_b(input, 8.).eval()); end = time(); print(end - start)

When NUM_VALUES is 1310720 or less, the CPU implementation is used; when NUM_VALUES is 1310721 or more, the GPU implementation is used.

Is there a 10 MiB cut-off (1310720 values * 8 bytes per double = 10 MiB)? If so, how do I override it? The AddB() op is simple enough, but for a more complex custom operation, 10 MiB might be too large a threshold for the GPU implementation to be selected.

asked Nov 22 '16 by Daniel Trebbien

2 Answers

I just read TensorFlow issue #2054, "Manual placement on GPU of a custom operator with both CPU and GPU implementation will always run the CPU version", and the behavior of running the CPU implementation appears to be a consequence of a TensorFlow feature called "constant folding". When TensorFlow optimizes the graph before the first run, ops involving constants are generally evaluated on the CPU, the reasoning being that the CPU and GPU implementations should produce the same results. Makes sense.

Two ways of disabling this behavior are:

  1. Disabling graph optimization:

    from __future__ import print_function
    import numpy as np
    import tensorflow as tf
    from time import time
    
    NUM_VALUES = 10
    
    input = np.arange(0, NUM_VALUES, dtype = float)
    
    custom_ops_module = tf.load_op_library('custom_ops.so')
    
    config = tf.ConfigProto(log_device_placement = True)
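    # opt_level = -1 (OptimizerOptions.L0) disables graph optimizations, including constant folding.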
    config.graph_options.optimizer_options.opt_level = -1
    
    with tf.Session(config = config):
      start = time(); print(custom_ops_module.add_b(input, 8.).eval()); end = time(); print(end - start)
    
  2. Not using constants, by, for example, feeding the values into placeholders:

    from __future__ import print_function
    import numpy as np
    import tensorflow as tf
    from time import time
    
    NUM_VALUES = 10
    
    custom_ops_module = tf.load_op_library('custom_ops.so')
    
    graph = tf.Graph()
    with graph.as_default():
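      # Because the values are fed at run time rather than baked into the graph as constants,
      # constant folding cannot pre-compute the op's result on the CPU.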
      input = tf.placeholder(tf.float64, shape = (NUM_VALUES,))
      b = tf.placeholder(tf.float64, shape = ())
      result = custom_ops_module.add_b(input, b)
    
    with tf.Session(graph = graph, config = tf.ConfigProto(log_device_placement = True)) as session:
      feed_dict = {
        input: np.arange(0, NUM_VALUES, dtype = float),
        b: 8.,
      }
      start = time(); print(session.run([result], feed_dict = feed_dict)); end = time(); print(end - start)
    
answered Oct 24 '22 by Daniel Trebbien

I think the template instantiations might be incorrect:

template <typename Device, typename T>
class AddBOp : public OpKernel {
...
}

REGISTER_KERNEL_BUILDER(
    Name("AddB")
    .Device(DEVICE_CPU)
    .TypeConstraint<float>("T"),
    AddBOp<CPUDevice, float>);

And then:

template <typename T>
class AddBOp<GPUDevice, T> : public OpKernel {
...
}

REGISTER_KERNEL_BUILDER(
    Name("AddB")
    .Device(DEVICE_GPU)
    .TypeConstraint<float>("T"),
    AddBOp<GPUDevice, float>);

I think the registration of AddB for GPU instantiates the object that matches the first implementation, not the second (the first implementation has two template arguments, the second implementation has one).

You could probably fix this by using AddBOp<float> in the second registration, though I would advise better names to avoid confusion.
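
For illustration, here is a minimal sketch of the distinct-name approach, using the class names AddBCpuOp and AddBGpuOp (which is essentially what the code in the question already does); the Compute() bodies are omitted:

// Sketch only: two independent class templates, so each registration
// unambiguously names the kernel class it instantiates.
template <typename T>
class AddBCpuOp : public OpKernel {
 public:
  explicit AddBCpuOp(OpKernelConstruction* context) : OpKernel(context) {}
  void Compute(OpKernelContext* context) override { /* CPU implementation */ }
};

template <typename T>
class AddBGpuOp : public OpKernel {
 public:
  explicit AddBGpuOp(OpKernelConstruction* context) : OpKernel(context) {}
  void Compute(OpKernelContext* context) override { /* GPU implementation */ }
};

REGISTER_KERNEL_BUILDER(
    Name("AddB").Device(DEVICE_CPU).TypeConstraint<float>("T"),
    AddBCpuOp<float>);
REGISTER_KERNEL_BUILDER(
    Name("AddB").Device(DEVICE_GPU).TypeConstraint<float>("T"),
    AddBGpuOp<float>);

(And likewise for double.) With separate class names there is no template specialization for the compiler to resolve, so each registration can only ever instantiate the intended kernel.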

answered Oct 24 '22 by vrv