I have a simple fprop that is a composition of tf operations, and I want to override the TensorFlow gradient computation with my own gradient method using RegisterGradient.
What's wrong with this code?
import tensorflow as tf
from tensorflow.python.framework import ops

@ops.RegisterGradient("MyopGrad")
def frop_grad(op, grad):
    x = op.inputs[0]
    return 0 * x  # zero out to see the difference

def fprop(x):
    x = tf.sqrt(x)
    out = tf.maximum(x, .2)
    return out

a = tf.Variable(tf.constant([5., 4., 3., 2., 1.], dtype=tf.float32))
h = fprop(a)
h = tf.identity(h, name="Myop")
grad = tf.gradients(h, a)

g = tf.get_default_graph()
with g.gradient_override_map({'Myop': 'MyopGrad'}):
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        result = sess.run(grad)
        print(result[0])
I want to see all zeros in the print, but instead I am getting:
[ 0.2236068 0.25000003 0.28867513 0.35355341 0.5 ]
You need to create the op within the scope of with g.gradient_override_map({...}): the override only applies to ops built inside that context, and in your code the tf.identity op is created before the map is active, so tf.gradients falls back to the default gradients. (The numbers you are seeing are just the ordinary chain rule, d/dx sqrt(x) = 1/(2*sqrt(x)), flowing back through maximum and sqrt.) Also, the map's keys are op types, not op names, so you need to map Identity (the type of the op that tf.identity creates) rather than the name Myop to your new gradient.
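To see the distinction, note that every op carries both a name and a type, and gradient_override_map keys on the type. A minimal check (a is any tensor already defined in the graph):

t = tf.identity(a, name="Myop")
print(t.op.name)  # "Myop" -- the name you chose
print(t.op.type)  # "Identity" -- the registered op type the override map needs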
Here is the full code:
import tensorflow as tf
from tensorflow.python.framework import ops

@ops.RegisterGradient("MyopGrad")
def frop_grad(op, grad):
    x = op.inputs[0]
    return 0 * x  # zero out to see the difference

def fprop(x):
    x = tf.sqrt(x)
    out = tf.maximum(x, .2)
    return out

a = tf.Variable(tf.constant([5., 4., 3., 2., 1.], dtype=tf.float32))
h = fprop(a)

g = tf.get_default_graph()
# The override must be active while the op is created, and the key is the
# op type "Identity", not the op name "Myop".
with g.gradient_override_map({'Identity': 'MyopGrad'}):
    h = tf.identity(h, name="Myop")
    grad = tf.gradients(h, a)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initialize_all_variables is deprecated
    result = sess.run(grad)
    print(result[0])
Output:
[ 0. 0. 0. 0. 0.]
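As an aside, on newer TensorFlow versions you can get the same effect without gradient_override_map by using tf.custom_gradient. This is a minimal sketch of the same zero-gradient trick, not part of the original approach:

import tensorflow as tf

@tf.custom_gradient
def fprop(x):
    out = tf.maximum(tf.sqrt(x), .2)
    def grad(dy):
        return tf.zeros_like(x)  # zero out the gradient, as above
    return out, grad

a = tf.Variable(tf.constant([5., 4., 3., 2., 1.], dtype=tf.float32))
h = fprop(a)
grad = tf.gradients(h, a)  # evaluates to all zeros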