    Searched refs: grad_grad (Results 1 - 6 of 6)

  /external/tensorflow/tensorflow/python/kernel_tests/
softplus_op_test.py 118 (grad_grad,) = gradients_impl.gradients(grad, x)
124 x, [2, 5], grad_grad, [2, 5], x_init_value=x_init)
while_v2_test.py 174 grad_grad = gradients_impl.gradients(grad, [x]) # 12x**2
177 self.assertSequenceEqual(self.evaluate(grad_grad), [48.])
186 grad_grad = gradients_impl.gradients(grad, [x]) # 12x**2
190 self.assertSequenceEqual(self.evaluate(grad_grad), [48.])
control_flow_ops_py_test.py     [all...]
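
Both kernel_tests excerpts above take a gradient of a gradient with gradients_impl.gradients. A minimal sketch of that double-gradient pattern, using the public tf.gradients wrapper inside tf.function; the tests build the graph differently (e.g. through while loops), so the x**4 function here is only a stand-in that reproduces the "12x**2" comment and the 48.0 assertion at x = 2:

    import tensorflow as tf

    # Sketch only: public tf.gradients instead of the internal
    # gradients_impl.gradients used by the tests; graph mode via tf.function.
    @tf.function
    def double_grad(x):
        y = x ** 4
        (grad,) = tf.gradients(y, [x])            # 4x**3
        (grad_grad,) = tf.gradients(grad, [x])    # 12x**2
        return grad, grad_grad

    g1, g2 = double_grad(tf.constant(2.0))
    print(g1.numpy(), g2.numpy())                 # 32.0 48.0
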
  /external/tensorflow/tensorflow/cc/gradients/
nn_grad.cc 91 auto grad_grad = grad_inputs[1]; local
94 if (!IsZero(scope, grad_grad)) {
98 auto grad_grad_expand = ExpandDims(scope, grad_grad, 1);
104 auto subtraction_result = Subtract(scope, grad_grad, squeeze_result);
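
When grad_grad (the backprop arriving for the op's second output) is nonzero, the C++ gradient above adds a correction that pushes grad_grad through the Jacobian of the softmax; per row this reduces to (grad_grad - <grad_grad, softmax>) * softmax. A hedged sketch of just that term, not the library code (the helper name and shapes are illustrative):

    import tensorflow as tf

    # Illustrative only: (grad_grad - <grad_grad, softmax>) * softmax per row,
    # i.e. grad_grad multiplied by the softmax Jacobian. Not TensorFlow API.
    def softmax_grad_grad_term(logits, grad_grad):
        softmax = tf.nn.softmax(logits)                  # [batch, classes]
        inner = tf.reduce_sum(grad_grad * softmax,       # <grad_grad, softmax>
                              axis=1, keepdims=True)     # [batch, 1]
        return (grad_grad - inner) * softmax             # [batch, classes]
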
  /external/tensorflow/tensorflow/python/ops/
nn_grad.py 512 def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
516 # grad_grad is the backprop for softmax gradient.
533 if grad_grad is not None and not IsZero(grad_grad):
536 grad += ((grad_grad - array_ops.squeeze(
538 array_ops.expand_dims(grad_grad, 1),
    [all...]
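
The Python registration above applies the same correction as the C++ one. One way its grad_grad argument becomes nonzero is differentiating through the cross-entropy gradient itself, e.g. a gradient-penalty style term; a minimal eager-mode sketch (the logits/labels values are arbitrary):

    import tensorflow as tf

    logits = tf.constant([[1.0, 2.0, 3.0]])
    labels = tf.constant([[0.0, 0.0, 1.0]])
    with tf.GradientTape() as outer:
        outer.watch(logits)
        with tf.GradientTape() as inner:
            inner.watch(logits)
            loss = tf.nn.softmax_cross_entropy_with_logits(labels=labels,
                                                           logits=logits)
        grad = inner.gradient(loss, logits)          # softmax(logits) - labels
        penalty = tf.reduce_sum(tf.square(grad))     # depends on the gradient
    # Differentiating the penalty sends a nonzero cotangent (grad_grad) into
    # the second-order branch of the registered gradient function.
    second_order = outer.gradient(penalty, logits)
    print(second_order.numpy())
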
  /external/tensorflow/tensorflow/python/ops/parallel_for/
control_flow_ops_test.py 314 grad_grad = g.gradient(grad, ones)
315 return output, grad, grad_grad
335 grad_grad = g.gradient(grad, ones)
336 return output, grad, grad_grad
    [all...]
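
The parallel_for excerpts differentiate a gradient a second time with respect to a tensor of ones on the same tape g. A hedged sketch of that shape of computation, with the test's model and its pfor vectorization replaced by a trivial stand-in (the cubic and the shapes are assumptions):

    import tensorflow as tf

    # Stand-in computation; the real test vectorizes a model with pfor.
    ones = tf.ones([3])
    with tf.GradientTape(persistent=True) as g:
        g.watch(ones)
        output = tf.reduce_sum(ones ** 3)
        # Computed inside the context so the tape also records the backward ops.
        grad = g.gradient(output, ones)      # 3 * ones**2
    grad_grad = g.gradient(grad, ones)       # 6 * ones (grad is summed first)
    del g                                    # release the persistent tape
    print(output.numpy(), grad.numpy(), grad_grad.numpy())
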
