/external/tensorflow/tensorflow/contrib/optimizer_v2/
  adadelta.py
    59   state.zeros_slot(v, "accum_update")
    63   accum_update = state.get_slot(var, "accum_update")
    67   accum_update,
    76   accum_update = state.get_slot(var, "accum_update")
    80   accum_update.handle,
    89   accum_update = state.get_slot(var, "accum_update")
    93   accum_update,
    [all...]
  adadelta_test.py
    55   accum_update = 0.0
    76   self.assertEqual(["accum", "accum_update"],
    82   slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
    90   slot_update[1] = adadelta_opt.get_slot(var1, "accum_update")
    106  update[step] = (np.sqrt(accum_update + epsilon) *
    108  accum_update = (accum_update * rho + (update[step]**2) *
    121  [accum_update, accum_update],

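The two slots above, "accum" and "accum_update", hold Adadelta's running averages of squared gradients and squared updates; the adadelta_test.py hits at 106 and 108 recompute the same rule in NumPy as a reference. A minimal NumPy sketch of one Adadelta step under that convention (the lr scaling of the final update is not visible in these hits and is an assumption here):

import numpy as np

def adadelta_step(var, accum, accum_update, grad, lr=1.0, rho=0.95, epsilon=1e-8):
    # Running average of squared gradients -- the "accum" slot.
    accum = accum * rho + np.square(grad) * (1.0 - rho)
    # Scaled step: sqrt(accum_update + epsilon) / sqrt(accum + epsilon) * grad,
    # as in the adadelta_test.py hits at 106-108.
    update = np.sqrt(accum_update + epsilon) / np.sqrt(accum + epsilon) * grad
    # Running average of squared updates -- the "accum_update" slot.
    accum_update = accum_update * rho + np.square(update) * (1.0 - rho)
    # The lr scaling of the parameter step is assumed, not shown in the hits above.
    var = var - lr * update
    return var, accum, accum_update

Iterating this with the returned slot values is what the tests do before comparing the slots against [accum_update, accum_update].
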
/external/tensorflow/tensorflow/python/training/
  adadelta.py
    70   self._zeros_slot(v, "accum_update", self._name)
    83   accum_update = self.get_slot(var, "accum_update")
    87   accum_update,
    96   accum_update = self.get_slot(var, "accum_update")
    100  accum_update.handle,
    109  accum_update = self.get_slot(var, "accum_update")
    113  accum_update,
    [all...]
  adadelta_test.py
    56   accum_update = 0.0
    86   self.assertEqual(["accum", "accum_update"],
    92   slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
    100  slot_update[1] = adadelta_opt.get_slot(var1, "accum_update")
    120  np.sqrt(accum_update + epsilon) *
    122  accum_update = (
    123  accum_update * rho + (update[step]**2) * (1.0 - rho))
    137  [accum_update, accum_update],
  training_ops_test.py
    109  accum_update = y + grad * grad
    110  linear_update = z + grad - (accum_update**(-lr_power) - y**
    112  quadratic = 1.0 / (accum_update**(lr_power) * lr) + 2 * l2
    117  self.assertAllCloseAccordingToType(accum_update, self.evaluate(accum))

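The training_ops_test.py hits use the name accum_update for a different optimizer: they come from the FTRL reference computation, where y is the previous accumulator and z the linear term. A sketch of that accumulator/linear/quadratic update, assuming the standard unshrunk FTRL form; hit 110 is truncated in the listing, so the "/ lr * var" tail and the final proximal step are assumptions, not text from the file:

import numpy as np

def ftrl_reference_step(var, accum, linear, grad, lr=0.1, l1=0.0, l2=0.0, lr_power=-0.5):
    # accum_update = y + grad * grad              (hit 109; y is the old accumulator)
    accum_update = accum + grad * grad
    # linear_update = z + grad - (accum_update**(-lr_power) - y**(-lr_power)) / lr * var
    # (hit 110 is truncated; the "/ lr * var" tail is an assumption)
    linear_update = linear + grad - (accum_update ** -lr_power - accum ** -lr_power) / lr * var
    # quadratic = 1.0 / (accum_update**(lr_power) * lr) + 2 * l2    (hit 112)
    quadratic = 1.0 / (accum_update ** lr_power * lr) + 2 * l2
    # Proximal step with the L1 threshold; this clause is the standard FTRL form,
    # assumed here rather than taken from the listing.
    var_update = np.where(np.abs(linear_update) > l1,
                          (np.sign(linear_update) * l1 - linear_update) / quadratic,
                          0.0)
    return var_update, accum_update, linear_update

Hit 117 then checks that this accum_update matches the "accum" value the op produced.
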
/external/tensorflow/tensorflow/compiler/tests/
  adadelta_test.py
    57   accum_update = 0.0
    76   self.assertEqual(["accum", "accum_update"],
    82   slot_update[0] = adadelta_opt.get_slot(var0, "accum_update")
    90   slot_update[1] = adadelta_opt.get_slot(var1, "accum_update")
    107  np.sqrt(accum_update + epsilon) *
    109  accum_update = (
    110  accum_update * rho + (update[step]**2) * (1.0 - rho))
    121  np.array([accum_update, accum_update], dtype=dtype),

/external/tensorflow/tensorflow/python/keras/optimizer_v2/
  adadelta_test.py
    56   accum_update = 0.0
    105  np.sqrt(accum_update + epsilon) *
    107  accum_update = (
    108  accum_update * rho + (update[step]**2) * (1.0 - rho))
    122  [accum_update, accum_update],

/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
  training_ops.cc
    818  xla::XlaOp var, accum, accum_update;    [variable]
    [all...]

/external/tensorflow/tensorflow/core/kernels/
  training_ops_gpu.cu.cc
    60   typename TTypes<T>::Flat accum_update,
    73   (accum_update + epsilon.reshape(single).broadcast(bcast)).sqrt() *
    76   accum_update.device(d) =
    77   accum_update * rho.reshape(single).broadcast(bcast) +
  training_ops.cc
    71   typename TTypes<T>::Flat accum_update,
    79   (accum_update + epsilon()).sqrt() * (accum + epsilon()).rsqrt() * grad;
    81   accum_update.device(d) =
    82   accum_update * rho() + update.square() * (static_cast<T>(1) - rho());
    636  Tensor accum_update;    [local]
    639  sparse, &accum_update));
    650  ctx, accum_update.IsInitialized(),
    692  Tensor accum_update;    [local]
    695  sparse, &accum_update));
    703  device, var.flat<T>(), accum.flat<T>(), accum_update.flat<T>()
    784  Tensor accum_update;    [local]
    [all...]
  training_ops.h
    41   typename TTypes<T>::Flat accum_update,

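The CPU functor (training_ops.cc, hits 79-82) and the GPU functor (training_ops_gpu.cu.cc, hits 73-77) evaluate the same elementwise expression; the GPU version only differs in spelling out rho.reshape(single).broadcast(bcast) to stretch the scalar hyperparameters over the flat tensor. In NumPy that broadcast is implicit; a small sketch of the same whole-array form, with sizes and values chosen here purely for illustration:

import numpy as np

# Flat parameter, slot buffers, and gradient; shapes and values are illustrative only.
var = np.ones(8)
accum = np.zeros_like(var)
accum_update = np.zeros_like(var)
grad = np.full_like(var, 0.1)
lr, rho, epsilon = 1.0, 0.95, 1e-6

# Same whole-array expression as the functors; NumPy broadcasts the scalar
# rho/epsilon implicitly where the CUDA functor writes
# rho.reshape(single).broadcast(bcast) over the flat tensor.
accum = accum * rho + np.square(grad) * (1.0 - rho)
update = np.sqrt(accum_update + epsilon) * (1.0 / np.sqrt(accum + epsilon)) * grad
var -= update * lr
accum_update = accum_update * rho + np.square(update) * (1.0 - rho)

Written with a reciprocal square root, sqrt(accum_update + epsilon) * rsqrt(accum + epsilon) is the same ratio the Python tests compute with two square roots.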