/external/tensorflow/tensorflow/contrib/opt/python/training/

lazy_adam_gs_optimizer.py
  51   beta1_power, beta2_power = self._get_beta_accumulators()
  53   beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
  58   lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
  84   beta1_power, beta2_power = self._get_beta_accumulators()
  86   beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
  91   lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

lazy_adam_optimizer.py
  51   beta1_power, beta2_power = self._get_beta_accumulators()
  53   beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
  58   lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
  84   beta1_power, beta2_power = self._get_beta_accumulators()
  86   beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
  91   lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

nadam_optimizer.py
  38   beta1_power, beta2_power = self._get_beta_accumulators()
  44   math_ops.cast(beta2_power, var.dtype.base_dtype),
  56   beta1_power, beta2_power = self._get_beta_accumulators()
  62   math_ops.cast(beta2_power, grad.dtype.base_dtype),
  72   beta1_power, beta2_power = self._get_beta_accumulators()
  74   beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
  79   lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

adam_gs_optimizer.py
  148  beta1_power, beta2_power = self._get_beta_accumulators()
  154  math_ops.cast(beta2_power, var.dtype.base_dtype),
  165  beta1_power, beta2_power = self._get_beta_accumulators()
  171  math_ops.cast(beta2_power, grad.dtype.base_dtype),
  180  beta1_power, beta2_power = self._get_beta_accumulators()
  182  beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
  187  lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))

adam_gs_optimizer_test.py
  94   beta1_power, beta2_power = opt._get_beta_accumulators()
  100  self.evaluate(beta2_power))
  206  beta1_power, beta2_power = opt._get_beta_accumulators()
  208  self.assertTrue(beta2_power is not None)
  210  self.assertNotIn(beta2_power, opt_variables)
  228  0.999**(t + 1), self.evaluate(beta2_power))
  233  beta1_power, beta2_power = opt._get_beta_accumulators()
  237  0.999**t, self.evaluate(beta2_power))
  286  beta1_power, beta2_power = opt._get_beta_accumulators()
  292  self.evaluate(beta2_power))
  [all...]

lazy_adam_gs_optimizer_test.py
  97   beta1_power, beta2_power = opt._get_beta_accumulators()
  102  self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
  225  beta1_power, beta2_power = opt._get_beta_accumulators()
  227  self.assertIsNotNone(beta2_power is not None)
  229  self.assertNotIn(beta2_power, opt_variables)
  247  0.999**(t + 1), self.evaluate(beta2_power))
  252  beta1_power, beta2_power = opt._get_beta_accumulators()
  256  0.999**t, self.evaluate(beta2_power))
  305  beta1_power, beta2_power = opt._get_beta_accumulators()
  310  self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval()
  [all...]

lazy_adam_optimizer_test.py
  92   beta1_power, beta2_power = opt._get_beta_accumulators()
  97   self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
  199  beta1_power, beta2_power = opt._get_beta_accumulators()
  201  self.assertIsNotNone(beta2_power is not None)
  203  self.assertIn(beta2_power, opt_variables)
  214  beta1_power, beta2_power = opt._get_beta_accumulators()
  226  self.evaluate(beta2_power))
  272  beta1_power, beta2_power = opt._get_beta_accumulators()
  277  self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
  306  beta1_power, beta2_power = opt._get_beta_accumulators(
  [all...]

nadam_optimizer_test.py
  91   beta1_power, beta2_power = opt._get_beta_accumulators()
  96   self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
  140  beta1_power, beta2_power = opt._get_beta_accumulators()
  145  self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
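Note: every contrib variant above shares the same bias correction. beta1_power and beta2_power hold beta1**t and beta2**t after t updates, and the effective step size is lr_t * sqrt(1 - beta2_power) / (1 - beta1_power). A minimal plain-Python sketch of that recurring line (illustrative only, not the TensorFlow code):

    import math

    def bias_corrected_lr(lr_t, beta1_power, beta2_power):
        # Mirrors the recurring line:
        #   lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power))
        return lr_t * math.sqrt(1.0 - beta2_power) / (1.0 - beta1_power)

    # After t = 3 steps with the defaults beta1=0.9, beta2=0.999:
    t = 3
    print(bias_corrected_lr(0.001, 0.9**t, 0.999**t))  # ~0.000202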
/external/tensorflow/tensorflow/contrib/optimizer_v2/

adam.py
  101  state.get_non_slot("beta2_power"))
  108  initial_value=lambda: state.get_hyper("beta2"), name="beta2_power")
  118  beta1_power, beta2_power = self._get_beta_accumulators(state)
  124  math_ops.cast(beta2_power, var.dtype.base_dtype),
  135  beta1_power, beta2_power = self._get_beta_accumulators(state)
  141  math_ops.cast(beta2_power, grad.dtype.base_dtype),
  150  beta1_power, beta2_power = self._get_beta_accumulators(state)
  152  beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
  157  lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power)
  [all...]

adam_test.py
  89   beta1_power, beta2_power = opt._get_beta_accumulators()
  94   self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
  177  beta1_power, beta2_power = opt._get_beta_accumulators()
  179  self.assertTrue(beta2_power is not None)
  181  self.assertIn(beta2_power, opt_variables)
  193  beta1_power, beta2_power = opt._get_beta_accumulators()
  205  self.evaluate(beta2_power))
  247  beta1_power, beta2_power = opt._get_beta_accumulators()
  252  self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
  281  beta1_power, beta2_power = opt._get_beta_accumulators(
  [all...]
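Here beta2_power is a non-slot value seeded from the beta2 hyperparameter (line 108) and decayed once per apply step, which is why the tests expect 0.999**t. A rough sketch of that accumulator lifecycle, assuming the conventional "seed with beta, multiply by beta each step" scheme (names and structure are illustrative, not the optimizer_v2 internals):

    class BetaAccumulators:
        # Stand-in for the beta1_power/beta2_power non-slot values.
        def __init__(self, beta1=0.9, beta2=0.999):
            self.beta1, self.beta2 = beta1, beta2
            # Seeded from the hyperparameters, i.e. the value used at step 1.
            self.beta1_power, self.beta2_power = beta1, beta2

        def finish_step(self):
            # Decayed once per apply step, so after t steps the value is beta**t.
            self.beta1_power *= self.beta1
            self.beta2_power *= self.beta2

    acc = BetaAccumulators()
    for _ in range(4):
        acc.finish_step()
    assert abs(acc.beta2_power - 0.999**5) < 1e-12  # 5 steps total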
/external/tensorflow/tensorflow/python/training/

adam.py
  116  self._get_non_slot_variable("beta2_power", graph=graph))
  127  initial_value=self._beta2, name="beta2_power", colocate_with=first_var)
  148  beta1_power, beta2_power = self._get_beta_accumulators()
  154  math_ops.cast(beta2_power, var.dtype.base_dtype),
  165  beta1_power, beta2_power = self._get_beta_accumulators()
  171  math_ops.cast(beta2_power, grad.dtype.base_dtype),
  180  beta1_power, beta2_power = self._get_beta_accumulators()
  182  beta2_power = math_ops.cast(beta2_power, var.dtype.base_dtype)
  187  lr = (lr_t * math_ops.sqrt(1 - beta2_power) / (1 - beta1_power)
  [all...]

adam_test.py
  89   beta1_power, beta2_power = opt._get_beta_accumulators()
  95   self.evaluate(beta2_power))
  195  beta1_power, beta2_power = opt._get_beta_accumulators()
  197  self.assertTrue(beta2_power is not None)
  199  self.assertIn(beta2_power, opt_variables)
  207  resource_variable_ops.is_resource_variable(beta2_power))
  218  beta1_power, beta2_power = opt._get_beta_accumulators()
  230  self.evaluate(beta2_power))
  277  beta1_power, beta2_power = opt._get_beta_accumulators()
  283  self.evaluate(beta2_power))
  [all...]

sync_replicas_optimizer_test.py
  295  beta1_power, beta2_power = opt._opt._get_beta_accumulators()
  297  self.assertIn(beta2_power, opt_variables)

training_ops_test.py
  279  beta2_power = beta2**t
  285  beta2_power_t = variables.VariableV1(beta2_power)
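training_ops_test.py computes the expected accumulator value directly as beta2**t and feeds it to the op. For reference, a NumPy sketch of one dense Adam step showing where the two powers enter; this approximates the kind of reference update these tests compare against, with names chosen here for illustration:

    import numpy as np

    def adam_step_numpy(var, m, v, g, t, lr=0.001, beta1=0.9, beta2=0.999,
                        epsilon=1e-8):
        beta1_power, beta2_power = beta1**t, beta2**t  # what the accumulators hold
        alpha = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
        m = beta1 * m + (1 - beta1) * g
        v = beta2 * v + (1 - beta2) * (g * g)
        var = var - alpha * m / (np.sqrt(v) + epsilon)
        return var, m, v

    var, m, v = np.array([1.0, 2.0]), np.zeros(2), np.zeros(2)
    for t in range(1, 4):  # t is 1-based, matching beta2_power == beta2**t
        var, m, v = adam_step_numpy(var, m, v, g=np.array([0.1, 0.1]), t=t)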
/external/tensorflow/tensorflow/compiler/tests/

adam_test.py
  81   beta1_power, beta2_power = opt._get_beta_accumulators()
  87   self.evaluate(beta2_power))
  124  beta1_power, beta2_power = opt._get_beta_accumulators()
  130  self.evaluate(beta2_power))
  164  beta1_power, beta2_power = opt._get_beta_accumulators()
  174  self.evaluate(beta2_power))
/external/tensorflow/tensorflow/core/kernels/

training_ops_gpu.cu.cc
  130  typename TTypes<T>::ConstScalar beta2_power,
  149  (lr * (beta2_power.constant(one) - beta2_power).sqrt() /
  158  var.device(d) -= (lr * (beta2_power.constant(one) - beta2_power).sqrt() /
  174  typename TTypes<T>::ConstScalar beta2_power,
  192  var.device(d) -= (lr * (beta2_power.constant(one) - beta2_power).sqrt() /

training_ops.h
  143  typename TTypes<T>::ConstScalar beta2_power,
  157  typename TTypes<T>::ConstScalar beta2_power,

training_ops_test.cc
  173  auto beta2_power = Scalar(g, 0.99);  local
  181  {var, m, v, beta1_power, beta2_power, lr, beta1, beta2, epsilon, grad});

training_ops.cc
  307   typename TTypes<T>::ConstScalar beta2_power,
  313   const T alpha = lr() * Eigen::numext::sqrt(T(1) - beta2_power()) /
  336   T beta1_power, T beta2_power, T lr, T beta1, T beta2,
  339   lr * Eigen::numext::sqrt(T(1) - beta2_power) / (T(1) - beta1_power);
  356   typename TTypes<T>::ConstScalar beta2_power,
  362   const T alpha = lr() * Eigen::numext::sqrt(T(1) - beta2_power()) /
  2837  const Tensor& beta2_power = ctx->input(4);  variable
  2935  T beta2_power = 0;  variable
  3106  const Tensor& beta2_power = ctx->input(5);  variable
  [all...]
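The CPU and GPU kernels above fold both bias corrections into a single scalar, alpha = lr * sqrt(1 - beta2_power) / (1 - beta1_power), rather than materializing bias-corrected moments m_hat and v_hat. With epsilon == 0 the two formulations are algebraically identical (with a nonzero epsilon they differ slightly; the adam.py docstring refers to the kernel's epsilon as "epsilon hat"). A quick numerical check, illustrative only:

    import numpy as np

    m, v = np.array([0.3, -0.1, 0.7]), np.array([0.04, 0.09, 0.25])
    lr, beta1, beta2, t = 0.001, 0.9, 0.999, 7
    beta1_power, beta2_power = beta1**t, beta2**t

    # Kernel form: one folded scalar applied to the raw moments.
    alpha = lr * np.sqrt(1 - beta2_power) / (1 - beta1_power)
    update_kernel = alpha * m / np.sqrt(v)

    # Paper (Algorithm 1) form: explicitly bias-corrected moments.
    m_hat = m / (1 - beta1_power)
    v_hat = v / (1 - beta2_power)
    update_paper = lr * m_hat / np.sqrt(v_hat)

    assert np.allclose(update_kernel, update_paper)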
/external/tensorflow/tensorflow/python/keras/optimizer_v2/

nadam_test.py
  107  beta1_power, beta2_power = get_beta_accumulators(opt, dtype)
  112  self.assertAllCloseAccordingToType(0.999**(t + 1), beta2_power.eval())
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/

training_ops.cc
  430  errors::InvalidArgument("beta2_power is not a scalar: ",
  459  xla::XlaOp beta2_power = ctx->Input(4);  variable
  474  xla::XlaOp alpha = lr * xla::Sqrt(one - beta2_power) / (one - beta1_power);
  [all...]
/external/tensorflow/tensorflow/python/training/tracking/

util_with_v1_optimizers_test.py
  116  "optimizer/beta2_power",
  149  "beta2_power",
  150  named_variables["optimizer/beta2_power" + suffix].full_name)
  226  # Preserve beta1_power and beta2_power when appying gradients so we can
  256  beta1_power, beta2_power = on_create_optimizer._get_beta_accumulators()
  258  self.assertAllEqual(optimizer_variables[1], self.evaluate(beta2_power))
  [all...]
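The tracking test above checks that the accumulators survive object-based checkpointing under keys like "optimizer/beta2_power". A simpler, name-based illustration of the same idea, sketched with the V1 graph-mode APIs (the exact checkpoint key names can vary by graph construction):

    import tensorflow.compat.v1 as tf

    tf.disable_eager_execution()
    w = tf.Variable([1.0, 2.0], name="w")
    loss = tf.reduce_sum(w * w)
    train_op = tf.train.AdamOptimizer(0.001).minimize(loss)
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(train_op)
        ckpt = saver.save(sess, "/tmp/adam_demo_ckpt")

    # Expect 'beta1_power' and 'beta2_power' among the checkpoint keys,
    # alongside the 'w/Adam' and 'w/Adam_1' slot variables.
    print([name for name, _ in tf.train.list_variables(ckpt)])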