/external/tensorflow/tensorflow/python/training/ |
learning_rate_decay.py |
    32 def exponential_decay(learning_rate,
    49 decayed_learning_rate = learning_rate *
    62 learning_rate = tf.train.exponential_decay(starter_learning_rate, global_step,
    66 tf.train.GradientDescentOptimizer(learning_rate)
    72 learning_rate: A scalar `float32` or `float64` `Tensor` or a
    85 A scalar `Tensor` of the same type as `learning_rate`. The decayed
    95 [learning_rate, global_step, decay_steps, decay_rate]) as name:
    96 learning_rate = ops.convert_to_tensor(learning_rate, name="learning_rate")
    [all...]
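The docstring fragments above describe the schedule decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps). A minimal graph-mode sketch of wiring it into an optimizer, based on the documented tf.train.exponential_decay signature (the toy loss is illustrative only):

    import tensorflow as tf

    w = tf.Variable(5.0)
    loss = tf.square(w)  # toy loss for illustration

    global_step = tf.Variable(0, trainable=False)
    starter_learning_rate = 0.1
    # Multiply the rate by 0.96 every 100000 steps; staircase=True decays in
    # discrete jumps rather than continuously.
    learning_rate = tf.train.exponential_decay(
        starter_learning_rate, global_step,
        decay_steps=100000, decay_rate=0.96, staircase=True)
    # Passing global_step to minimize() increments it on each update, which
    # in turn advances the decay schedule.
    train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)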
momentum.py |
    36 variable -= learning_rate * accumulation
    46 def __init__(self, learning_rate, momentum,
    51 learning_rate: A `Tensor` or a floating point value. The learning rate.
    64 When eager execution is enabled, learning_rate and momentum can each be a
    71 self._learning_rate = learning_rate
    80 learning_rate = self._learning_rate
    81 if callable(learning_rate):
    82 learning_rate = learning_rate()
    83 self._learning_rate_tensor = ops.convert_to_tensor(learning_rate,
    [all...]
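Line 64 notes that under eager execution learning_rate and momentum may be callables, evaluated when gradients are applied (lines 81-82). A hedged sketch of that pattern; the decay formula and variable names are illustrative, not from the file:

    import tensorflow as tf
    tf.enable_eager_execution()  # available in later TF 1.x releases
    tfe = tf.contrib.eager

    step = tf.train.get_or_create_global_step()
    # A callable is re-evaluated at each apply, so the rate can track the step.
    lr_fn = lambda: 0.1 * (0.96 ** (step.numpy() // 1000))
    optimizer = tf.train.MomentumOptimizer(learning_rate=lr_fn, momentum=0.9)

    w = tfe.Variable(3.0)
    # Under eager execution, minimize() takes the loss as a callable.
    optimizer.minimize(lambda: (w - 1.0) ** 2, global_step=step)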
gradient_descent.py |
    34 def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
    38 learning_rate: A Tensor or a floating point value. The learning
    45 self._learning_rate = learning_rate
    73 name="learning_rate")
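For reference, a minimal graph-mode sketch of GradientDescentOptimizer, whose constructor appears at line 34 above; the toy quadratic loss is illustrative:

    import tensorflow as tf

    w = tf.Variable(2.0)
    loss = tf.square(w - 1.0)  # toy loss; minimum at w == 1.0
    train_op = tf.train.GradientDescentOptimizer(learning_rate=0.5).minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(10):
            sess.run(train_op)  # each run moves w toward 1.0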
proximal_gradient_descent.py |
    38 def __init__(self, learning_rate, l1_regularization_strength=0.0,
    44 learning_rate: A Tensor or a floating point value. The learning
    55 self._learning_rate = learning_rate
    101 name="learning_rate")
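The proximal variant takes the same learning_rate plus regularization strengths (per the constructor at line 38); the L1 term drives small weights to exactly zero through a proximal soft-thresholding step. A sketch, with regularization values chosen only for illustration:

    import tensorflow as tf

    w = tf.Variable([2.0, -0.01])
    loss = tf.reduce_sum(tf.square(w - 1.0))  # toy loss
    opt = tf.train.ProximalGradientDescentOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.01,  # encourages exact zeros
        l2_regularization_strength=0.001)
    train_op = opt.minimize(loss)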
/external/tensorflow/tensorflow/python/estimator/canned/ |
optimizers.py |
    41 def get_optimizer_instance(opt, learning_rate=None):
    46 * A string: Creates an `Optimizer` subclass with the given `learning_rate`.
    56 learning_rate: A float. Only used if `opt` is a string.
    63 ValueError: If `opt` is a supported string but `learning_rate` was not
    69 if not learning_rate:
    70 raise ValueError('learning_rate must be specified when opt is string.')
    71 return _OPTIMIZER_CLS_NAMES[opt](learning_rate=learning_rate)
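Based on this docstring and the tests below, the helper resolves a string name to an Optimizer subclass built with the given rate, and raises when the rate is missing. Note the module lives under the private tensorflow.python.estimator path, so the import is an implementation detail:

    from tensorflow.python.estimator.canned import optimizers

    # String names ('Adagrad', 'Adam', 'Ftrl', 'RMSProp', 'SGD') require a rate.
    opt = optimizers.get_optimizer_instance('Adagrad', learning_rate=0.1)

    # Omitting the rate for a string raises:
    #   ValueError: learning_rate must be specified when opt is string.
    # optimizers.get_optimizer_instance('Adagrad', learning_rate=None)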
optimizers_test.py |
    36 optimizers.get_optimizer_instance('unsupported_name', learning_rate=0.1)
    40 ValueError, 'learning_rate must be specified when opt is string'):
    41 optimizers.get_optimizer_instance('Adagrad', learning_rate=None)
    44 opt = optimizers.get_optimizer_instance('Adagrad', learning_rate=0.1)
    49 opt = optimizers.get_optimizer_instance('Adam', learning_rate=0.1)
    54 opt = optimizers.get_optimizer_instance('Ftrl', learning_rate=0.1)
    59 opt = optimizers.get_optimizer_instance('RMSProp', learning_rate=0.1)
    64 opt = optimizers.get_optimizer_instance('SGD', learning_rate=0.1)
linear.py |
    46 learning_rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns)))
    47 return ftrl.FtrlOptimizer(learning_rate=learning_rate)
    143 learning_rate=_LEARNING_RATE)
    199 learning_rate=0.1,
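Line 46 scales the default Ftrl rate down with the number of feature columns, capped at the module constant _LEARNING_RATE. A sketch of the heuristic; the helper name and the constant's value are assumptions for illustration:

    import math
    import tensorflow as tf

    _LEARNING_RATE = 0.2  # assumed value of the module constant

    def _default_ftrl_optimizer(feature_columns):  # hypothetical helper name
        # More columns -> smaller steps, never exceeding the default rate.
        learning_rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns)))
        return tf.train.FtrlOptimizer(learning_rate=learning_rate)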
/external/tensorflow/tensorflow/contrib/opt/python/training/ |
addsign_test.py |
    63 learning_rate=0.1,
    91 learning_rate=learning_rate,
    127 learning_rate,
    137 learning_rate,
    153 self._testDense(use_resource=False, learning_rate=0.01, alpha=0.1, beta=0.8)
    159 self._testDense(use_resource=True, learning_rate=0.01, alpha=0.1, beta=0.8)
    166 learning_rate=0.1,
    199 learning_rate=learning_rate,
    [all...]
powersign_test.py |
    64 learning_rate=0.1,
    92 learning_rate=learning_rate,
    129 learning_rate,
    139 learning_rate,
    156 learning_rate=0.1,
    164 self._testDense(use_resource=True, learning_rate=0.1, base=10.0, beta=0.8)
    171 learning_rate=0.1,
    204 learning_rate=learning_rate,
    [all...]
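These two test files exercise the AddSign and PowerSign rules from Bello et al.'s neural-optimizer-search work, with the parameters visible above (alpha/beta, base/beta). A NumPy sketch of the dense updates; the moving-average form is taken from the paper, not from the test files, and the optional sign-decay schedule is omitted:

    import numpy as np

    def addsign_step(w, g, m, lr=0.01, alpha=0.1, beta=0.8):
        m = beta * m + (1 - beta) * g  # moving average of gradients
        # The step grows when the gradient's sign agrees with the average's
        # sign and shrinks when it disagrees.
        w = w - lr * (alpha + np.sign(g) * np.sign(m)) * g
        return w, m

    def powersign_step(w, g, m, lr=0.1, base=10.0, beta=0.8):
        m = beta * m + (1 - beta) * g
        # Same idea, but the agreement term scales the step multiplicatively.
        w = w - lr * (base ** (np.sign(g) * np.sign(m))) * g
        return w, m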
/external/webrtc/webrtc/base/ |
rollingaccumulator.h |
    124 // Weights nth sample with weight (learning_rate)^n. Learning_rate should be
    126 double ComputeWeightedMean(double learning_rate) const {
    127 if (count_ < 1 || learning_rate <= 0.0 || learning_rate >= 1.0) {
    135 current_weight *= learning_rate;
/external/tensorflow/tensorflow/contrib/layers/python/layers/ |
optimizers.py |
    50 "learning_rate",
    60 learning_rate,
    81 Alternatively, if `learning_rate` is `None`, the function takes no
    82 arguments. E.g. `optimize_loss(..., learning_rate=None,
    99 learning_rate: float or `Tensor`, magnitude of update per each training
    119 learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
    124 Ignored if `learning_rate` is not supplied.
    149 * `learning_rate` is an invalid type or value.
    152 * `learning_rate` and `learning_rate_decay_fn` are supplied, but no
    172 if learning_rate is not None
    [all...]
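Pulling the docstring fragments together: optimize_loss builds a train op from a loss, a global step, a rate, and an optimizer, and learning_rate_decay_fn (line 119) lets the rate decay with the step. A sketch; the decay constants and toy loss are illustrative:

    import tensorflow as tf
    from tensorflow.contrib.layers import optimize_loss

    w = tf.Variable(2.0)
    loss = tf.square(w - 1.0)  # toy loss
    global_step = tf.train.get_or_create_global_step()

    train_op = optimize_loss(
        loss, global_step,
        learning_rate=0.1,
        optimizer='SGD',  # a string name, as in the tests below
        learning_rate_decay_fn=lambda lr, step: tf.train.exponential_decay(
            lr, step, decay_steps=1000, decay_rate=0.96))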
optimizers_test.py |
    63 gradient_descent.GradientDescentOptimizer(learning_rate=0.1),
    64 lambda lr: gradient_descent.GradientDescentOptimizer(learning_rate=lr)
    71 loss, global_step, learning_rate=0.1, optimizer=optimizer)
    81 return gradient_descent.GradientDescentOptimizer(learning_rate=0.1)
    87 loss, global_step, learning_rate=None, optimizer=optimizer_fn)
    102 loss, global_step, learning_rate=0.1, optimizer=optimizer)
    109 loss, global_step, learning_rate=0.1, optimizer="SGD",
    117 None, global_step, learning_rate=0.1, optimizer="SGD")
    120 [[1.0]], global_step, learning_rate=0.1, optimizer="SGD")
    133 learning_rate=0.1
    [all...]
/external/tensorflow/tensorflow/examples/learn/ |
iris_custom_decay_dnn.py |
    55 learning_rate = tf.train.exponential_decay(
    56 learning_rate=0.1, global_step=global_step,
    58 optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
/external/tensorflow/tensorflow/contrib/timeseries/python/timeseries/ |
test_utils.py |
    104 learning_rate=0.1, ignore_params_fn=lambda _: (),
    156 optimizer=adam.AdamOptimizer(learning_rate))
    172 learning_rate=0.1, rtol=0.2, atol=0.1, train_loss_tolerance_coeff=0.99,
    188 learning_rate: Step size for optimization.
    214 train_iterations=train_iterations, seed=seed, learning_rate=learning_rate,
    257 learning_rate=0.1,
    269 learning_rate: Step size for optimization.
    279 seed=seed, learning_rate=learning_rate,
    [all...]
/external/tensorflow/tensorflow/contrib/training/python/training/ |
training_test.py |
    99 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    116 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    150 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    183 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    206 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    245 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    280 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    303 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    329 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    349 def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0)
    [all...]
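Judging by the create_train_op helper at line 349, these tests build a fixed-rate optimizer and hand it to create_train_op from tf.contrib.training. A sketch of that pattern under that assumption (the toy loss is illustrative):

    import tensorflow as tf

    w = tf.Variable(2.0)
    total_loss = tf.square(w - 1.0)  # toy loss
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
    # create_train_op wires gradient computation, the variable update, and the
    # global-step increment into a single op that evaluates to total_loss.
    train_op = tf.contrib.training.create_train_op(total_loss, optimizer)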
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/ |
dynamic_rnn_estimator_test.py |
    260 learning_rate=0.1)
    361 learning_rate = 0.1
    398 learning_rate=learning_rate,
    417 learning_rate = 0.1
    454 learning_rate=learning_rate,
    513 learning_rate = 0.1
    548 learning_rate=learning_rate,
    [all...]
dynamic_rnn_estimator.py |
    389 learning_rate=None,
    428 learning_rate: Learning rate used for optimization. This argument has no
    524 learning_rate=learning_rate,
    553 learning_rate=0.1,
    619 learning_rate: Learning rate. This argument has no effect if `optimizer`
    671 optimizer = momentum_opt.MomentumOptimizer(learning_rate, momentum)
    682 learning_rate=learning_rate,
state_saving_rnn_estimator.py |
    393 learning_rate=None,
    429 learning_rate: Learning rate used for optimization. This argument has no
    517 learning_rate=learning_rate,
    542 learning_rate=0.1,
    578 learning_rate: Learning rate. This argument has no effect if `optimizer`
    628 optimizer_type = momentum_opt.MomentumOptimizer(learning_rate, momentum)
    643 learning_rate=learning_rate,
state_saving_rnn_estimator_test.py |
    349 learning_rate=0.1)
    472 learning_rate = 0.3
    502 learning_rate=learning_rate,
    531 learning_rate = 0.5
    559 learning_rate=learning_rate,
    603 learning_rate = 0.4
    642 learning_rate=learning_rate,
    [all...]
/external/tensorflow/tensorflow/examples/tutorials/mnist/ |
mnist.py |
    100 def training(loss, learning_rate):
    112 learning_rate: The learning rate to use for gradient descent.
    120 optimizer = tf.train.GradientDescentOptimizer(learning_rate)
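The tutorial's training() helper (line 100) wraps the optimizer setup. A sketch reconstructed from the fragments above and the published MNIST tutorial; the summary line and variable names may differ slightly from the actual file:

    import tensorflow as tf

    def training(loss, learning_rate):
        tf.summary.scalar('loss', loss)  # so the loss shows up in TensorBoard
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        # Counts the updates applied; minimize() increments it for us.
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
        return train_op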
/external/tensorflow/tensorflow/contrib/slim/python/slim/ |
learning_test.py |
    252 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    287 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    321 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    354 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    379 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    406 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    439 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    458 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    477 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    513 optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0
    [all...]
/external/tensorflow/tensorflow/contrib/eager/python/examples/mnist/ |
mnist_graph_test.py |
    46 optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0)
/external/tensorflow/tensorflow/python/keras/_impl/keras/ |
model_subclassing_test.py |
    182 optimizer=RMSPropOptimizer(learning_rate=0.001),
    202 optimizer=RMSPropOptimizer(learning_rate=0.001),
    223 model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    241 model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    261 model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    294 model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    326 model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    358 model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    378 model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001))
    387 model.compile(loss='mse', optimizer=RMSPropOptimizer(learning_rate=0.001)
    [all...]
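The pattern repeated throughout these tests is compiling a subclassed Keras model with a TensorFlow-native optimizer. A sketch using the public tf.keras surface rather than the internal _impl path; the model architecture and data are illustrative:

    import numpy as np
    import tensorflow as tf

    class TinyModel(tf.keras.Model):  # hypothetical model for illustration
        def __init__(self):
            super(TinyModel, self).__init__()
            self.hidden = tf.keras.layers.Dense(16, activation='relu')
            self.out = tf.keras.layers.Dense(1)

        def call(self, inputs):
            return self.out(self.hidden(inputs))

    model = TinyModel()
    # tf.train optimizers are accepted by compile() alongside Keras ones.
    model.compile(loss='mse',
                  optimizer=tf.train.RMSPropOptimizer(learning_rate=0.001))
    model.fit(np.random.rand(32, 8), np.random.rand(32, 1), verbose=0)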
/external/tensorflow/tensorflow/contrib/eager/python/examples/rnn_ptb/ |
rnn_ptb.py |
    307 # Make learning_rate a Variable so it can be included in the checkpoint
    308 # and we can resume training with the last saved learning_rate.
    309 learning_rate = tfe.Variable(20.0, name="learning_rate")
    310 sys.stderr.write("learning_rate=%f\n" % learning_rate.numpy())
    314 optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    322 tfe.Saver(model.trainable_weights + [learning_rate]).save(
    326 learning_rate.assign(learning_rate / 4.0
    [all...]
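Storing the rate in a tfe.Variable (line 309) lets the Saver checkpoint it alongside the weights (line 322), and line 326 quarters it in place with assign(). A sketch of the decay step; the condition that triggers it is an assumption, since the fragment cuts off before the surrounding logic:

    import tensorflow as tf
    tfe = tf.contrib.eager

    learning_rate = tfe.Variable(20.0, name="learning_rate")
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)

    def decay_if_no_improvement(val_loss, best_loss):
        # Hypothetical trigger: quarter the rate when validation stalls.
        if best_loss is not None and val_loss > best_loss:
            learning_rate.assign(learning_rate / 4.0)  # as at line 326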
/external/tensorflow/tensorflow/python/keras/_impl/keras/engine/ |
training_eager_test.py |
    45 optimizer = RMSPropOptimizer(learning_rate=0.001)
    196 model.compile(optimizer=RMSPropOptimizer(learning_rate=0.001), loss='mse')
    213 optimizer = RMSPropOptimizer(learning_rate=0.001)
    291 optimizer=RMSPropOptimizer(learning_rate=0.001))
    305 optimizer=RMSPropOptimizer(learning_rate=0.001),
    330 optimizer=RMSPropOptimizer(learning_rate=0.001))
    397 optimizer=RMSPropOptimizer(learning_rate=0.001))
    475 optimizer=RMSPropOptimizer(learning_rate=0.001),
    493 optimizer=RMSPropOptimizer(learning_rate=0.001))
    512 optimizer=RMSPropOptimizer(learning_rate=0.001)
    [all...]