    Searched refs:optimizer (Results 176 - 200 of 335)

  /external/tensorflow/tensorflow/contrib/opt/python/training/
elastic_average_optimizer.py 15 """Wrapper optimizer for Elastic Average SGD """
30 from tensorflow.python.training import optimizer
65 optimizer = ElasticAverageOptimizer(
72 train_op = optimizer.apply_gradients(
76 hooks = [optimizer.make_session_run_hook(is_chief, task_index)]
142 class ElasticAverageOptimizer(optimizer.Optimizer):
143 """Wrapper optimizer that implements the Elastic Average SGD algorithm.
144 This is an async optimizer. During training, each worker will update
166 """Construct a new gradient descent optimizer
    [all...]
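
The snippet above (lines 65-76 of the file) is the canonical usage pattern. A rough sketch of how the pieces fit together, assuming the contrib API of this era (ElasticAverageCustomGetter and the num_worker/ea_custom_getter arguments are taken from that API and should be treated as assumptions); in a real job this sits inside a replica_device_setter scope:

    import tensorflow as tf
    from tensorflow.contrib.opt import ElasticAverageCustomGetter, ElasticAverageOptimizer

    # Assumption: one custom getter per worker, scoped to that worker's device.
    ea_getter = ElasticAverageCustomGetter(worker_device='/job:worker/task:0')
    with tf.variable_scope('', custom_getter=ea_getter):
        var = tf.get_variable('w', shape=[2], initializer=tf.zeros_initializer())
    loss = tf.reduce_sum(tf.square(var - 1.0))

    optimizer = ElasticAverageOptimizer(
        tf.train.GradientDescentOptimizer(0.1),
        num_worker=2, ea_custom_getter=ea_getter)
    train_op = optimizer.apply_gradients(
        optimizer.compute_gradients(loss),
        global_step=tf.train.get_or_create_global_step())
    # The hook pulls workers toward the center variables every few steps.
    hooks = [optimizer.make_session_run_hook(is_chief=True, task_index=0)]
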
adam_gs_optimizer_test.py 119 # If a GPU is available, tests that all optimizer ops can be placed on
124 optimizer = adam_gs_optimizer.AdamGSOptimizer(3.0)
125 minimize_op = optimizer.minimize(gathered_sum)
348 optimizer = adam_gs_optimizer.AdamGSOptimizer()
353 optimizer.apply_gradients([(grads0, var0)])
360 optimizer.apply_gradients([(grads0, var0)])
368 # If the optimizer saves any state not keyed by graph the following line
370 optimizer.apply_gradients([(grads0, var0)])
lazy_adam_gs_optimizer_test.py 116 # If a GPU is available, tests that all optimizer ops can be placed on
128 optimizer = lazy_adam_gs_optimizer.LazyAdamGSOptimizer(
130 minimize_op = optimizer.minimize(gathered_sum, global_step=global_step)
366 optimizer = lazy_adam_gs_optimizer.LazyAdamGSOptimizer()
371 optimizer.apply_gradients([(grads0, var0)])
378 optimizer.apply_gradients([(grads0, var0)])
386 # If the optimizer saves any state not keyed by graph the following line
388 optimizer.apply_gradients([(grads0, var0)])
lazy_adam_optimizer_test.py 111 # If a GPU is available, tests that all optimizer ops can be placed on
120 optimizer = lazy_adam_optimizer.LazyAdamOptimizer(3.0)
121 minimize_op = optimizer.minimize(gathered_sum)
329 optimizer = lazy_adam_optimizer.LazyAdamOptimizer()
334 optimizer.apply_gradients([(grads0, var0)])
341 optimizer.apply_gradients([(grads0, var0)])
349 # If the optimizer saves any state not keyed by graph the following line
351 optimizer.apply_gradients([(grads0, var0)])
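
All three of these Adam-variant test files end with the same cross-graph check flagged in their comments ("If the optimizer saves any state not keyed by graph the following line ..."). A minimal sketch of what that contract means, using the stock AdamOptimizer:

    import tensorflow as tf

    optimizer = tf.train.AdamOptimizer()  # default hyperparameters, as in the tests

    for _ in range(2):
        with tf.Graph().as_default():
            var0 = tf.Variable([1.0, 2.0], dtype=tf.float32)
            grads0 = tf.constant([0.1, 0.1], dtype=tf.float32)
            # Reusing one optimizer instance across graphs only works if all
            # of its state (slots, beta-power accumulators) is keyed by graph;
            # otherwise this second pass would fail with a cross-graph error.
            optimizer.apply_gradients([(grads0, var0)])
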
  /external/tensorflow/tensorflow/python/keras/
callbacks_test.py 232 optimizer='rmsprop',
277 loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
301 loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
449 loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
483 optimizer='sgd', loss='binary_crossentropy', metrics=['accuracy'])
507 optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])
579 optimizer='sgd',
593 model.optimizer.lr)) - 0.2) < keras.backend.epsilon()
598 optimizer='sgd',
610 model.optimizer.lr)) - 0.01 / 4) < keras.backend.epsilon(
    [all...]
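
The assertions at lines 593 and 610 compare model.optimizer.lr against values set by a learning-rate callback. A runnable sketch of that pattern with dummy data (the 0.2 target mirrors the first assertion):

    import numpy as np
    from tensorflow import keras

    model = keras.Sequential(
        [keras.layers.Dense(1, input_shape=(4,), activation='sigmoid')])
    model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['acc'])

    # LearningRateScheduler writes the schedule into the optimizer's lr variable.
    scheduler = keras.callbacks.LearningRateScheduler(lambda epoch: 0.2)
    x = np.random.random((8, 4))
    y = np.random.randint(0, 2, (8, 1))
    model.fit(x, y, epochs=2, callbacks=[scheduler], verbose=0)

    assert abs(keras.backend.get_value(model.optimizer.lr) - 0.2) < keras.backend.epsilon()
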
optimizers.py 16 """Built-in optimizer classes.
42 from tensorflow.python.training import optimizer as tf_optimizer_module
48 class Optimizer(object):
49 """Abstract optimizer base class.
51 Note: this is the parent class of all optimizers, not an actual optimizer
67 'passed to optimizer: ' + str(k))
109 """Sets the weights of the optimizer, from Numpy arrays.
112 (otherwise the optimizer has no weights).
118 of the optimizer (i.e. it should match the
129 'of the optimizer (' + str(len(params)) + ')'
    [all...]
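
The length check quoted at line 129 is the contract behind set_weights: the incoming list must match the optimizer's existing weights in count and shapes. A quick round trip:

    import numpy as np
    from tensorflow import keras

    model = keras.Sequential([keras.layers.Dense(2, input_shape=(3,))])
    model.compile(loss='mse', optimizer=keras.optimizers.RMSprop(lr=0.001))
    model.train_on_batch(np.zeros((1, 3)), np.zeros((1, 2)))  # creates optimizer state

    weights = model.optimizer.get_weights()  # list of Numpy arrays
    model.optimizer.set_weights(weights)     # fine: same length, same shapes
    # model.optimizer.set_weights(weights[:-1])  # would raise ValueError (length mismatch)
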
model_subclassing_test.py 594 optimizer='rmsprop',
614 optimizer='rmsprop',
635 optimizer='rmsprop',
668 optimizer='rmsprop',
701 optimizer='rmsprop',
734 optimizer='rmsprop',
755 optimizer='rmsprop',
767 optimizer='rmsprop',
788 optimizer='rmsprop',
813 optimizer='rmsprop'
    [all...]
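
Every hit here is the compile step of a subclassed-model test. Reduced to a sketch, the pattern is:

    import numpy as np
    from tensorflow import keras

    class SimpleModel(keras.Model):
        def __init__(self):
            super(SimpleModel, self).__init__()
            self.dense1 = keras.layers.Dense(8, activation='relu')
            self.dense2 = keras.layers.Dense(1)

        def call(self, inputs):
            return self.dense2(self.dense1(inputs))

    model = SimpleModel()
    model.compile(optimizer='rmsprop', loss='mse')
    model.fit(np.random.random((16, 4)), np.random.random((16, 1)),
              epochs=1, verbose=0)
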
callbacks_v1_test.py 88 optimizer='sgd',
196 optimizer='sgd',
284 optimizer='sgd',
324 optimizer='sgd',
373 loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
485 optimizer=adam.AdamOptimizer(0.01),
  /external/tensorflow/tensorflow/python/keras/engine/
training_generator_test.py 76 optimizer=rmsprop.RMSprop(1e-3),
116 optimizer=rmsprop.RMSprop(1e-3),
181 optimizer=rmsprop.RMSprop(1e-3),
218 model.compile(loss='mse', optimizer=rmsprop.RMSprop(1e-3),
287 model.compile(loss='mse', optimizer=rmsprop.RMSprop(1e-3))
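
These tests pair a small RMSprop(1e-3) model with Python generators. A minimal sketch of the pattern, using fit_generator (the era-appropriate entry point) and dummy data:

    import numpy as np
    from tensorflow import keras

    model = keras.Sequential([keras.layers.Dense(1, input_shape=(2,))])
    model.compile(loss='mse', optimizer=keras.optimizers.RMSprop(1e-3))

    def batches():
        while True:  # Keras expects generators to loop indefinitely
            yield np.random.random((4, 2)), np.random.random((4, 1))

    model.fit_generator(batches(), steps_per_epoch=5, epochs=1, verbose=0)
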
  /external/tensorflow/tensorflow/python/keras/saving/
hdf5_format_test.py 277 optimizer=keras.optimizers.RMSprop(lr=0.0001),
289 optimizer=keras.optimizers.RMSprop(lr=0.0001),
315 optimizer=keras.optimizers.RMSprop(lr=0.0001),
327 optimizer=keras.optimizers.RMSprop(lr=0.0001),
352 optimizer=keras.optimizers.RMSprop(lr=0.0001),
411 optimizer=keras.optimizers.RMSprop(lr=0.0001),
466 # test with custom optimizer, loss
477 model.compile(loss=custom_loss, optimizer=CustomOp(), metrics=['acc'])
510 optimizer=keras.optimizers.RMSprop(lr=0.0001),
542 model.compile(loss='mse', optimizer='sgd', metrics=['acc']
    [all...]
hdf5_format.py 53 - the model's optimizer's state (if any)
67 include_optimizer: If True, save optimizer's state together.
109 if include_optimizer and model.optimizer:
110 if isinstance(model.optimizer, optimizers.TFOptimizer):
114 'optimizer attributes or optimizer state '
116 'As a result, we cannot save the optimizer '
119 'Prefer using a Keras optimizer instead '
125 'class_name': model.optimizer.__class__.__name__,
126 'config': model.optimizer.get_config(
    [all...]
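
In context: save_model stores the optimizer's class name, config, and weights when include_optimizer is True, but skips a TFOptimizer wrapper (with the warning quoted above) because its state lives outside Keras. A sketch of the round trip:

    import numpy as np
    from tensorflow import keras

    model = keras.Sequential([keras.layers.Dense(1, input_shape=(3,))])
    model.compile(loss='mse', optimizer=keras.optimizers.RMSprop(lr=0.0001))
    model.train_on_batch(np.zeros((1, 3)), np.zeros((1, 1)))

    keras.models.save_model(model, '/tmp/model.h5', include_optimizer=True)
    restored = keras.models.load_model('/tmp/model.h5')  # optimizer state comes back
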
  /external/tensorflow/tensorflow/python/training/
gradient_descent_test.py 47 optimizer = gradient_descent.GradientDescentOptimizer(3.0)
48 sgd_op = optimizer.apply_gradients(
61 self.assertEqual(0, len(optimizer.variables()))
262 optimizer = gradient_descent.GradientDescentOptimizer(1.0)
269 optimizer.apply_gradients([(grad, self.v)])
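
The arithmetic in the first snippet checks out by hand: with learning rate 3.0 and gradient 0.1, every entry drops by 0.3; and, as the assertion at line 61 shows, plain gradient descent allocates no slot variables:

    import tensorflow as tf

    var0 = tf.Variable([1.0, 2.0])
    grad0 = tf.constant([0.1, 0.1])

    optimizer = tf.train.GradientDescentOptimizer(3.0)
    sgd_op = optimizer.apply_gradients([(grad0, var0)])

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(sgd_op)
        print(sess.run(var0))              # [0.7, 1.7]
        print(len(optimizer.variables()))  # 0: SGD keeps no per-variable state
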
optimizer.py 99 def update_op(self, optimizer, g):
116 def update_op(self, optimizer, g):
118 update_op = optimizer._apply_dense(g, self._v) # pylint: disable=protected-access
131 return optimizer._apply_sparse_duplicate_indices(g, self._v)
143 def update_op(self, optimizer, g):
145 update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0])
162 def update_op(self, optimizer, g):
168 return optimizer._resource_apply_sparse_duplicate_indices(
170 update_op = optimizer._resource_apply_dense(g, self._v)
182 compute the gradients with respect to a Tensor using the optimizer. Updating
    [all...]
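
The update_op methods above dispatch each gradient to one of the optimizer's private hooks (_apply_dense, _apply_sparse_duplicate_indices, and the _resource_* variants). A hypothetical minimal subclass, implementing only the dense path, shows where a custom rule plugs in (ScaledSGD is an invented name for illustration):

    import tensorflow as tf

    class ScaledSGD(tf.train.Optimizer):
        """Hypothetical optimizer: plain SGD wired through the _apply_* hooks."""

        def __init__(self, learning_rate, use_locking=False, name='ScaledSGD'):
            super(ScaledSGD, self).__init__(use_locking, name)
            self._lr = learning_rate

        def _apply_dense(self, grad, var):
            # Dense update: var <- var - lr * grad. A real optimizer also
            # implements the sparse and resource-variable hooks.
            return tf.assign_sub(var, self._lr * grad,
                                 use_locking=self._use_locking)

Calling ScaledSGD(0.5).minimize(loss) then routes every dense ref-variable gradient through this hook via the dispatch shown above.
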
adam_test.py 117 # If a GPU is available, tests that all optimizer ops can be placed on
122 optimizer = adam.AdamOptimizer(3.0)
123 minimize_op = optimizer.minimize(gathered_sum)
337 optimizer = adam.AdamOptimizer()
342 optimizer.apply_gradients([(grads0, var0)])
349 optimizer.apply_gradients([(grads0, var0)])
357 # If the optimizer saves any state not keyed by graph the following line
359 optimizer.apply_gradients([(grads0, var0)])
  /external/tensorflow/tensorflow/contrib/optimizer_v2/
optimizer_v2.py 16 """Version 2 of class Optimizer."""
39 from tensorflow.python.training import optimizer as optimizer_v1
55 def update_op(self, optimizer, g, *args):
69 def update_op(self, optimizer, g, *args):
71 update_op = optimizer._apply_dense(g, self._v, *args) # pylint: disable=protected-access
84 return optimizer._apply_sparse_duplicate_indices(g, self._v, *args)
96 def update_op(self, optimizer, g, *args):
98 update_op = optimizer._resource_apply_dense(g, self._v.op.inputs[0], *args)
115 def update_op(self, optimizer, g, *args):
121 return optimizer._resource_apply_sparse_duplicate_indices
    [all...]
gradient_descent_test.py 15 """Functional test for GradientDescent optimizer."""
42 optimizer = gradient_descent.GradientDescentOptimizer(3.0)
43 sgd_op = optimizer.apply_gradients(
56 self.assertEqual(0, len(optimizer.variables()))
  /external/tensorflow/tensorflow/python/keras/layers/
cudnn_recurrent_test.py 115 optimizer=RMSprop(learning_rate=0.001))
145 optimizer=RMSprop(learning_rate=0.001),
213 model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
410 model.compile(loss='mse', optimizer='rmsprop')
426 model.compile(loss='mse', optimizer='rmsprop')
435 model.compile(loss='mse', optimizer='rmsprop')
444 model.compile(loss='mse', optimizer='rmsprop')
lstm_test.py 130 optimizer='rmsprop',
144 optimizer='rmsprop',
174 optimizer=adam.AdamOptimizer(),
200 optimizer=adam.AdamOptimizer(),
252 optimizer='rmsprop',
314 optimizer=adam.AdamOptimizer(),
361 model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
gru_v2_test.py 130 optimizer=gradient_descent.GradientDescentOptimizer(0.01))
313 optimizer=gradient_descent.GradientDescentOptimizer(0.001))
326 optimizer=gradient_descent.GradientDescentOptimizer(0.01))
352 optimizer=gradient_descent.GradientDescentOptimizer(0.001))
437 model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
498 model.compile(optimizer='adam',
561 optimizer = gradient_descent.GradientDescentOptimizer(0.001)
562 train_op = optimizer.minimize(loss)
619 optimizer = gradient_descent.GradientDescentOptimizer(0.001)
620 train_op = optimizer.minimize(loss
    [all...]
lstm_v2_test.py 117 optimizer=gradient_descent.GradientDescentOptimizer(0.01))
147 optimizer=gradient_descent.GradientDescentOptimizer(0.01))
175 optimizer=gradient_descent.GradientDescentOptimizer(0.01))
234 optimizer=gradient_descent.GradientDescentOptimizer(0.01))
299 optimizer=gradient_descent.GradientDescentOptimizer(0.01))
387 optimizer=gradient_descent.GradientDescentOptimizer(0.01))
400 optimizer=gradient_descent.GradientDescentOptimizer(0.01))
600 model.compile(optimizer=gradient_descent.GradientDescentOptimizer(0.01),
661 model.compile(optimizer='adam',
710 optimizer = gradient_descent.GradientDescentOptimizer(0.001
    [all...]
  /external/deqp-deps/SPIRV-Tools/tools/comp/
markv.cpp 29 #include "spirv-tools/optimizer.hpp"
268 spvtools::Optimizer optimizer(kSpvEnv);
269 optimizer.RegisterPass(spvtools::CreateCompactIdsPass());
270 if (!optimizer.Run(spirv.data(), spirv.size(), &spirv_before)) {
271 std::cerr << "error: Optimizer failure on: " << input_filename
  /external/swiftshader/third_party/SPIRV-Tools/tools/comp/
markv.cpp 29 #include "spirv-tools/optimizer.hpp"
268 spvtools::Optimizer optimizer(kSpvEnv);
269 optimizer.RegisterPass(spvtools::CreateCompactIdsPass());
270 if (!optimizer.Run(spirv.data(), spirv.size(), &spirv_before)) {
271 std::cerr << "error: Optimizer failure on: " << input_filename
  /external/libopus/scripts/
rnn_train.py 57 # try using different optimizers and different optimizer configs
59 optimizer='adam',
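
The comment at line 57 invites exactly this kind of experiment. In Keras the choice is between a string name, which takes the optimizer's defaults, and a configured instance (dummy model; hyperparameters are illustrative; shown with tf.keras, though rnn_train.py uses standalone Keras, where the API is the same):

    from tensorflow import keras

    model = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])

    # A string picks the optimizer with its default config...
    model.compile(loss='mse', optimizer='adam')
    # ...an instance exposes the knobs to tune.
    model.compile(loss='mse',
                  optimizer=keras.optimizers.Adam(lr=0.001, beta_1=0.9))
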
  /external/tensorflow/tensorflow/contrib/distribute/python/
keras_stateful_lstm_model_correctness_test.py 71 optimizer=gradient_descent.GradientDescentOptimizer(
  /external/tensorflow/tensorflow/contrib/eager/python/examples/revnet/
main_estimator_tpu.py 135 optimizer = tf.train.MomentumOptimizer(learning_rate,
138 optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
143 train_op = optimizer.apply_gradients(
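
CrossShardOptimizer is a thin wrapper: it aggregates gradients across TPU shards before the inner optimizer applies them. A build-time sketch of the wrapping pattern from the snippet (the loss is a stand-in; the cross-shard ops only execute on an actual TPU):

    import tensorflow as tf

    use_tpu = True  # in the estimator this comes from the run config

    var = tf.Variable(2.0)
    loss = tf.square(var)

    optimizer = tf.train.MomentumOptimizer(learning_rate=0.1, momentum=0.9)
    if use_tpu:
        # All-reduces gradients across shards before the momentum update.
        optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)

    train_op = optimizer.apply_gradients(
        optimizer.compute_gradients(loss),
        global_step=tf.train.get_or_create_global_step())
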
