/external/tensorflow/tensorflow/python/training/
gradient_descent.py
   34  def __init__(self, learning_rate, use_locking=False, name="GradientDescent"):
   40  use_locking: If True use locks for update operations.
   44  super(GradientDescentOptimizer, self).__init__(use_locking, name)
   52  use_locking=self._use_locking).op
   58  grad, use_locking=self._use_locking)
   69  return var.scatter_sub(delta, use_locking=self._use_locking)

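Taken together, these hits show the standard TF 1.x pattern: `use_locking` is accepted in the constructor, forwarded to the base `Optimizer`, and threaded into every update op. A minimal usage sketch, assuming the TF 1.x `tf.train` API (variable and loss here are illustrative):

    import tensorflow as tf

    var = tf.Variable(5.0)
    loss = tf.square(var - 3.0)

    # use_locking=True makes the apply-gradients update a locked
    # read-modify-write, trading some throughput for consistency when
    # several ops touch the same variable concurrently.
    opt = tf.train.GradientDescentOptimizer(learning_rate=0.1, use_locking=True)
    train_op = opt.minimize(loss)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(100):
            sess.run(train_op)
        print(sess.run(var))  # approaches 3.0
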
proximal_gradient_descent.py
   39  l2_regularization_strength=0.0, use_locking=False,
   50  use_locking: If True use locks for update operations.
   54  super(ProximalGradientDescentOptimizer, self).__init__(use_locking, name)
   68  use_locking=self._use_locking).op
   77  use_locking=self._use_locking)
   87  use_locking=self._use_locking).op
   97  use_locking=self._use_locking)

adadelta.py
   37  use_locking=False, name="Adadelta"):
   46  use_locking: If `True` use locks for update operations.
   50  super(AdadeltaOptimizer, self).__init__(use_locking, name)
   81  use_locking=self._use_locking)
   94  use_locking=self._use_locking)
  108  use_locking=self._use_locking)
  122  use_locking=self._use_locking)

adagrad.py
   41  use_locking=False, name="Adagrad"):
   48  use_locking: If `True` use locks for update operations.
   58  super(AdagradOptimizer, self).__init__(use_locking, name)
   91  use_locking=self._use_locking)
  100  use_locking=self._use_locking)
  110  use_locking=self._use_locking)
  120  use_locking=self._use_locking)

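The adagrad.py hits guard both the accumulator update and the variable update with the same flag. A hand-rolled sketch of that arithmetic, not the library kernel; the names and the 0.1 initial accumulator are illustrative (the latter matches the adagrad_test.py listing below):

    import tensorflow as tf

    var = tf.Variable(tf.ones([3]))
    acc = tf.Variable(tf.fill([3], 0.1))  # initial_accumulator_value=0.1
    grad = tf.constant([0.1, 0.2, 0.3])
    lr = 3.0

    # Accumulate squared gradients, then scale the step by rsqrt(acc);
    # both writes honor the same use_locking flag, as in adagrad.py.
    acc_t = tf.assign_add(acc, grad * grad, use_locking=True)
    train = tf.assign_sub(var, lr * grad * tf.rsqrt(acc_t), use_locking=True)
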
momentum.py
   47  use_locking=False, name="Momentum", use_nesterov=False):
   53  use_locking: If `True` use locks for update operations.
   70  super(MomentumOptimizer, self).__init__(use_locking, name)
   97  use_locking=self._use_locking,
  107  use_locking=self._use_locking,
  117  use_locking=self._use_locking,
  127  use_locking=self._use_locking,

proximal_adagrad.py
   39  use_locking=False, name="ProximalAdagrad"):
   50  use_locking: If `True` use locks for update operations.
   60  super(ProximalAdagradOptimizer, self).__init__(use_locking, name)
   94  grad, use_locking=self._use_locking)
  102  grad, use_locking=self._use_locking)
  111  use_locking=self._use_locking)
  121  use_locking=self._use_locking)

adam.py
   41  use_locking=False, name="Adam"):
   89  use_locking: If True use locks for update operations.
   93  super(AdamOptimizer, self).__init__(use_locking, name)
  152  grad, use_locking=self._use_locking).op
  166  grad, use_locking=self._use_locking)
  181  use_locking=self._use_locking)
  187  v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
  193  use_locking=self._use_locking)
  200  x, i, v, use_locking=self._use_locking))
  218  beta1_power * self._beta1_t, use_locking=self._use_locking
  [all...]

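adam.py's lines 181 through 200 chain `assign`/`assign_add` on the same slot variable, passing the flag to each write. A simplified sketch of that two-step moment update, assuming TF 1.x ref variables (the names are illustrative, and the control dependency stands in for the chained-ref style the library uses):

    import tensorflow as tf

    m = tf.Variable(tf.zeros([3]))   # first-moment slot
    grad = tf.constant([0.1, 0.2, 0.3])
    beta1 = 0.9

    # Scale the old moment, then add the gradient contribution; the
    # control dependency keeps the two locked writes ordered.
    m_scaled = tf.assign(m, m * beta1, use_locking=True)
    with tf.control_dependencies([m_scaled]):
        m_t = tf.assign_add(m, grad * (1 - beta1), use_locking=True)
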
ftrl.py
   45  use_locking=False,
   61  use_locking: If `True` use locks for update operations.
   85  super(FtrlOptimizer, self).__init__(use_locking, name)
  158  use_locking=self._use_locking)
  173  use_locking=self._use_locking)
  190  use_locking=self._use_locking)
  205  use_locking=self._use_locking)
  223  use_locking=self._use_locking)
  239  use_locking=self._use_locking)
  255  use_locking=self._use_locking
  [all...]

rmsprop.py
   66  use_locking=False,
   88  use_locking: If True use locks for update operation.
   96  super(RMSPropOptimizer, self).__init__(use_locking, name)
  145  use_locking=self._use_locking).op
  156  use_locking=self._use_locking).op
  173  use_locking=self._use_locking)
  184  use_locking=self._use_locking)
  202  use_locking=self._use_locking)
  214  use_locking=self._use_locking)
  232  use_locking=self._use_locking
  [all...]

adagrad_da.py
   51  use_locking=False,
   64  use_locking: If `True` use locks for update operations.
   76  super(AdagradDAOptimizer, self).__init__(use_locking, name)
  122  use_locking=self._use_locking)
  138  use_locking=self._use_locking)
  155  use_locking=self._use_locking)
  172  use_locking=self._use_locking)

adagrad_test.py
   37  def doTestBasic(self, use_locking=False, use_resource=False):
   49  3.0, initial_accumulator_value=0.1, use_locking=use_locking)
   66  self.doTestBasic(use_locking=False)
   69  self.doTestBasic(use_locking=False, use_resource=True)
   72  self.doTestBasic(use_locking=True)

/external/tensorflow/tensorflow/python/ops/ |
state_ops.py
  194  def assign_sub(ref, value, use_locking=None, name=None):
  207  use_locking: An optional `bool`. Defaults to `False`.
  218  ref, value, use_locking=use_locking, name=name)
  223  def assign_add(ref, value, use_locking=None, name=None):
  236  use_locking: An optional `bool`. Defaults to `False`.
  247  ref, value, use_locking=use_locking, name=name)
  252  def assign(ref, value, validate_shape=None, use_locking=None, name=None):
  268  use_locking: An optional `bool`. Defaults to `True`
  [all...]

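Note the asymmetry these docstrings record: `assign_sub` and `assign_add` default `use_locking` to False, while `assign` defaults to True. A quick sketch of the public TF 1.x wrappers over these ops (the counter is illustrative):

    import tensorflow as tf

    counter = tf.Variable(0)

    inc = tf.assign_add(counter, 1, use_locking=True)  # locked; default is False
    dec = tf.assign_sub(counter, 1)                    # unlocked by default
    reset = tf.assign(counter, 0)                      # locked by default

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(inc)
        sess.run(dec)
        print(sess.run(counter))  # 0
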
variables.py
  592  def assign(self, value, use_locking=False):
  599  use_locking: If `True`, use locking during the assignment.
  605  return state_ops.assign(self._variable, value, use_locking=use_locking)
  607  def assign_add(self, delta, use_locking=False):
  614  use_locking: If `True`, use locking during the operation.
  620  return state_ops.assign_add(self._variable, delta, use_locking=use_locking)
  622  def assign_sub(self, delta, use_locking=False):
  629  use_locking: If `True`, use locking during the operation
  [all...]

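These `Variable` methods are thin forwards to the `state_ops` functions above, so the same calls can be written method-style; a one-line sketch (the variable is illustrative):

    import tensorflow as tf

    v = tf.Variable(10.0)
    # Equivalent to state_ops.assign_sub(v, 2.0, use_locking=True):
    step = v.assign_sub(2.0, use_locking=True)
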
/external/valgrind/drd/tests/ |
circular_buffer.c
   52  static int use_locking = 1;
  106  if (use_locking)
  115  if (use_locking)
  129  if (use_locking)
  138  if (use_locking)
  195  use_locking = 0;

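This valgrind/DRD test flips a global `use_locking` to 0 so the race detector can observe the resulting unsynchronized writes. A Python analogue of that toggle pattern, hypothetical and not a translation of the C test:

    import threading

    use_locking = True           # set to False to reproduce the racy variant
    _lock = threading.Lock()
    count = 0

    def increment():
        global count
        if use_locking:
            with _lock:
                count += 1       # synchronized read-modify-write
        else:
            count += 1           # unsynchronized; a race detector flags this

    threads = [threading.Thread(target=increment) for _ in range(8)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    print(count)
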
/external/tensorflow/tensorflow/contrib/gan/python/features/python/ |
clip_weights_impl.py
   79  use_locking=True,

/external/tensorflow/tensorflow/contrib/opt/python/training/ |
lazy_adam_optimizer.py
   64  use_locking=self._use_locking)
   71  use_locking=self._use_locking)
   79  use_locking=self._use_locking)

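LazyAdamOptimizer replaces Adam's dense moment updates with scatter ops on just the rows a sparse gradient touches, again forwarding `self._use_locking`. A sketch of the sparse-write primitive it relies on (the table shape and indices are illustrative):

    import tensorflow as tf

    table = tf.Variable(tf.zeros([4, 2]))
    indices = tf.constant([1, 3])
    updates = tf.constant([[1.0, 1.0], [2.0, 2.0]])

    # Only rows 1 and 3 are written; the rest of the table is untouched.
    sparse_write = tf.scatter_update(table, indices, updates, use_locking=True)
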
variable_clipping_optimizer.py
   57  use_locking=False,
   69  use_locking: If `True` use locks for clip update operations.
   75  super(VariableClippingOptimizer, self).__init__(use_locking, name)
  117  return var.assign_sub(delta, use_locking=self._use_locking)
  136  return var.scatter_sub(delta, use_locking=self._use_locking)

addsign.py
   42  use_locking=False,
   76  use_locking: If True, use locks for update operations.
   80  super(AddSignOptimizer, self).__init__(use_locking, name)
  121  use_locking=self._use_locking).op
  133  use_locking=self._use_locking)
  142  m, (m * beta_t) + (grad * (1 - beta_t)), use_locking=self._use_locking)
  166  use_locking=self._use_locking)

powersign.py
   44  use_locking=False,
   78  use_locking: If True, use locks for update operations.
   82  super(PowerSignOptimizer, self).__init__(use_locking, name)
  123  use_locking=self._use_locking).op
  135  use_locking=self._use_locking)
  145  m, (m * beta_t) + (grad * (1 - beta_t)), use_locking=self._use_locking)
  170  use_locking=self._use_locking)

nadam_optimizer.py
   49  use_locking=self._use_locking,
   67  use_locking=self._use_locking,
   82  m_t = state_ops.assign(m, m * beta1_t, use_locking=self._use_locking)
   90  v_t = state_ops.assign(v, v * beta2_t, use_locking=self._use_locking)
   95  var, lr * m_bar / (v_sqrt + epsilon_t), use_locking=self._use_locking)

drop_stale_gradient_optimizer.py
   47  use_locking=False,
   55  use_locking: If `True` use locks for clip update operations.
   59  super(DropStaleGradientOptimizer, self).__init__(use_locking, name)

/external/tensorflow/tensorflow/contrib/mpi_collectives/ |
__init__.py
  173  def __init__(self, optimizer, name=None, use_locking=False):
  184  use_locking: Whether to use locking when updating variables. See
  192  name=name, use_locking=use_locking)

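The mpi_collectives DistributedOptimizer takes an inner optimizer and forwards only `name` and `use_locking` to the `tf.train.Optimizer` base. A skeletal wrapper in the same shape, shown only for the constructor-forwarding pattern (the class itself is hypothetical):

    import tensorflow as tf

    class WrappingOptimizer(tf.train.Optimizer):
        """Delegates work to an inner optimizer; only name/use_locking
        are handled by the tf.train.Optimizer base class."""

        def __init__(self, optimizer, name=None, use_locking=False):
            if name is None:
                name = "Wrapped{}".format(type(optimizer).__name__)
            super(WrappingOptimizer, self).__init__(use_locking, name)
            self._optimizer = optimizer

        def compute_gradients(self, *args, **kwargs):
            return self._optimizer.compute_gradients(*args, **kwargs)

        def apply_gradients(self, *args, **kwargs):
            return self._optimizer.apply_gradients(*args, **kwargs)
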
/external/tensorflow/tensorflow/contrib/bayesflow/python/ops/ |
sgld_optimizer.py
  143  super(SGLDOptimizer, self).__init__(use_locking=False,
  176  use_locking=self._use_locking).op
  190  use_locking=self._use_locking).op

/external/tensorflow/tensorflow/python/kernel_tests/ |
dense_update_ops_no_tsan_test.py
   41  p, ones_t, use_locking=False) for _ in range(20)
  101  p, ones_t, use_locking=True) for _ in range(20)
  128  p, math_ops.multiply(ones_t, float(i)), use_locking=True)

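This test builds many adds against one variable and runs them concurrently; with `use_locking=True` the result is deterministic. A condensed sketch of that setup (the sizes are illustrative):

    import tensorflow as tf

    p = tf.Variable(tf.zeros([1024]))
    ones_t = tf.ones([1024])

    # Twenty concurrent increments of the same variable; locking makes
    # each read-modify-write atomic, so every element ends at 20.0.
    adds = [tf.assign_add(p, ones_t, use_locking=True) for _ in range(20)]

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(adds)
        print(sess.run(p)[0])  # 20.0 with locking; may drop updates without
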
/external/tensorflow/tensorflow/python/estimator/canned/ |
optimizers_test.py
   73  use_locking=False, name='TestOptimizer')
