    Searched refs: loss (Results 201 - 225 of 362)


  /external/tensorflow/tensorflow/python/keras/_impl/keras/layers/
normalization_test.py 86 model.compile(loss='mse', optimizer='sgd')
105 model.compile(loss='mse', optimizer='sgd')
simplernn_test.py 100 model.compile(optimizer='sgd', loss='mse')
193 model.compile(loss='categorical_crossentropy', optimizer='adam')
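
(For orientation: a minimal, self-contained sketch of the compile pattern these Keras tests exercise; the layer shape and random data are hypothetical.)

    import numpy as np
    import tensorflow as tf

    # Build a tiny model and compile it with the same loss/optimizer
    # strings seen in the tests above ('mse' + 'sgd').
    model = tf.keras.models.Sequential([
        tf.keras.layers.Dense(4, input_shape=(8,)),
    ])
    model.compile(loss='mse', optimizer='sgd')

    # Random data just to show the round trip.
    x = np.random.random((16, 8)).astype('float32')
    y = np.random.random((16, 4)).astype('float32')
    model.fit(x, y, epochs=1, verbose=0)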
  /external/tensorflow/tensorflow/python/keras/_impl/keras/
losses.py 16 """Built-in loss functions.
88 Tensor with one scalar loss entry per sample.
146 def serialize(loss):
147 return serialize_keras_object(loss)
156 printable_module_name='loss function')
172 'loss function identifier:', identifier)
models.py 109 # misc functions (e.g. loss function)
160 'loss': model.loss,
269 # Recover loss functions and metrics.
270 loss = convert_custom_objects(training_config['loss'])
278 loss=loss,
705 loss,
716 loss: String (name of objective function) or objective function
    [all...]
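
(A sketch of the serialize/deserialize round trip that models.py relies on when saving and restoring a model's loss, written against the public tf.keras API; the assert is illustrative.)

    import tensorflow as tf

    # serialize() maps a built-in loss to its config (here, its name);
    # deserialize() reverses it, which is what models.py does when it
    # recovers 'loss' from a saved training_config.
    loss_fn = tf.keras.losses.mean_squared_error
    name = tf.keras.losses.serialize(loss_fn)      # 'mean_squared_error'
    restored = tf.keras.losses.deserialize(name)
    assert restored is loss_fn                     # same built-in function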
  /external/tensorflow/tensorflow/python/keras/_impl/keras/wrappers/
scikit_learn_test.py 45 optimizer='sgd', loss='categorical_crossentropy', metrics=['accuracy'])
81 optimizer='sgd', loss='mean_absolute_error', metrics=['accuracy'])
  /external/tensorflow/tensorflow/python/training/
adadelta_test.py 153 loss = pred * pred
155 1.0, 1.0, 1.0).minimize(loss)
adagrad_da_test.py 90 loss = pred * pred
92 1.0, global_step).minimize(loss)
optimizer.py 258 grads_and_vars = opt.compute_gradients(loss, <list of variables>)
343 def minimize(self, loss, global_step=None, var_list=None,
347 """Add operations to minimize `loss` by updating `var_list`.
355 loss: A `Tensor` containing the value to minimize.
359 minimize `loss`. Defaults to the list of variables collected in
368 grad_loss: Optional. A `Tensor` holding the gradient computed for `loss`.
378 When eager execution is enabled, `loss` should be a Python function that
380 minimized. If `var_list` is None, `loss` should take no arguments.
383 variables created during the execution of the `loss` function.
389 loss, var_list=var_list, gate_gradients=gate_gradients
    [all...]
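
(A minimal TF 1.x graph-mode sketch of the API documented above: minimize() as shorthand for compute_gradients() plus apply_gradients(), on the same quadratic loss = pred * pred used in the adadelta/adagrad tests.)

    import tensorflow as tf

    # Quadratic toy loss, mirroring the tests above.
    var = tf.Variable([2.0])
    pred = 3.0 * var
    loss = pred * pred

    opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train_op = opt.minimize(loss, var_list=[var])

    # The equivalent explicit two-step form:
    grads_and_vars = opt.compute_gradients(loss, var_list=[var])
    apply_op = opt.apply_gradients(grads_and_vars)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for _ in range(10):
            sess.run(train_op)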
  /external/webrtc/webrtc/modules/bitrate_controller/
send_side_bandwidth_estimation.cc 94 uint8_t* loss,
97 *loss = last_fraction_loss_;
117 // Check sequence number diff and weight loss report
125 // Don't generate a loss rate until it can be based on enough packets.
177 // packet loss reported, to allow startup bitrate probing.
191 // Loss < 2%: Increase rate by 8% of the min bitrate in the last
197 // whenever a receiver report is received with lower packet loss.
199 // take over one second since the lower packet loss to achieve 108kbps.
214 // Loss between 2% - 10%: Do nothing.
216 // Loss > 10%: Limit the rate decreases to once a kBweDecreaseIntervalMs
    [all...]
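
(A schematic Python sketch, not the webrtc code, of the loss-threshold policy the comments describe; the proportional back-off factor is an assumption here, and the real code also rate-limits decreases to once per kBweDecreaseIntervalMs.)

    # fraction_loss is the reported packet loss as a fraction in [0, 1].
    def update_bitrate(bitrate_bps, fraction_loss):
        if fraction_loss < 0.02:
            # Loss < 2%: probe upward by ~8%.
            return int(bitrate_bps * 1.08)
        if fraction_loss <= 0.10:
            # Loss between 2% and 10%: hold the current rate.
            return bitrate_bps
        # Loss > 10%: back off in proportion to the reported loss.
        return int(bitrate_bps * (1.0 - 0.5 * fraction_loss))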
  /prebuilts/go/darwin-x86/src/strconv/
doc.go 26 // converted to that narrower type without data loss:
  /prebuilts/go/linux-x86/src/strconv/
doc.go 26 // converted to that narrower type without data loss:
  /external/tensorflow/tensorflow/contrib/tpu/python/tpu/
tpu_estimator.py 164 'loss',
173 See `EstimatorSpec` for `mode`, 'predictions', 'loss', 'train_op', and
209 loss=None,
226 loss=loss,
251 loss=self.loss,
    [all...]
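
(A hypothetical model_fn, invented for illustration, showing where the loss/train_op fields above come from; mode, loss, and train_op mirror the plain EstimatorSpec, and only the TRAIN path is sketched.)

    import tensorflow as tf

    def model_fn(features, labels, mode, params):
        # Toy model body; real TPU model functions go here.
        predictions = tf.layers.dense(features['x'], 1)
        loss = tf.losses.mean_squared_error(labels, predictions)
        train_op = tf.train.GradientDescentOptimizer(0.01).minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.contrib.tpu.TPUEstimatorSpec(
            mode=mode, loss=loss, train_op=train_op)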
  /external/iproute2/tc/
q_netem.c 38 " [ loss random PERCENT [CORRELATION]]\n" \
39 " [ loss state P13 [P31 [P32 [P23 P14]]]\n" \
40 " [ loss gemodel PERCENT [R [1-H [1-K]]]\n" \
220 } else if (matches(*argv, "loss") == 0 ||
222 if (opt.loss > 0 || loss_type != NETEM_LOSS_UNSPEC) {
223 explain1("duplicate loss argument\n");
228 /* Old (deprecated) random loss model syntax */
235 if (get_percent(&opt.loss, *argv)) {
236 explain1("loss percent");
243 explain1("loss correlation")
    [all...]
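
(The 'loss gemodel' option above selects a Gilbert-Elliott channel; below is a small illustrative Python sketch of that two-state model, with parameter names following the usage text: p and r are transition probabilities, 1-h and 1-k are per-state loss probabilities.)

    import random

    def gilbert_elliott(n_packets, p=0.01, r=0.30, one_minus_h=0.70,
                        one_minus_k=0.0):
        bad = False
        drops = []
        for _ in range(n_packets):
            if bad:
                bad = random.random() >= r   # stay Bad with prob 1 - r
            else:
                bad = random.random() < p    # enter Bad with prob p
            loss_prob = one_minus_h if bad else one_minus_k
            drops.append(random.random() < loss_prob)
        return drops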
  /external/tensorflow/tensorflow/contrib/timeseries/python/timeseries/state_space_models/
state_space_model_test.py 101 outputs.loss.eval()
119 outputs.loss.eval()
151 model_outputs.loss.eval()
297 if prediction_name == "loss":
324 model_outputs.loss.eval()
328 outputs = (model_outputs.loss, posteriors,
580 model_outputs.loss.eval()
753 outputs.loss.eval()
filtering_postprocessor.py 43 """Extends/modifies a filtering step, altering state and loss.
171 loss = model_responsibility
222 updated_outputs: The `outputs` dictionary, updated with a new "loss"
261 outputs["loss"] = -interpolated_log_likelihood
  /external/tensorflow/tensorflow/contrib/gan/python/estimator/python/
gan_estimator_test.py 107 mode=mode, loss=array_ops.zeros([]),
112 loss=array_ops.zeros([]))
162 sess.run(estimator_spec.loss)
215 self.assertIn('loss', six.iterkeys(scores))
  /external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/
linear_test.py 80 """Tests that loss goes down with training."""
97 loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
99 loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
104 """Tests that loss goes down with training with joint weights."""
123 loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
125 loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
207 self.assertIn('loss', scores)
402 set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
503 loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
    [all...]
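
(A self-contained version of the "loss goes down with training" pattern above, rewritten against the core tf.estimator API with hypothetical random data; the contrib.learn calls in the test are older equivalents of train/evaluate.)

    import numpy as np
    import tensorflow as tf

    def input_fn():
        x = {'x': tf.constant(np.random.RandomState(0).rand(100, 1), tf.float32)}
        y = tf.constant(np.random.RandomState(1).rand(100, 1), tf.float32)
        return x, y

    est = tf.estimator.LinearRegressor(
        feature_columns=[tf.feature_column.numeric_column('x')])
    est.train(input_fn=input_fn, steps=100)
    loss1 = est.evaluate(input_fn=input_fn, steps=1)['loss']
    est.train(input_fn=input_fn, steps=200)
    loss2 = est.evaluate(input_fn=input_fn, steps=1)['loss']
    assert loss2 < loss1  # further training should reduce the loss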
model_fn_test.py 53 loss=constant_op.constant([1]),
57 "loss": (constant_op.constant(1.), control_flow_ops.no_op()),
68 if key != "loss":
71 self.assertEqual(model_fn_ops.loss, estimator_spec.loss)
  /external/tensorflow/tensorflow/contrib/opt/python/training/
drop_stale_gradient_optimizer_test.py 73 # Gradients for loss on var_0 and var_1 will be 1.0.
74 loss = 0 - var_0 - var_1
84 grad_and_vars = stale_check_opt.compute_gradients(loss)
86 grad_and_vars = stale_check_opt.compute_gradients(loss)
  /external/tensorflow/tensorflow/contrib/slim/python/slim/
learning.py 19 loss and applies the gradients) and a training loop function. The training loop
33 # Define the loss:
51 (a) computes the loss, (b) applies the gradients to update the weights and
52 (c) returns the value of the loss. slim.learning.create_train_op creates
386 """Creates an `Operation` that evaluates the gradients and returns the loss.
389 total_loss: A `Tensor` representing the total loss.
415 loss value.
462 total loss.
467 The total loss and a boolean indicating whether or not to stop training.
504 logging.info('global step %d: loss = %.4f (%.3f sec/step)'
    [all...]
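
(Condensed usage sketch following the learning.py docstring: create_train_op wraps the loss so each run of train_op computes the loss, applies the gradients, and returns the loss value. The tiny model, labels, and logdir are hypothetical.)

    import tensorflow as tf
    slim = tf.contrib.slim

    # Define a toy model and the loss:
    images = tf.random_normal([8, 4])
    labels = tf.one_hot(tf.zeros([8], tf.int32), depth=2)
    predictions = slim.fully_connected(images, 2, activation_fn=None)
    slim.losses.softmax_cross_entropy(predictions, labels)
    total_loss = slim.losses.get_total_loss()

    # create_train_op ties the loss to the optimizer; train() runs the loop.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    train_op = slim.learning.create_train_op(total_loss, optimizer)
    slim.learning.train(train_op, logdir='/tmp/slim_model', number_of_steps=10)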
  /external/valgrind/memcheck/
mc_errors.c 230 const HChar *loss = "?"; local
232 case Unreached: loss = "definitely lost"; break;
233 case IndirectLeak: loss = "indirectly lost"; break;
234 case Possible: loss = "possibly lost"; break;
235 case Reachable: loss = "still reachable"; break;
237 return loss;
242 const HChar *loss = "?"; local
244 case Unreached: loss = "Leak_DefinitelyLost"; break;
245 case IndirectLeak: loss = "Leak_IndirectlyLost"; break;
246 case Possible: loss = "Leak_PossiblyLost"; break
    [all...]
  /external/libnl/lib/idiag/
idiag.c 190 __ADD(TCP_CA_Loss, loss)
  /external/tensorflow/tensorflow/contrib/bayesflow/python/ops/
variational_sgd_optimizer.py 40 Note: If a prior is included in the loss, it should be scaled by
51 minibatch in the data set. Note: Assumes the loss is taken as the mean
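
(Sketch of the scaling note above. The exact factor is truncated in this result, so dividing the prior by the dataset size is an assumption: with a per-minibatch mean data loss, an unscaled prior would be re-counted on every minibatch.)

    import tensorflow as tf

    num_examples = 1000                              # hypothetical dataset size
    weights = tf.Variable(tf.ones([4]))
    data_loss = tf.reduce_mean(tf.square(weights))   # stand-in minibatch mean loss
    prior_loss = tf.nn.l2_loss(weights)              # stand-in prior term
    # Scale the prior so it contributes once per pass over the data.
    total_loss = data_loss + prior_loss / num_examples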
  /external/tensorflow/tensorflow/contrib/timeseries/examples/
predict.py 60 # Use the (default) normal likelihood loss to adaptively fit the
63 loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
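
(A plausible reconstruction of the estimator construction around the snippet above; the periodicity and window sizes are assumptions, not quoted from the example file.)

    import tensorflow as tf

    estimator = tf.contrib.timeseries.ARRegressor(
        periodicities=200,
        input_window_size=30,
        output_window_size=10,
        num_features=1,
        loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)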
  /external/tensorflow/tensorflow/core/profiler/g3doc/
advise.md 71 top 1 graph node: seq2seq/loss/sampled_sequence_loss/sequence_loss_by_example/SoftmaxCrossEntropyWithLogits_11, cpu: 89.92ms, accelerator: 0us, total: 89.92ms
73 top 3 graph node: seq2seq/loss/sampled_sequence_loss/sequence_loss_by_example/SoftmaxCrossEntropyWithLogits_19, cpu: 73.02ms, accelerator: 0us, total: 73.02ms

