    Searched refs:predictions (Results 1 - 25 of 150)


  /external/tensorflow/tensorflow/lite/models/smartreply/
predictor_test.cc 71 std::vector<PredictorResponse> predictions; local
73 GetSegmentPredictions({"Welcome"}, *model_, /*config=*/{{}}, &predictions);
74 EXPECT_GT(predictions.size(), 0);
77 for (const auto &item : predictions) {
85 &predictions,
90 std::vector<PredictorResponse> predictions; local
93 &predictions);
94 EXPECT_GT(predictions.size(), 0);
97 for (const auto &item : predictions) {
104 EXPECT_THAT(&predictions, IncludeAnyResponesIn(std::unordered_set<string>
109 std::vector<PredictorResponse> predictions; local
137 std::vector<PredictorResponse> predictions; local
    [all...]
  /external/tensorflow/tensorflow/python/kernel_tests/
in_topk_op_test.py 31 def _validateInTopK(self, predictions, target, k, expected):
34 precision = nn_ops.in_top_k(predictions, target, k)
40 predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
42 self._validateInTopK(predictions, target, 1, [True, False])
45 predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
47 self._validateInTopK(predictions, target, 2, [False, True])
51 predictions = [[0.1, 0.3, 0.2, 0.2], [0.1, 0.3, 0.2, 0.2]]
53 self._validateInTopK(predictions, target, 2, [True, True])
56 predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
58 self._validateInTopK(predictions, target, 2, [False, True]
    [all...]
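These tests pin down membership in the top k scores per row. A minimal sketch of the same call, assuming the TF 1.x signature `tf.nn.in_top_k(predictions, targets, k)` that the test exercises:

    import tensorflow as tf

    # Row 0's highest score is at index 3; so is row 1's.
    predictions = tf.constant([[0.1, 0.3, 0.2, 0.4],
                               [0.1, 0.2, 0.3, 0.4]])
    targets = tf.constant([3, 2])  # true class ID per example
    in_top_1 = tf.nn.in_top_k(predictions, targets, k=1)

    with tf.Session() as sess:
        print(sess.run(in_top_1))  # [ True False]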
metrics_test.py 563 predictions=array_ops.ones((10, 1)),
573 predictions=array_ops.ones((10, 1)),
582 predictions=array_ops.ones((10, 1)),
589 predictions = array_ops.ones((10, 3))
592 metrics.accuracy(labels, predictions)
596 predictions = array_ops.ones((10, 3))
600 metrics.accuracy(labels, predictions, weights)
604 predictions = random_ops.random_uniform(
608 accuracy, update_op = metrics.accuracy(labels, predictions)
625 # Create the queue that populates the predictions
    [all...]
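`tf.metrics.accuracy` is a streaming metric: it returns a value tensor plus an `update_op`, with state kept in local variables. A minimal sketch of the pattern these tests rely on:

    import tensorflow as tf

    labels = tf.constant([1, 2, 3, 4])
    predictions = tf.constant([1, 2, 3, 0])
    accuracy, update_op = tf.metrics.accuracy(labels, predictions)

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())  # metric state lives in local vars
        sess.run(update_op)                         # accumulate this batch's counts
        print(sess.run(accuracy))                   # 0.75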
  /external/tensorflow/tensorflow/python/ops/
confusion_matrix.py 34 labels, predictions, expected_rank_diff=0, name=None):
41 But, for example, if `labels` contains class IDs and `predictions` contains 1
42 probability per class, we expect `predictions` to have 1 more dimension than
44 `labels` if `rank(predictions) - rank(labels) == 0`, and
45 `predictions` if `rank(predictions) - rank(labels) == 2`.
51 labels: Label values, a `Tensor` whose dimensions match `predictions`.
52 predictions: Predicted values, a `Tensor` of arbitrary dimensions.
53 expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
57 Tuple of `labels` and `predictions`, possibly with last dim squeezed
    [all...]
metrics_impl.py 88 def _remove_squeezable_dimensions(predictions, labels, weights):
91 Squeezes last dim of `predictions` or `labels` if their rank differs by 1
94 new rank of `predictions`.
102 predictions: Predicted values, a `Tensor` of arbitrary dimensions.
103 labels: Optional label `Tensor` whose dimensions match `predictions`.
105 `predictions`.
108 Tuple of `predictions`, `labels` and `weights`. Each of them possibly has
111 predictions = ops.convert_to_tensor(predictions)
113 labels, predictions = confusion_matrix.remove_squeezable_dimensions
    [all...]
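Both files implement the same squeeze rule: drop the trailing size-1 dimension of whichever tensor is one rank larger than expected. A static-shape-only sketch of that rule (the real helpers also handle dynamic ranks and weights):

    import tensorflow as tf

    def remove_squeezable_dimensions_sketch(labels, predictions, expected_rank_diff=0):
        # Squeeze the last dim of whichever tensor is one rank "too big".
        rank_diff = predictions.shape.ndims - labels.shape.ndims
        if rank_diff == expected_rank_diff + 1:
            predictions = tf.squeeze(predictions, [-1])
        elif rank_diff == expected_rank_diff - 1:
            labels = tf.squeeze(labels, [-1])
        return labels, predictions

    labels = tf.placeholder(tf.int64, [None])            # [batch]
    predictions = tf.placeholder(tf.float32, [None, 1])  # [batch, 1]
    labels, predictions = remove_squeezable_dimensions_sketch(labels, predictions)
    print(predictions.shape)  # (?,) -- the trailing dim was squeezed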
  /external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/
model_fn_test.py 47 def create_model_fn_ops(self, predictions, output_alternatives,
52 predictions=predictions,
70 self.assertEqual(model_fn_ops.predictions, estimator_spec.predictions)
82 predictions = self.create_predictions()
84 predictions, None, mode=model_fn.ModeKeys.INFER)
90 predictions = self.create_predictions()
92 constants.ProblemType.LINEAR_REGRESSION, predictions)}
94 predictions, output_alternatives, mode=model_fn.ModeKeys.INFER
    [all...]
logistic_regressor.py 43 `(features, labels, mode) -> (predictions, loss, train_op)`.
44 Expects the returned predictions to be probabilities in [0.0, 1.0].
54 predictions, loss, train_op = model_fn(features, labels, mode)
58 predictions=predictions,
64 predictions=predictions,
70 'predictions': predictions
109 `(features, labels, mode) -> (predictions, loss, train_op)`
    [all...]
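The contract quoted above is `(features, labels, mode) -> (predictions, loss, train_op)`, with predictions expected to be probabilities in [0.0, 1.0]. A tiny illustrative model_fn honoring that contract (the one-layer model and names are mine, not the library's):

    import tensorflow as tf

    def my_model_fn(features, labels, mode):
        # features: [batch, d] floats; labels: [batch] floats in {0, 1}.
        logits = tf.layers.dense(features, 1)
        predictions = tf.sigmoid(tf.squeeze(logits, [-1]))  # probabilities in [0, 1]
        loss = tf.losses.log_loss(labels=labels, predictions=predictions)
        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(
            loss, global_step=tf.train.get_or_create_global_step())
        return predictions, loss, train_op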
model_fn.py 72 'predictions', 'loss', 'train_op', 'eval_metric_ops',
88 predictions=None,
98 For a multi-headed model, the predictions dict here will contain the outputs
117 predictions: Predictions `Tensor` or dict of `Tensor`.
146 get_graph_from_inputs((predictions, loss, train_op))
168 # Validate predictions.
169 if predictions is None:
171 raise ValueError('Missing predictions.')
173 if isinstance(predictions, dict)
    [all...]
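The validation path enforces that inference-time predictions exist. A plain-Python sketch of the check the snippet hints at (the helper name and exact mode handling are assumptions):

    def _validate_predictions(predictions, mode):
        # Outside of pure training, a model must say what it predicts.
        if predictions is None:
            if mode == 'infer':
                raise ValueError('Missing predictions.')
        elif isinstance(predictions, dict):
            # Multi-headed models supply one tensor per named output.
            for key, value in predictions.items():
                if value is None:
                    raise ValueError('Missing prediction for key %s.' % key)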
  /external/tensorflow/tensorflow/contrib/metrics/python/ops/
confusion_matrix_ops.py 25 def confusion_matrix(labels, predictions, num_classes=None, dtype=dtypes.int32,
28 return cm.confusion_matrix(labels=labels, predictions=predictions,
metric_ops_test.py 487 predictions=array_ops.ones((10, 1)),
496 predictions=array_ops.ones((10, 1)),
504 predictions=array_ops.ones((10, 1)),
510 predictions = array_ops.ones((10, 3))
513 metrics.streaming_accuracy(predictions, labels)
516 predictions = array_ops.ones((10, 3))
520 metrics.streaming_accuracy(predictions, labels, weights)
523 predictions = random_ops.random_uniform(
527 accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
543 # Create the queue that populates the predictions
    [all...]
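The contrib streaming metrics take `(predictions, labels)`, the reverse of core `tf.metrics`. A minimal sketch, assuming `tf.contrib.metrics` is importable (TF 1.x only):

    import tensorflow as tf

    predictions = tf.constant([1, 2, 3, 4])
    labels = tf.constant([1, 2, 3, 0])
    # Note the argument order: contrib puts predictions first.
    accuracy, update_op = tf.contrib.metrics.streaming_accuracy(predictions, labels)

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)
        print(sess.run(accuracy))  # 0.75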
metric_ops.py 49 'order of the labels and predictions arguments has been switched.')
50 def streaming_true_positives(predictions,
61 predictions: The predicted values, a `Tensor` of arbitrary dimensions. Will
64 `predictions`. Will be cast to `bool`.
80 ValueError: If `predictions` and `labels` have mismatched shapes, or if
81 `weights` is not `None` and its shape doesn't match `predictions`, or if
86 predictions=predictions,
95 'order of the labels and predictions arguments has been switched.')
96 def streaming_true_negatives(predictions,
    [all...]
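The deprecation notice says these moved to core with the argument order switched. A sketch of the core replacement, `tf.metrics.true_positives(labels, predictions)`:

    import tensorflow as tf

    labels = tf.constant([True, True, False, False])
    predictions = tf.constant([True, False, True, False])
    tp, tp_update = tf.metrics.true_positives(labels, predictions)

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(tp_update)
        print(sess.run(tp))  # 1.0 -- only the first example is a true positive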
  /external/tensorflow/tensorflow/contrib/learn/python/learn/
metric_spec_test.py 35 def _fn0(predictions, labels, weights=None):
36 self.assertEqual("p1_value", predictions)
41 def _fn1(predictions, targets, weights=None):
42 self.assertEqual("p1_value", predictions)
150 def _fn(predictions):
151 self.assertEqual(predictions_, predictions)
191 def _fn0(predictions, labels):
192 self.assertEqual("p1_value", predictions)
196 def _fn1(predictions, targets):
197 self.assertEqual("p1_value", predictions)
    [all...]
metric_spec.py 37 '`labels`, `predictions`, and optionally `weights`.')
59 _CANONICAL_PREDICTIONS_ARG = 'predictions'
116 This returns a function that takes only named args `labels`, `predictions`,
119 passed (usually by name, but positionally if both it and `predictions` need
122 passed by name. Otherwise, `predictions` are passed positionally as the
134 Function accepting only named args `labels, `predictions`, and `weights`,
160 # Both labels and predictions are named args.
162 _sentinel=None, labels=None, predictions=None, weights=None):
166 predictions_arg: predictions,
174 # labels is a named arg, and first. predictions is not a named arg, so w
    [all...]
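metric_spec's job is adapting arbitrary metric functions (some say `targets`, omit `weights`, or take positional args) to one canonical keyword signature. A rough pure-Python sketch of that adaptation; the helper name is mine, and the real code is stricter when weights are unsupported:

    import inspect

    def canonicalize_metric_fn(fn):
        """Wrap `fn` so callers can always use labels=/predictions=/weights=."""
        args = inspect.getargspec(fn).args  # Python 2-era API, matching the codebase
        def adapted(labels=None, predictions=None, weights=None):
            kwargs = {'targets' if 'targets' in args else 'labels': labels,
                      'predictions': predictions}
            if weights is not None and 'weights' in args:
                kwargs['weights'] = weights
            return fn(**kwargs)
        return adapted

    def _fn1(predictions, targets):
        return predictions, targets

    print(canonicalize_metric_fn(_fn1)(labels='l', predictions='p'))  # ('p', 'l')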
  /external/tensorflow/tensorflow/contrib/learn/python/learn/utils/
export.py 95 def generic_signature_fn(examples, unused_features, predictions):
96 """Creates generic signature from given examples and predictions.
104 predictions: `Tensor` or `dict` of `Tensor`s.
116 if not isinstance(predictions, dict):
117 predictions = {'outputs': predictions}
118 tensors.update(predictions)
127 def classification_signature_fn(examples, unused_features, predictions):
128 """Creates classification signature from given examples and predictions.
133 predictions: `Tensor` or dict of tensors that contains the classes tenso
    [all...]
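Both signature helpers normalize a bare predictions Tensor into a dict before export. A minimal sketch of that normalization step:

    import tensorflow as tf

    def generic_signature_sketch(examples, predictions):
        # A bare tensor gets the default 'outputs' key so export code can
        # always walk a dict of named tensors.
        tensors = {'inputs': examples}
        if not isinstance(predictions, dict):
            predictions = {'outputs': predictions}
        tensors.update(predictions)
        return tensors

    examples = tf.placeholder(tf.string, [None])
    preds = tf.placeholder(tf.float32, [None, 10])
    print(sorted(generic_signature_sketch(examples, preds)))  # ['inputs', 'outputs']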
  /external/tensorflow/tensorflow/python/ops/losses/
losses_impl.py 213 labels, predictions, weights=1.0, scope=None,
222 `weights` matches the shape of `predictions`, then the loss of each
223 measurable element of `predictions` is scaled by the corresponding value of
227 labels: The ground truth output tensor, same dimensions as 'predictions'.
228 predictions: The predicted outputs.
241 ValueError: If the shape of `predictions` doesn't match that of
243 or `predictions` is None.
252 if predictions is None:
253 raise ValueError("predictions must not be None.")
255 (predictions, labels, weights)) as scope
    [all...]
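The docstring's point is that an element-wise `weights` tensor rescales each element's loss. A sketch using `tf.losses.mean_squared_error`, one of the losses defined in this file:

    import tensorflow as tf

    labels = tf.constant([[0.0, 1.0], [0.0, 1.0]])
    predictions = tf.constant([[1.0, 1.0], [1.0, 1.0]])
    weights = tf.constant([[1.0, 1.0], [0.0, 0.0]])  # mask out the second row
    loss = tf.losses.mean_squared_error(labels, predictions, weights=weights)

    with tf.Session() as sess:
        # Default reduction divides by the number of nonzero weights: 1.0 / 2.
        print(sess.run(loss))  # 0.5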
  /external/tensorflow/tensorflow/contrib/learn/python/learn/ops/
losses_ops.py 41 predictions = nn.xw_plus_b(tensor_in, weights, biases)
42 if len(labels.get_shape()) == 1 and len(predictions.get_shape()) == 2:
43 predictions = array_ops_.squeeze(predictions, axis=[1])
44 return predictions, losses.mean_squared_error(labels, predictions)
58 predictions, use `tf.argmax` on the returned probabilities.
74 `tuple` of softmax predictions and loss `Tensor`s.
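The regression head above computes `xw_plus_b`, squeezes the [batch, 1] output down to the rank-1 labels, then applies mean squared error. A graph-construction sketch of the same steps:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 3])
    labels = tf.placeholder(tf.float32, [None])   # rank-1 targets
    weights = tf.Variable(tf.zeros([3, 1]))
    biases = tf.Variable(tf.zeros([1]))

    predictions = tf.nn.xw_plus_b(x, weights, biases)  # shape [batch, 1]
    predictions = tf.squeeze(predictions, axis=[1])    # shape [batch], matches labels
    loss = tf.losses.mean_squared_error(labels, predictions)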
  /external/tensorflow/tensorflow/contrib/tensor_forest/client/
eval_metrics.py 46 def _accuracy(predictions, targets, weights=None):
48 labels=targets, predictions=predictions, weights=weights)
83 def _predictions(predictions, unused_targets, **unused_kwargs):
84 return predictions
95 def _precision(predictions, targets, weights=None):
97 labels=targets, predictions=predictions, weights=weights)
100 def _precision_at_thresholds(predictions, targets, weights=None):
103 predictions=array_ops.slice(predictions, [0, 1], [-1, 1])
    [all...]
  /external/tensorflow/tensorflow/contrib/losses/python/losses/
loss_ops.py 240 def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
247 `weights` matches the shape of `predictions`, then the loss of each
248 measurable element of `predictions` is scaled by the corresponding value of
252 predictions: The predicted outputs.
253 labels: The ground truth output tensor, same dimensions as 'predictions'.
255 [batch_size] or a tensor whose shape matches `predictions`.
262 ValueError: If the shape of `predictions` doesn't match that of `labels` or
266 [predictions, labels, weights]) as scope:
267 predictions.get_shape().assert_is_compatible_with(labels.get_shape())
268 predictions = math_ops.cast(predictions, dtypes.float32
    [all...]
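These contrib losses open with the same preamble: assert static shape compatibility, then cast predictions to float32. A sketch of that preamble against the core equivalent loss:

    import tensorflow as tf

    predictions = tf.placeholder(tf.int32, [None, 4])
    labels = tf.placeholder(tf.float32, [None, 4])

    # Raises ValueError at graph-build time if static shapes cannot match.
    predictions.get_shape().assert_is_compatible_with(labels.get_shape())
    predictions = tf.cast(predictions, tf.float32)  # align dtypes for the arithmetic
    loss = tf.losses.absolute_difference(labels, predictions)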
  /external/tensorflow/tensorflow/contrib/metrics/python/metrics/
classification.py 32 def accuracy(predictions, labels, weights=None, name=None):
33 """Computes the percentage of times that predictions matches labels.
36 predictions: the predicted values, a `Tensor` whose dtype and shape
55 if not labels.dtype.is_compatible_with(predictions.dtype):
56 raise ValueError('Dtypes of predictions and labels should match. '
57 'Given: predictions (%r) and labels (%r)' %
58 (predictions.dtype, labels.dtype))
59 with ops.name_scope(name, 'accuracy', values=[predictions, labels]):
61 math_ops.equal(predictions, labels), dtypes.float32)
70 def f1_score(labels, predictions, weights=None, num_thresholds=200
    [all...]
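Stripped of the dtype check and weights, `accuracy` here is just equal, cast, mean, with no streaming state. A sketch:

    import tensorflow as tf

    predictions = tf.constant([1, 2, 3, 4])
    labels = tf.constant([1, 2, 3, 0])
    is_correct = tf.cast(tf.equal(predictions, labels), tf.float32)
    accuracy = tf.reduce_mean(is_correct)

    with tf.Session() as sess:
        print(sess.run(accuracy))  # 0.75 -- single-shot, unlike tf.metrics.accuracy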
classification_test.py 126 predictions=array_ops.ones((10, 1)),
142 predictions=array_ops.ones((10, 1)),
151 predictions=array_ops.ones((10, 1)),
158 predictions = random_ops.random_uniform(
162 f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
180 predictions = constant_op.constant(inputs, dtype=dtypes.float32)
182 f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=3)
190 predictions = constant_op.constant(
193 f1, f1_op = classification.f1_score(predictions, labels, num_thresholds=1)
205 predictions = constant_op.constant(inputs, dtype=dtypes.float32
    [all...]
  /external/tensorflow/tensorflow/contrib/distribute/python/
metrics_v1_test.py 33 # First four batches of x: labels, predictions -> (labels == predictions)
39 lambda x: {"labels": x % 5, "predictions": x % 3}).batch(
44 # First four batches of labels, predictions: {TP, FP, TN, FN}
52 "predictions": [True, True, False, False]}).repeat().batch(
57 # First four batches of labels, predictions: {TP, FP, TN, FN}
65 "predictions": [1.0, 0.75, 0.25, 0.]}).repeat().batch(
72 "predictions": [1., .75, .25, 0.]}).repeat()
153 predictions = x["predictions"]
    [all...]
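The distributed test derives labels and predictions from a `tf.data` pipeline. The same pattern on one device, where the first batch yields 3 of 4 matches:

    import tensorflow as tf

    dataset = tf.data.Dataset.range(8).map(
        lambda x: {"labels": x % 5, "predictions": x % 3}).batch(4)
    batch = dataset.make_one_shot_iterator().get_next()
    accuracy, update_op = tf.metrics.accuracy(batch["labels"], batch["predictions"])

    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())
        sess.run(update_op)        # batch 0: x = 0..3, mismatch only at x = 3
        print(sess.run(accuracy))  # 0.75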
  /external/tensorflow/tensorflow/contrib/eager/python/
metrics_impl.py 366 """Calculates how often `predictions` matches `labels`.
376 def call(self, labels, predictions, weights=None):
379 For example, if labels is [1, 2, 3, 4] and predictions is [0, 2, 3, 4]
383 `labels` and `predictions` should have the same shape and type.
388 predictions: Tensor with the predicted label for each example.
395 array_ops.shape(labels), array_ops.shape(predictions),
396 message="Shapes of labels and predictions are unequal")
397 matches = math_ops.equal(labels, predictions)
401 return labels, predictions
402 return labels, predictions, weight
    [all...]
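The eager metric first asserts the two shapes agree, then averages element-wise equality. A sketch under TF 1.x eager execution (`tf.assert_equal` stands in for the internal check_ops call):

    import tensorflow as tf
    tf.enable_eager_execution()

    labels = tf.constant([1, 2, 3, 4])
    predictions = tf.constant([0, 2, 3, 4])
    tf.assert_equal(tf.shape(labels), tf.shape(predictions),
                    message="Shapes of labels and predictions are unequal")
    matches = tf.cast(tf.equal(labels, predictions), tf.float32)
    print(float(tf.reduce_mean(matches)))  # 0.75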
  /external/tensorflow/tensorflow/contrib/linear_optimizer/python/kernel_tests/
sdca_ops_test.py 208 def get_binary_predictions_for_logistic(predictions, cutoff=0.5):
210 math_ops.greater_equal(predictions,
211 array_ops.ones_like(predictions) * cutoff),
215 def get_binary_predictions_for_hinge(predictions):
217 math_ops.greater_equal(predictions, array_ops.zeros_like(predictions)),
262 predictions = lr.predictions(examples)
278 predicted_labels = get_binary_predictions_for_logistic(predictions)
310 predictions = lr.predictions(examples
    [all...]
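The two helpers binarize raw model outputs: logistic predictions against a probability cutoff, hinge predictions against zero. A sketch of the logistic case:

    import tensorflow as tf

    def binary_predictions_sketch(predictions, cutoff=0.5):
        # 1 where the predicted probability clears the cutoff, else 0.
        return tf.cast(tf.greater_equal(predictions, cutoff), tf.int32)

    probs = tf.constant([0.1, 0.5, 0.9])
    with tf.Session() as sess:
        print(sess.run(binary_predictions_sketch(probs)))  # [0 1 1]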
  /external/tensorflow/tensorflow/contrib/boosted_trees/python/utils/
losses.py 30 def per_example_squared_hinge_loss(labels, weights, predictions):
31 loss = losses.hinge_loss(labels=labels, logits=predictions, weights=weights)
35 def per_example_logistic_loss(labels, weights, predictions):
36 """Logistic loss given labels, example weights and predictions.
41 predictions: Rank 2 (N, 1) tensor of per-example predictions.
49 labels=labels, logits=predictions)
60 def per_example_quantile_regression_loss(labels, weights, predictions,
71 predictions: Rank 2 (N, D) tensor of per-example predictions
    [all...]
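`per_example_logistic_loss` bottoms out in sigmoid cross-entropy on logits. A sketch with the public op and the rank-2 (N, 1) shapes the docstring describes:

    import tensorflow as tf

    labels = tf.constant([[1.0], [0.0]])
    logits = tf.constant([[2.0], [2.0]])  # per-example predictions, as logits
    per_example = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)

    with tf.Session() as sess:
        print(sess.run(per_example))  # approx. [[0.127], [2.127]]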
  /external/tensorflow/tensorflow/contrib/distribute/python/examples/
simple_estimator_example.py 42 predictions = {"logits": logits}
43 return tf.estimator.EstimatorSpec(mode, predictions=predictions)
97 predictions = [prediction_iterable.next() for _ in range(10)]
98 print("Prediction results: {}".format(predictions))

Completed in 1734 milliseconds
