/external/tensorflow/tensorflow/contrib/nn/python/ops/ |
cross_entropy.py | 29 labels,
32 """Computes softmax cross entropy between `logits` and `labels`.
43 need not be. All that is required is that each row of `labels` is
47 If using exclusive `labels` (wherein one and only
54 `logits` and `labels` must have the same shape `[batch_size, num_classes]`
59 labels: Each row `labels[i]` must be a valid probability distribution.
68 labels=labels, logits=logits, dim=dim, name=name)
76 labels, [all...] |
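The cross_entropy.py hits quote the docstring of a softmax cross-entropy helper that accepts "soft" labels: each row of `labels` only has to be a valid probability distribution, not a one-hot vector. A minimal NumPy sketch of that quantity (an illustration, not the contrib op itself):

    import numpy as np

    def softmax_xent(labels, logits):
        # Numerically stabilized log-softmax along the class dimension.
        shifted = logits - logits.max(axis=1, keepdims=True)
        log_softmax = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
        # Per-example cross entropy: -sum_c labels[i, c] * log_softmax[i, c].
        return -(labels * log_softmax).sum(axis=1)

    logits = np.array([[2.0, 1.0, 0.1]])
    labels = np.array([[0.7, 0.2, 0.1]])   # a valid distribution, not one-hot
    print(softmax_xent(labels, logits))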
/external/toolchain-utils/crosperf/ |
results_organizer_unittest.py | 8 We create some labels, benchmark_runs and then create a ResultsOrganizer,
137 labels = [mock_instance.label1, mock_instance.label2]
140 benchmark_runs[0] = BenchmarkRun('b1', benchmarks[0], labels[0], 1, '', '',
142 benchmark_runs[1] = BenchmarkRun('b2', benchmarks[0], labels[0], 2, '', '',
144 benchmark_runs[2] = BenchmarkRun('b3', benchmarks[0], labels[1], 1, '', '',
146 benchmark_runs[3] = BenchmarkRun('b4', benchmarks[0], labels[1], 2, '', '',
148 benchmark_runs[4] = BenchmarkRun('b5', benchmarks[1], labels[0], 1, '', '',
150 benchmark_runs[5] = BenchmarkRun('b6', benchmarks[1], labels[0], 2, '', '',
152 benchmark_runs[6] = BenchmarkRun('b7', benchmarks[1], labels[1], 1, '', '',
154 benchmark_runs[7] = BenchmarkRun('b8', benchmarks[1], labels[1], 2, '', '' [all...] |
/external/autotest/utils/ |
labellib_unittest.py | 44 labels = ['webcam', 'pool:suites']
45 mapping = labellib.LabelsMapping(labels)
46 self.assertEqual(mapping.getlabels(), labels)
49 labels = ['webcam', 'pool:suites', 'pool:party']
50 mapping = labellib.LabelsMapping(labels)
54 labels = ['ohse:tsubame', 'webcam']
55 mapping = labellib.LabelsMapping(labels)
59 labels = ['webcam', 'exec', 'method']
60 mapping = labellib.LabelsMapping(labels)
64 labels = ['class:protecta', 'method:metafalica', 'exec:chronicle_key' [all...] |
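The labellib tests exercise labels of the form `key:value` (e.g. `pool:suites`) mixed with plain labels (`webcam`). The sketch below only illustrates that naming convention in plain Python; it is not the `labellib.LabelsMapping` implementation, whose exact behaviour (for example with duplicate keys) is not shown in the hits above:

    def split_label(label):
        """Split 'key:value' into (key, value); plain labels get value None."""
        key, sep, value = label.partition(':')
        return (key, value) if sep else (label, None)

    labels = ['webcam', 'pool:suites', 'class:protecta']
    print([split_label(l) for l in labels])
    # [('webcam', None), ('pool', 'suites'), ('class', 'protecta')]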
/external/autotest/contrib/ |
print_host_labels.py | 16 labels = host.get_labels() variable
17 print 'Labels:'
18 print labels
|
/external/tensorflow/tensorflow/contrib/boosted_trees/python/utils/ |
losses.py | 30 def per_example_squared_hinge_loss(labels, weights, predictions):
31 loss = losses.hinge_loss(labels=labels, logits=predictions, weights=weights)
35 def per_example_logistic_loss(labels, weights, predictions):
36 """Logistic loss given labels, example weights and predictions.
39 labels: Rank 2 (N, 1) tensor of per-example labels.
47 labels = math_ops.cast(labels, dtypes.float32)
49 labels=labels, logits=predictions [all...] |
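The boosted_trees losses.py hits describe a per-example logistic loss over rank-2 `(N, 1)` labels and predictions (logits). A hedged NumPy sketch of the standard numerically stable sigmoid cross-entropy such a loss is built on; the function name and shapes follow the docstring above, the formula is the usual stable form rather than the contrib code itself:

    import numpy as np

    def per_example_logistic_loss(labels, weights, predictions):
        labels = labels.astype(np.float32)
        # Stable form of -[y*log(sigmoid(x)) + (1-y)*log(1-sigmoid(x))].
        losses = (np.maximum(predictions, 0) - predictions * labels
                  + np.log1p(np.exp(-np.abs(predictions))))
        return weights * losses

    labels = np.array([[1.0], [0.0]])
    weights = np.array([[1.0], [2.0]])
    logits = np.array([[0.3], [-1.2]])
    print(per_example_logistic_loss(labels, weights, logits))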
/external/tensorflow/tensorflow/contrib/libsvm/python/kernel_tests/ |
decode_libsvm_op_test.py | 37 sparse_features, labels = libsvm_ops.decode_libsvm(
42 self.assertAllEqual(labels.get_shape().as_list(), [3])
44 features, labels = sess.run([features, labels])
45 self.assertAllEqual(labels, [1, 1, 2])
55 sparse_features, labels = libsvm_ops.decode_libsvm(
60 self.assertAllEqual(labels.get_shape().as_list(), [3, 2])
62 features, labels = sess.run([features, labels])
63 self.assertAllEqual(labels, [[1, 1], [1, 1], [2, 2]] [all...] |
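decode_libsvm_op_test.py checks that `decode_libsvm` turns LIBSVM-formatted text into labels plus sparse features. The op itself is not reproduced here; the sketch below only parses the well-known LIBSVM text format (`label index:value index:value ...`) in plain Python:

    def parse_libsvm_line(line):
        parts = line.split()
        label = int(parts[0])
        features = {}
        for item in parts[1:]:
            index, value = item.split(':')
            features[int(index)] = float(value)
        return label, features

    print(parse_libsvm_line("1 6:3.4 50:0.2"))
    # (1, {6: 3.4, 50: 0.2})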
/external/tensorflow/tensorflow/contrib/kernel_methods/python/ |
losses_test.py | 37 labels = constant_op.constant([0, 1])
39 _ = losses.sparse_multiclass_hinge_loss(labels, logits)
42 """An error is raised when labels have invalid shape."""
45 labels = constant_op.constant([1, 0], shape=(1, 1, 2))
47 _ = losses.sparse_multiclass_hinge_loss(labels, logits)
53 labels = constant_op.constant([1, 0], shape=(2,))
56 _ = losses.sparse_multiclass_hinge_loss(labels, logits, weights)
59 """An error is raised when labels have invalid shape."""
62 labels = constant_op.constant([1, 0], dtype=dtypes.float32)
64 _ = losses.sparse_multiclass_hinge_loss(labels, logits [all...] |
/external/autotest/server/hosts/ |
afe_store.py | 54 return host_info.HostInfo(host.labels, host.attributes)
64 # copy of HostInfo from the AFE and then add/remove labels / attribtes
66 # parallel, we'll end up with corrupted labels / attributes.
69 list(set(old_info.labels) - set(new_info.labels)))
71 list(set(new_info.labels) - set(old_info.labels)))
75 def _remove_labels_on_afe(self, labels):
76 """Requests the AFE to remove the given labels.
78 @param labels: Remove these [all...] |
/external/tensorflow/tensorflow/python/ops/ |
confusion_matrix.py | 34 labels, predictions, expected_rank_diff=0, name=None):
41 But, for example, if `labels` contains class IDs and `predictions` contains 1
43 `labels`, so `expected_rank_diff` would be 1. In this case, we'd squeeze
44 `labels` if `rank(predictions) - rank(labels) == 0`, and
45 `predictions` if `rank(predictions) - rank(labels) == 2`.
51 labels: Label values, a `Tensor` whose dimensions match `predictions`.
53 expected_rank_diff: Expected result of `rank(predictions) - rank(labels)`.
57 Tuple of `labels` and `predictions`, possibly with last dim squeezed.
60 [labels, predictions]) [all...] |
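The `remove_squeezable_dimensions` docstring quoted above says the helper squeezes a trailing size-1 dimension of `labels` or `predictions` so that `rank(predictions) - rank(labels)` equals `expected_rank_diff`. A NumPy sketch of that rule as described (assumed behaviour for illustration, not the TensorFlow implementation):

    import numpy as np

    def remove_squeezable_dimensions(labels, predictions, expected_rank_diff=0):
        rank_diff = predictions.ndim - labels.ndim
        if rank_diff == expected_rank_diff + 1 and predictions.shape[-1] == 1:
            predictions = np.squeeze(predictions, axis=-1)   # squeeze predictions
        elif rank_diff == expected_rank_diff - 1 and labels.shape[-1] == 1:
            labels = np.squeeze(labels, axis=-1)              # squeeze labels
        return labels, predictions

    labels = np.zeros((4, 1))        # trailing size-1 dim
    predictions = np.zeros((4,))
    print(remove_squeezable_dimensions(labels, predictions)[0].shape)  # (4,)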
metrics_impl.py | 88 def _remove_squeezable_dimensions(predictions, labels, weights):
91 Squeezes last dim of `predictions` or `labels` if their rank differs by 1
103 labels: Optional label `Tensor` whose dimensions match `predictions`.
108 Tuple of `predictions`, `labels` and `weights`. Each of them possibly has
112 if labels is not None:
113 labels, predictions = confusion_matrix.remove_squeezable_dimensions(
114 labels, predictions)
115 predictions.get_shape().assert_is_compatible_with(labels.get_shape())
118 return predictions, labels, None
124 return predictions, labels, weight [all...] |
/external/python/cpython3/Lib/encodings/ |
idna.py | 162 labels = result.split(b'.')
163 for label in labels[:-1]:
166 if len(labels[-1]) >= 64:
171 labels = dots.split(input)
172 if labels and not labels[-1]:
174 del labels[-1]
177 for label in labels:
204 labels = input.split(b".")
206 if labels and len(labels[-1]) == 0 [all...] |
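All of the idna.py hits (this copy and the edk2 / cpython2 copies below) revolve around the same pattern: split a hostname on dots into labels, drop a single trailing empty label (a trailing dot), and bound each label's length. A standalone sketch of that pattern, for reference alongside the snippets:

    def split_labels(hostname):
        labels = hostname.split(".")
        trailing_dot = ""
        if labels and labels[-1] == "":
            del labels[-1]          # keep the trailing dot out of the label list
            trailing_dot = "."
        for label in labels:
            if not 0 < len(label) < 64:
                raise UnicodeError("label empty or too long")
        return labels, trailing_dot

    print(split_labels("www.example.org."))
    # (['www', 'example', 'org'], '.')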
/external/tensorflow/tensorflow/contrib/sparsemax/python/ops/ |
sparsemax_loss.py | 28 def sparsemax_loss(logits, sparsemax, labels, name=None):
37 labels: A `Tensor`. Must have the same type as `logits`.
45 [logits, sparsemax, labels]) as name:
48 labels = ops.convert_to_tensor(labels, name="labels")
65 q_part = labels * (0.5 * labels - z)
66 # Fix the case where labels = 0 and z = -inf, where q_part would
73 math_ops.logical_and(math_ops.equal(labels, 0), math_ops.is_inf(z)) [all...] |
/external/tensorflow/tensorflow/contrib/losses/python/losses/ |
loss_ops.py | 240 def absolute_difference(predictions, labels=None, weights=1.0, scope=None):
253 labels: The ground truth output tensor, same dimensions as 'predictions'.
262 ValueError: If the shape of `predictions` doesn't match that of `labels` or
266 [predictions, labels, weights]) as scope:
267 predictions.get_shape().assert_is_compatible_with(labels.get_shape())
269 labels = math_ops.cast(labels, dtypes.float32)
270 losses = math_ops.abs(math_ops.subtract(predictions, labels))
276 "of the predictions and labels arguments has been changed.")
289 If `label_smoothing` is nonzero, smooth the labels towards 1/2 [all...] |
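The `absolute_difference` hits show the core computation: `|predictions - labels|`, weighted per example. A rough NumPy equivalent for orientation only; the TensorFlow versions apply a more elaborate weighted reduction than the plain mean shown here:

    import numpy as np

    def absolute_difference(predictions, labels, weights=1.0):
        losses = np.abs(predictions - labels) * weights
        return losses.mean()

    print(absolute_difference(np.array([1.0, 2.0, 4.0]),
                              np.array([1.5, 2.0, 3.0])))   # 0.5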
/external/tensorflow/tensorflow/python/ops/losses/ |
losses_impl.py | 213 labels, predictions, weights=1.0, scope=None,
227 labels: The ground truth output tensor, same dimensions as 'predictions'.
230 `labels`, and must be broadcastable to `labels` (i.e., all dimensions must
238 shape as `labels`; otherwise, it is scalar.
242 `labels` or if the shape of `weights` is invalid or if `labels`
250 if labels is None:
251 raise ValueError("labels must not be None.")
255 (predictions, labels, weights)) as scope [all...] |
/external/tensorflow/tensorflow/contrib/metrics/python/ops/ |
confusion_matrix_ops.py | 25 def confusion_matrix(labels, predictions, num_classes=None, dtype=dtypes.int32,
28 return cm.confusion_matrix(labels=labels, predictions=predictions,
|
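The contrib wrapper above simply forwards to the core `confusion_matrix` op. For reference, a tiny NumPy version with the same `(labels, predictions)` argument order; the row/column convention shown (rows = true labels, columns = predictions) is assumed here rather than taken from the snippet:

    import numpy as np

    def confusion_matrix(labels, predictions, num_classes=None):
        if num_classes is None:
            num_classes = int(max(labels.max(), predictions.max())) + 1
        cm = np.zeros((num_classes, num_classes), dtype=np.int32)
        for t, p in zip(labels, predictions):
            cm[t, p] += 1        # row: true label, column: predicted label
        return cm

    print(confusion_matrix(np.array([0, 1, 1, 2]), np.array([0, 1, 2, 2])))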
/device/linaro/bootloader/edk2/AppPkg/Applications/Python/Python-2.7.10/Lib/encodings/ |
idna.py | 157 labels = dots.split(input)
158 if labels and len(labels[-1])==0:
160 del labels[-1]
163 for label in labels:
178 labels = dots.split(input)
183 labels = input.split(".")
185 if labels and len(labels[-1]) == 0:
187 del labels[-1] [all...] |
/device/linaro/bootloader/edk2/AppPkg/Applications/Python/Python-2.7.2/Lib/encodings/ |
idna.py | 157 labels = dots.split(input)
158 if labels and len(labels[-1])==0:
160 del labels[-1]
163 for label in labels:
178 labels = dots.split(input)
183 labels = input.split(".")
185 if labels and len(labels[-1]) == 0:
187 del labels[-1] [all...] |
/external/grpc-grpc/tools/run_tests/sanity/ |
check_test_filtering.py | 40 def test_filtering(self, changed_files=[], labels=_LIST_OF_LANGUAGE_LABELS):
43 default labels should be able to match all jobs
45 :param labels: list of job labels that should be skipped
61 if "sanity" in job.labels:
63 all_jobs = [job for job in all_jobs if "sanity" not in job.labels]
65 if "sanity" in job.labels:
68 job for job in filtered_jobs if "sanity" not in job.labels
73 for label in labels:
75 self.assertNotIn(label, job.labels) [all...] |
/external/python/cpython2/Lib/encodings/ |
idna.py | 157 labels = dots.split(input)
158 if labels and len(labels[-1])==0:
160 del labels[-1]
163 for label in labels:
178 labels = dots.split(input)
183 labels = input.split(".")
185 if labels and len(labels[-1]) == 0:
187 del labels[-1 [all...] |
/external/tensorflow/tensorflow/contrib/metrics/python/metrics/ |
classification_test.py | 39 labels = array_ops.placeholder(dtypes.int32, shape=[None])
40 acc = classification.accuracy(pred, labels)
43 labels: [1, 1, 0, 0]})
49 labels = array_ops.placeholder(dtypes.bool, shape=[None])
50 acc = classification.accuracy(pred, labels)
53 labels: [1, 1, 0, 0]})
59 labels = array_ops.placeholder(dtypes.int64, shape=[None])
60 acc = classification.accuracy(pred, labels)
63 labels: [1, 1, 0, 0]})
69 labels = array_ops.placeholder(dtypes.string, shape=[None] [all...] |
/external/autotest/frontend/client/src/autotest/common/table/ |
MultipleListFilter.java | 22 JSONArray labels = new JSONArray(); local
26 labels.set(labels.size(),
30 return labels;
|
/external/tensorflow/tensorflow/compiler/tests/ |
dense_layer_test.py | 35 """Returns all labels in run_metadata."""
36 labels = []
39 labels.append(node_stats.timeline_label)
40 return labels
43 def InLabels(labels, substr):
44 """Returns true iff one of the labels contains substr."""
45 return any(substr in x for x in labels)
50 def countXlaOps(self, labels):
51 """Count how many XlaCompile/XlaRun labels are present."""
52 xla_compile_count = sum("XlaCompile(" in x for x in labels) [all...] |
/external/tensorflow/tensorflow/core/kernels/ |
sparse_xent_op.cc | 33 Status CheckInvalidLabelIndex(const Tensor& labels, int64 max_index) {
34 if (labels.NumElements() == 0) return Status::OK();
35 const auto label_values = labels.vec<Index>();
45 "). Label values: ", labels.SummarizeValue(labels.NumElements()));
58 const Tensor& labels = context->input(1); variable
62 OP_REQUIRES(context, TensorShapeUtils::IsVector(labels.shape()),
63 errors::InvalidArgument("labels must be 1-D, but got shape ",
64 labels.shape().DebugString()));
65 OP_REQUIRES(context, logits.dim_size(0) == labels.dim_size(0) [all...] |
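The C++ kernel above validates that every sparse label index falls inside `[0, num_classes)` before computing the loss. The same check, sketched in Python rather than the C++ kernel, to make the rule explicit:

    import numpy as np

    def check_invalid_label_index(labels, num_classes):
        labels = np.asarray(labels)
        if labels.size == 0:
            return                                   # nothing to validate
        bad = (labels < 0) | (labels >= num_classes)
        if bad.any():
            raise ValueError(
                "Received a label value outside [0, %d): %s"
                % (num_classes, labels[bad].tolist()))

    check_invalid_label_index([0, 2, 1], num_classes=3)   # passes
    # check_invalid_label_index([0, 3], num_classes=3)    # would raise ValueError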
/external/tensorflow/tensorflow/contrib/learn/python/learn/learn_io/ |
dask_io.py | 93 def extract_dask_labels(labels):
94 """Extract data from dask.Series or dask.DataFrame for labels.
101 labels: A distributed dask.DataFrame or dask.Series with exactly one
114 if isinstance(labels, dd.DataFrame):
115 ncol = labels.columns
116 elif isinstance(labels, dd.Series):
117 ncol = labels.name
118 if isinstance(labels, allowed_classes):
120 raise ValueError('Only one column for labels is allowed.')
121 return _construct_dask_df_with_divisions(labels) [all...] |
/external/tensorflow/tensorflow/contrib/layers/python/layers/ |
target_column_test.py | 36 labels = constant_op.constant([[0.], [1.], [1.]])
38 5. / 3, sess.run(target_column.loss(prediction, labels, {})))
46 labels = constant_op.constant([[0.], [1.], [1.]])
49 sess.run(target_column.loss(prediction, labels, features)),
53 sess.run(target_column.training_loss(prediction, labels, features)),
63 labels = constant_op.constant([[1.], [0.]])
68 sess.run(target_column.loss(logits, labels, {})),
77 labels = constant_op.constant([[1.], [0.]])
82 sess.run(target_column.loss(logits, labels, features)),
89 labels = constant_op.constant([[1.], [0.], [1.]] [all...] |