/frameworks/ml/nn/common/OperationsUtils.cpp
  342: const Shape& weights,
  347: NN_OPS_CHECK(input.type == weights.type);
  356: NN_OPS_CHECK(getNumberOfDimensions(weights) == 2);
  358: uint32_t num_units = getSizeOfDimension(weights, 0);
  359: uint32_t input_size = getSizeOfDimension(weights, 1);
  [all...]
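The checks above come from a fully connected op's shape validation, where the 2-D weights tensor is laid out as [num_units, input_size]. A rough Python sketch of the same idea, using the simplified convention that the last input axis is the input_size (the helper name and that convention are assumptions for illustration, not the NNAPI code):

```python
def fully_connected_output_shape(input_shape, weights_shape):
    """Validate a fully-connected layer's shapes (simplified sketch).

    Assumes weights is 2-D with shape [num_units, input_size], as the
    checks above suggest.
    """
    if len(weights_shape) != 2:
        raise ValueError("weights must be 2-D")
    num_units, input_size = weights_shape
    if input_shape[-1] != input_size:
        raise ValueError("last input dimension must equal input_size")
    batch = 1
    for d in input_shape[:-1]:
        batch *= d
    # Everything but the last axis collapses into a batch dimension.
    return [batch, num_units]
```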
/development/cmds/monkey/src/com/android/commands/monkey/MonkeySourceRandom.java
  168: Logger.err.println("** Event weights > 100%");
  172: // if the user specified all of the weights, then they need to be 100%
  174: Logger.err.println("** Event weights != 100%");
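The monkey tool treats its per-event-type weights as percentages: they may never exceed 100, and if the user overrides every weight they must sum to exactly 100. A minimal sketch of that kind of check, in Python rather than the tool's Java and with made-up names:

```python
def validate_event_weights(weights, user_set_all):
    """Reject weight tables that cannot form a probability distribution."""
    total = sum(weights.values())
    if total > 100.0:
        raise ValueError("** Event weights > 100%")
    if user_set_all and abs(total - 100.0) > 1e-9:
        # If the user specified all of the weights, they need to be 100%.
        raise ValueError("** Event weights != 100%")
```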
/external/deqp/framework/randomshaders/rsgProgramExecutor.cpp
  327: tcu::Vec2 weights = computeGridCellWeights(cellWidth, cellHeight, x, y);
  334: weights.x(), weights.y());
/external/llvm/unittests/IR/IRBuilderTest.cpp
  95: MDNode *Weights = MDBuilder(Ctx).createBranchWeights(42, 13);
  96: BI = Builder.CreateCondBr(Builder.getTrue(), TBB, FBB, Weights);
  102: EXPECT_EQ(Weights, TI->getMetadata(LLVMContext::MD_prof));
/external/tensorflow/tensorflow/compiler/xla/tests/convolution_variants_test.cc
  1154: auto weights = builder.ConstantR4FromArray4D<float>(
  1173: auto weights = builder.ConstantR4FromArray4D<float>(
  1194: auto weights = builder.ConstantR4FromArray4D<float>(
  1215: auto weights = builder.ConstantR4FromArray4D<float>(
  1307: auto weights =
  1346: auto weights = builder.ConstantLiteral(*weights_literal);
  [all...]

/external/tensorflow/tensorflow/compiler/xla/tests/while_test.cc
  379: // Add 1 to the iteration variable and permute the weights.
  432: // Add 1 to the iteration variable permute the weights.
  494: auto weights = builder.GetTupleElement(prev, 1);
  496: auto new_weights = builder.Add(weights, input);
  662: auto weights = builder.GetTupleElement(prev, 1);
  664: auto new_weights = builder.Add(weights, input);
  675: auto weights = builder.GetTupleElement(prev, 1);
  677: auto new_weights = builder.Add(weights, input);
  741: auto weights = builder.GetTupleElement(prev, 1);
  743: auto new_weights = builder.Add(weights, input)
  808: auto weights = builder.GetTupleElement(prev, 1);
  [all...]
/external/tensorflow/tensorflow/contrib/boosted_trees/kernels/prediction_ops.cc
  249: const std::vector<float> weights = ensemble_resource->GetTreeWeights();
  252: weights, &dropped_trees, &original_weights));
  298: // Output dropped trees and original weights.
/external/tensorflow/tensorflow/contrib/lite/kernels/conv.cc
  209: // convolution, it expects the filter weights to be transposed compared to
  210: // the normal TF Lite buffer format. Typical TF Lite weights are
  255: // Because we're treating the filter weights as a matrix when we do the
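These comments describe the common trick of lowering a convolution to a matrix multiply: the 4-D filter is viewed as a 2-D matrix and multiplied against im2col patches, which is why the kernel cares about how (and whether transposed) the weights are laid out. A rough NumPy illustration; the `[out_channels, kh, kw, in_channels]` layout and helper names are assumptions for the sketch, not TF Lite's actual internals:

```python
import numpy as np

def conv2d_as_gemm(input_nhwc, filters_ohwi):
    """Valid-padding, stride-1 convolution expressed as a single matmul."""
    n, h, w, c = input_nhwc.shape
    o, kh, kw, ci = filters_ohwi.shape
    assert c == ci
    oh, ow = h - kh + 1, w - kw + 1

    # im2col: one row per output position, one column per filter element.
    patches = np.empty((n * oh * ow, kh * kw * ci), dtype=input_nhwc.dtype)
    row = 0
    for b in range(n):
        for y in range(oh):
            for x in range(ow):
                patches[row] = input_nhwc[b, y:y + kh, x:x + kw, :].ravel()
                row += 1

    # Treat the filter weights as a matrix; whether it needs transposing is
    # purely a question of which layout the kernel expects.
    weight_matrix = filters_ohwi.reshape(o, kh * kw * ci)
    out = patches @ weight_matrix.T
    return out.reshape(n, oh, ow, o)
```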
/external/tensorflow/tensorflow/contrib/lite/kernels/lstm.cc
  51: // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
  160: // Making sure the peephole weights are there all or none.
  358: // Since we have already checked that weights are all there or none, we can
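A peephole connection multiplies the cell state by a diagonal matrix, which is the same as an elementwise product with a length-n_cell vector; that is why the peephole weights are stored as {n_cell} tensors. A toy sketch of an input gate with an optional peephole term (NumPy, illustrative names only, not the kernel's code):

```python
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def input_gate(x, h_prev, c_prev, w_xi, w_hi, b_i, w_ci=None):
    """Peephole term: diag(w_ci) @ c_prev == w_ci * c_prev elementwise."""
    pre = x @ w_xi + h_prev @ w_hi + b_i
    if w_ci is not None:  # peephole weights are either all present or all absent
        pre = pre + w_ci * c_prev
    return sigmoid(pre)
```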
/external/tensorflow/tensorflow/contrib/lite/kernels/unidirectional_sequence_lstm.cc
  51: // Peephole weights tensors of size {n_cell}, representing a diagonal matrix.
  160: // Making sure the peephole weights are there all or none.
  361: // Since we have already checked that weights are all there or none, we can
/external/tensorflow/tensorflow/contrib/model_pruning/python/layers/rnn_cells.py
  77: share weights, but to avoid mistakes we require reuse=True in such
  111: # Add masked_weights in the weights namescope so as to make it easier
  260: # Add masked_weights in the weights namescope so as to make it easier
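The model_pruning layers pair each weight tensor with a same-shaped mask and use the elementwise product ("masked weights") in the forward pass, so zeroed entries are effectively pruned. A minimal sketch of that idea in plain NumPy (not the contrib API):

```python
import numpy as np

def masked_matmul(x, weights, mask):
    """Apply a pruning mask: zeros in `mask` remove the corresponding weights."""
    masked_weights = weights * mask          # the "masked_weights" the layers expose
    return x @ masked_weights

# Prune roughly the 50% smallest-magnitude weights of a toy layer.
w = np.random.randn(8, 4)
threshold = np.percentile(np.abs(w), 50)
mask = (np.abs(w) > threshold).astype(w.dtype)
y = masked_matmul(np.random.randn(2, 8), w, mask)
```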
/external/tensorflow/tensorflow/contrib/rnn/ops/lstm_ops.cc
  87: use_peephole: Whether to use peephole weights.
  240: use_peephole: Whether to use peephole weights.
  319: use_peephole: Whether to use peephole weights.
/external/tensorflow/tensorflow/contrib/rnn/python/ops/core_rnn_cell.py
  171: weights = vs.get_variable(
  176: res = math_ops.matmul(args[0], weights)
  178: res = math_ops.matmul(array_ops.concat(args, 1), weights)
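This is the usual RNN-cell linear-layer pattern: several inputs are concatenated along the feature axis and multiplied by one shared weight matrix, which is equivalent to summing separate matmuls against row slices of that matrix. A small NumPy sketch of the equivalence (names are illustrative):

```python
import numpy as np

def linear(args, weights, bias=None):
    """Concatenate inputs on the feature axis and apply one weight matrix."""
    if len(args) == 1:
        res = args[0] @ weights
    else:
        res = np.concatenate(args, axis=1) @ weights
    return res if bias is None else res + bias

x = np.random.randn(3, 4)      # e.g. the cell input
h = np.random.randn(3, 5)      # e.g. the previous hidden state
w = np.random.randn(9, 6)      # one matrix covering both inputs

# Same result as x @ w[:4] + h @ w[4:], done with a single matmul.
out = linear([x, h], w)
assert np.allclose(out, x @ w[:4] + h @ w[4:])
```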
/external/tensorflow/tensorflow/core/kernels/resize_bilinear_op.cc
  92: * and the linear interpolation weights.
  224: // Compute the cached interpolation weights on the x and y dimensions.
  229: // Scale x interpolation weights to avoid a multiplication during iteration.
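Bilinear resize only needs, for each output row and column, the two source indices it straddles and a fractional blend weight, so those can be computed once per axis and reused across the whole image. A simplified sketch of that precomputation and lookup (align_corners / half-pixel-center refinements of the real kernel are ignored):

```python
import numpy as np

def interpolation_weights(out_size, in_size):
    """For each output index: lower/upper source index and the blend weight."""
    scale = in_size / out_size
    pos = np.arange(out_size) * scale
    lower = np.floor(pos).astype(int)
    upper = np.minimum(lower + 1, in_size - 1)
    lerp = pos - lower                      # weight of the upper neighbour
    return lower, upper, lerp

def resize_bilinear(img, out_h, out_w):
    """Resize a 2-D (grayscale) image using the cached per-axis weights."""
    y0, y1, wy = interpolation_weights(out_h, img.shape[0])
    x0, x1, wx = interpolation_weights(out_w, img.shape[1])
    top = img[y0][:, x0] * (1 - wx) + img[y0][:, x1] * wx
    bot = img[y1][:, x0] * (1 - wx) + img[y1][:, x1] * wx
    return top * (1 - wy)[:, None] + bot * wy[:, None]
```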
/external/tensorflow/tensorflow/python/keras/_impl/keras/layers/lstm_test.py
  114: units, return_sequences=False, stateful=True, weights=None)
  167: weights=None,
  189: weights=None,
/external/tensorflow/tensorflow/python/kernel_tests/sparse_xent_op_test.py
  206: weights = variables.Variable(random_ops.truncated_normal([2], stddev=1.0))
  207: weights_with_zeros = array_ops.stack([array_ops.zeros([2]), weights],
  218: _ = gradients_impl.hessians(loss, [weights])
/external/tensorflow/tensorflow/tools/graph_transforms/README.md
  51: quantizing the weights, or optimize away batch normalization or other
  71: quantize weights, you'll need to run
  136: normalization by pre-multiplying the weights for convolutions.
  196: largest contributors to the file size are the weights passed in to convolutional
  199: it's possible to change the representation of those weights in a lossy way
  204: provide raw data that compresses more easily. By default the weights are stored
  207: the weights so that nearby numbers are stored as exactly the same values, the
  236: As a further step, you can store the weights into eight-bit values directly.
  260: So far we've been concentrating on weights because those generally take up the
  380: multiplies the convolution's (or matrix multiplication's) weights with the Mu
  [all...]
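These README lines are about shrinking a frozen graph: rounding weights so nearby values become identical (which compresses well) and, as a further step, storing them as eight-bit values plus a float range. A rough sketch of both ideas on a plain array; the arithmetic here is a simplification for illustration, not the transforms' exact implementation:

```python
import numpy as np

def round_weights(w, num_steps=256):
    """Snap weights to a small set of evenly spaced values so they compress well."""
    lo, hi = w.min(), w.max()
    step = (hi - lo) / (num_steps - 1) or 1.0
    return lo + np.round((w - lo) / step) * step   # still stored as float32

def quantize_weights(w):
    """Store weights as uint8 plus the float range needed to reconstruct them."""
    lo, hi = float(w.min()), float(w.max())
    scale = (hi - lo) / 255.0 or 1.0
    q = np.round((w - lo) / scale).astype(np.uint8)
    return q, lo, hi                               # dequantize: lo + q * scale
```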
/external/freetype/include/freetype/internal/ftobjs.h
  325: FT_Byte* weights );
  332: FT_LcdFiveTapFilter weights );
  395: /* If subpixel rendering is activated, the LCD filtering weights */
  420: FT_LcdFiveTapFilter lcd_weights; /* filter weights, if any */
  [all...]
/external/icu/icu4c/source/i18n/collationiterator.h
  234: * The code point is used for fallbacks, context and implicit weights.
  255: * map to their own implicit primary weights (for UTF-16),
/external/swiftshader/third_party/LLVM/include/llvm/Analysis/PathNumbering.h
  182: // increments along a spanning tree. The sum over the edge weights gives
  209: // edge weights that determine the path number.
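This header belongs to Ball-Larus path profiling: edges of an acyclic CFG get integer weights such that summing the weights along any entry-to-exit path produces a unique path number. A compact sketch of that classic weight assignment (not this header's exact API):

```python
def ball_larus_edge_weights(succs, exit_node):
    """Assign edge weights so each entry->exit path sums to a distinct number.

    `succs` maps node -> list of successor nodes of an acyclic CFG.
    Returns (num_paths, weights) where weights[(u, v)] is the edge increment.
    """
    num_paths = {}
    weights = {}

    def count(v):
        if v in num_paths:
            return num_paths[v]
        if v == exit_node:
            num_paths[v] = 1
            return 1
        total = 0
        for w in succs.get(v, []):
            weights[(v, w)] = total      # paths already numbered via earlier edges
            total += count(w)
        num_paths[v] = total
        return total

    for node in succs:
        count(node)
    return num_paths, weights
```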
/external/swiftshader/third_party/LLVM/lib/Transforms/Instrumentation/OptimalEdgeProfiling.cpp
  132: // Calculate a Maximum Spanning Tree with the edge weights determined by
  133: // ProfileEstimator. ProfileEstimator also assign weights to the virtual
/external/tensorflow/tensorflow/compiler/tests/ftrl_test.py
  228: The addition of this parameter which places a constant pressure on weights
  230: weights will tend to have smaller magnitudes with this parameter set.
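The docstring fragment refers to FTRL's L2-shrinkage parameter, which adds a penalty term proportional to the weight itself so optimization constantly pulls weights toward zero. A deliberately simplified sketch of that effect on a plain gradient step (not TensorFlow's actual FTRL update):

```python
def sgd_step_with_l2_shrinkage(w, grad, lr=0.1, l2_shrinkage=0.01):
    """Add 2 * l2_shrinkage * w to the gradient: a constant pull toward zero."""
    grad_with_shrinkage = grad + 2.0 * l2_shrinkage * w
    return w - lr * grad_with_shrinkage
```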
/external/tensorflow/tensorflow/contrib/gan/python/estimator/python/gan_estimator_test.py
  67: '%s/fully_connected/weights:0' % generator_scope_name,
  70: '%s/fully_connected/weights:0' % discriminator_scope_name,
/external/tensorflow/tensorflow/contrib/kernel_methods/python/kernel_estimators_test.py
  182: # dimension 2 so the model will learn a 2-dimension weights vector (and a
  184: # a 30-dimensional feature space and so the weights variable will also have
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/debug.py
  173: weights. It is used to down weight or boost examples during training. It
  287: weights. It is used to down weight or boost examples during training. It
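Those docstrings describe a per-example weight column: each example's loss term is scaled by its weight, so a weight below 1 down-weights the example and a weight above 1 boosts it. A tiny sketch of a weighted mean loss (generic, not the estimator code):

```python
def weighted_mean_loss(per_example_losses, example_weights):
    """Scale each example's loss by its weight, then normalize by total weight."""
    weighted = [l * w for l, w in zip(per_example_losses, example_weights)]
    return sum(weighted) / sum(example_weights)

# A weight of 2.0 boosts the second example; 0.5 down-weights the third.
loss = weighted_mean_loss([0.3, 1.2, 0.8], [1.0, 2.0, 0.5])
```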