/external/tensorflow/tensorflow/contrib/model_pruning/python/
  pruning.py
     18  # elementwise masking of weights
     19  apply_mask(weights)
     91  """Create a mask for the weights.
    113  """Create a scalar threshold for the weights.
    256  # Add masked_weights in the weights namescope so as to make it easier
    288  """Get sparsity of the weights.
    471  def _update_mask(self, weights, threshold):
    475  the threshold value such that 'desired_sparsity' fraction of weights
    479  weights: The weight tensor that needs to be masked.
    485  new_threshold: The new value of the threshold based on weights, an
    [all...]
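A minimal sketch (not the pruning.py implementation) of the idea behind
_update_mask above: pick a threshold so that a `desired_sparsity` fraction
of the weights falls below it, then mask elementwise. Names here are
illustrative.

    import numpy as np

    def magnitude_mask(weights, desired_sparsity):
        # Threshold such that `desired_sparsity` of the magnitudes lie below it.
        threshold = np.percentile(np.abs(weights), desired_sparsity * 100.0)
        return (np.abs(weights) >= threshold).astype(weights.dtype)

    w = np.random.randn(4, 4).astype(np.float32)
    masked = w * magnitude_mask(w, 0.8)   # elementwise masking of weights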
/prebuilts/misc/darwin-x86_64/freetype/include/freetype2/
  ftlcdfil.h
     73  * weights (as given by FT_LCD_FILTER_DEFAULT) are no longer optimal, as
     75  * gamma correction.  To preserve color neutrality, weights for a FIR5
     77  * and the FIR weights should be
     83  * This formula generates equal weights for all the color primaries
     85  * set of weights is
     91  * where `a' has value 0x30 and `b' value 0x20.  The weights in filter
    209  * Use this function to override the filter weights selected by
    219  * weights ::
    221  * uses them to specify the filter weights.
    241  unsigned char *weights );
    [all...]
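A small illustration (not FreeType code) of the constraint the header
describes: the FIR5 filter weights are five bytes in 1/256 units, and a
symmetric filter {a, b, c, b, a} keeps overall brightness when its taps
sum to 0x100. Deriving `c` from that assumption, with `a` and `b` taken
from the values quoted in the header comment:

    a, b = 0x30, 0x20
    c = 0x100 - 2 * (a + b)           # make a + b + c + b + a == 0x100
    weights = bytes([a, b, c, b, a])  # the 5-byte array the API expects
    assert sum(weights) == 0x100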
/external/tensorflow/tensorflow/contrib/learn/python/learn/estimators/
  head.py
    192  weights. It is used to down weight or boost examples during training. It
    230  weights. It is used to down weight or boost examples during training. It
    280  weights. It is used to down weight or boost examples during training. It
    292  loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as
    293  parameter and returns a weighted scalar loss. `weights` should be
    352  weights. It is used to down weight or boost examples during training. It
    395  weights. It is used to down weight or boost examples during training. It
    406  loss_fn: Optional function that takes (`labels`, `logits`, `weights`) as
    407  parameter and returns a weighted scalar loss. `weights` should be
    455  loss_weights: optional list of weights to be used to merge losses fro
    [all...]
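For context, a hedged example of the loss_fn contract these docstrings
describe -- a function taking (`labels`, `logits`, `weights`) and
returning a weighted scalar loss. An illustrative TF1-style sketch, not
the contrib.learn source:

    import tensorflow as tf

    def loss_fn(labels, logits, weights):
        # Per-example losses, down-weighted or boosted by `weights`.
        per_example = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.cast(labels, tf.float32), logits=logits)
        return tf.reduce_mean(per_example * weights)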
/external/libopus/src/
  mlp_train.c
     58  net->weights = malloc((nbLayers-1)*sizeof(net->weights));
     59  net->best_weights = malloc((nbLayers-1)*sizeof(net->weights));
     62  net->weights[i] = malloc((topo[i]+1)*topo[i+1]*sizeof(net->weights[0][0]));
     63  net->best_weights[i] = malloc((topo[i]+1)*topo[i+1]*sizeof(net->weights[0][0]));
     83  net->weights[0][k*(topo[0]+1)+j+1] = randn(std);
     90  sum += inMean[k]*net->weights[0][j*(topo[0]+1)+k+1];
     91  net->weights[0][j*(topo[0]+1)] = -sum;
    101  net->weights[nbLayers-2][j*(topo[nbLayers-2]+1)] = mean
    [all...]
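A rough Python analogue of the allocation above (libopus is C; this only
shows the shapes): layer i holds a (topo[i]+1) x topo[i+1] weight matrix,
the extra row serving as the bias, filled from a normal distribution as
on line 83. The topology values are hypothetical.

    import numpy as np

    def alloc_weights(topo, std=0.1, rng=np.random.default_rng(0)):
        return [rng.normal(0.0, std, size=(topo[i] + 1, topo[i + 1]))
                for i in range(len(topo) - 1)]

    layers = alloc_weights([25, 16, 2])   # e.g. 25 inputs, 16 hidden, 2 outputs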
/external/swiftshader/third_party/LLVM/lib/Analysis/
  BranchProbabilityInfo.cpp
     41  DenseMap<Edge, uint32_t> *Weights;
     48  // Weights are for internal use only. They are used by heuristics to help to
     51  // Using "Loop Branch Heuristics" we predict weights of edges for the
    122  : Weights(W), BP(BP), LI(LI) {
    125  // Metadata Weights
    157  // Ensure there are weights for all of the successors. Note that the first
    162  // Build up the final weights that will be used in a temporary buffer, but
    166  SmallVector<uint32_t, 2> Weights;
    167  Weights.reserve(TI->getNumSuccessors());
    172  Weights.push_back
    [all...]
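The metadata weights gathered above are per-successor integers; turning
them into branch probabilities is essentially normalization. A sketch of
that step (not the LLVM code, which also clamps values and guards
against overflow):

    def branch_probabilities(weights):
        total = sum(weights)
        return [w / total for w in weights]

    branch_probabilities([2000, 1])   # a hot edge vs. a cold edge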
/external/tensorflow/tensorflow/python/keras/_impl/keras/applications/
  inception_resnet_v2.py
    198  weights='imagenet',
    205  Optionally loads weights pre-trained on ImageNet.
    210  The model and the weights are compatible with TensorFlow, Theano and
    222  weights: one of `None` (random initialization),
    224  or the path to the weights file to be loaded.
    245  if no `weights` argument is specified.
    251  ValueError: in case of invalid argument for `weights`,
    254  if not (weights in {'imagenet', None} or os.path.exists(weights)):
    255  raise ValueError('The `weights` argument should be either
    [all...]

  inception_v3.py
    108  weights='imagenet',
    115  Optionally loads weights pre-trained
    120  The model and the weights are compatible with both
    129  weights: one of `None` (random initialization),
    131  or the path to the weights file to be loaded.
    154  if no `weights` argument is specified.
    160  ValueError: in case of invalid argument for `weights`,
    163  if not (weights in {'imagenet', None} or os.path.exists(weights)):
    164  raise ValueError('The `weights` argument should be either
    [all...]

  resnet50.py
    153  weights='imagenet',
    160  Optionally loads weights pre-trained
    166  The model and the weights are compatible with both
    174  weights: one of `None` (random initialization),
    176  or the path to the weights file to be loaded.
    199  if no `weights` argument is specified.
    205  ValueError: in case of invalid argument for `weights`,
    208  if not (weights in {'imagenet', None} or os.path.exists(weights)):
    209  raise ValueError('The `weights` argument should be either
    [all...]

  xception.py
     70  weights='imagenet',
     77  Optionally loads weights pre-trained
     89  weights: one of `None` (random initialization),
     91  or the path to the weights file to be loaded.
    113  if no `weights` argument is specified.
    119  ValueError: in case of invalid argument for `weights`,
    124  if not (weights in {'imagenet', None} or os.path.exists(weights)):
    125  raise ValueError('The `weights` argument should be either '
    128  'or the path to the weights file to be loaded.'
    [all...]

  nasnet.py
     27  Only the NASNet-A models, and their respective weights, which are suited
     86  weights=None,
    126  weights: `None` (random initialization) or
    127  `imagenet` (ImageNet weights)
    145  if no `weights` argument is specified.
    152  ValueError: In case of invalid argument for `weights`,
    162  if not (weights in {'imagenet', None} or os.path.exists(weights)):
    163  raise ValueError('The `weights` argument should be either '
    166  'or the path to the weights file to be loaded.'
    [all...]
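All five application modules above share the same validation pattern for
the `weights` argument; reconstructed from the snippets (the error
message is abridged across hit lines, so the full text below is a close
paraphrase):

    import os

    def check_weights_arg(weights):
        if not (weights in {'imagenet', None} or os.path.exists(weights)):
            raise ValueError('The `weights` argument should be either '
                             '`None` (random initialization), `imagenet` '
                             '(pre-training on ImageNet), '
                             'or the path to the weights file to be loaded.')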
/external/tensorflow/tensorflow/contrib/kfac/python/kernel_tests/
  estimator_test.py
     75  self.weights = variable_scope.get_variable(
     79  self.output = math_ops.matmul(self.inputs, self.weights) + self.bias
     81  # Only register the weights.
     83  params=(self.weights,), inputs=self.inputs, outputs=self.output)
     93  estimator.FisherEstimator([self.weights], 0.1, 0.2, self.layer_collection)
     98  estimator.FisherEstimator([self.weights, self.bias], 0.1, 0.2,
    102  # i.e. self.weights
    109  estimator.FisherEstimator([self.weights], 0.1, 0.2, self.layer_collection)
    113  estimator.FisherEstimator([self.weights], 0.1, 0.2, self.layer_collection,
    118  est = estimator.FisherEstimator([self.weights], 0.1, 0.2
    [all...]
/external/llvm/lib/Analysis/
  BlockFrequencyInfoImpl.cpp
     64  /// 1. Initialize by saving the sum of the weights in \a RemWeight and the
    117  Weights.push_back(Weight(Type, Node, Amount));
    136  static void combineWeightsBySorting(WeightList &Weights) {
    138  std::sort(Weights.begin(), Weights.end(),
    143  WeightList::iterator O = Weights.begin();
    144  for (WeightList::const_iterator I = O, L = O, E = Weights.end(); I != E;
    148  // Find the adjacent weights to the same node.
    154  Weights.erase(O, Weights.end())
    [all...]
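combineWeightsBySorting sorts the weight list and merges adjacent
entries that point at the same node. The same idea in a few lines of
Python (a sketch of the technique, not the LLVM implementation):

    from itertools import groupby

    def combine_weights_by_sorting(weights):
        # weights: list of (node, amount) pairs
        weights = sorted(weights, key=lambda w: w[0])
        return [(node, sum(amount for _, amount in group))
                for node, group in groupby(weights, key=lambda w: w[0])]

    combine_weights_by_sorting([('b', 3), ('a', 1), ('b', 2)])
    # -> [('a', 1), ('b', 5)]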
/external/opencv/ml/src/
  mlann_mlp.cpp
     97  weights = 0;
    110  weights = 0;
    127  cvFree( &weights );
    185  double* w = weights[i];
    187  // initialize weights using Nguyen-Widrow algorithm
    254  CV_CALL( weights = (double**)cvAlloc( (l_count+1)*sizeof(weights[0]) ));
    256  weights[0] = wbuf->data.db;
    257  weights[1] = weights[0] + l_dst[0]*2
    [all...]
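Line 187 names the Nguyen-Widrow algorithm. Its core step, sketched in
Python under the usual textbook formulation (OpenCV's version differs in
detail): draw uniform weights, then rescale each hidden unit's weight
vector to norm beta = 0.7 * h**(1/n) for h hidden units and n inputs.

    import numpy as np

    def nguyen_widrow(n_inputs, n_hidden, rng=np.random.default_rng(0)):
        w = rng.uniform(-0.5, 0.5, size=(n_hidden, n_inputs))
        beta = 0.7 * n_hidden ** (1.0 / n_inputs)
        return w * beta / np.linalg.norm(w, axis=1, keepdims=True)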
  mlboost.cpp
    185  const double* weights = ensemble->get_subtree_weights()->data.db;
    200  double w = weights[i];
    221  double w = weights[idx];
    229  double w = weights[idx];
    249  const double* weights = ensemble->get_subtree_weights()->data.db;
    252  const double* rcw0 = weights + n;
    263  double w = weights[idx];
    278  double w = weights[idx], w2 = w*w;
    303  double w = weights[idx];
    341  const double* weights = ensemble->get_subtree_weights()->data.db;
    445  const double* weights = ensemble->get_subtree_weights()->data.db;
    493  const double* weights = ensemble->get_subtree_weights()->data.db;
    575  const double* weights = ensemble->get_subtree_weights()->data.db;
    641  const double* weights = ensemble->get_subtree_weights()->data.db;
    702  const double* weights = ensemble->get_weights()->data.db;
    [all...]
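The per-sample `weights` arrays read throughout mlboost.cpp carry
boosting's example re-weighting. For reference, the textbook Discrete
AdaBoost update (not the OpenCV code itself): misclassified samples are
up-weighted, then the weights are renormalized.

    import numpy as np

    def adaboost_reweight(w, y, pred, alpha):
        # y and pred take values in {-1, +1}; alpha is the stage weight.
        w = w * np.exp(-alpha * y * pred)
        return w / w.sum()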
/external/tensorflow/tensorflow/contrib/learn/python/learn/
  models.py
     34  """Linear regression subgraph with zero-value initial weights and bias.
     47  """Logistic regression subgraph with zero-value initial weights and bias.
     72  The variables linear_regression.weights and linear_regression.bias are
     91  weights = vs.get_variable(
     92  'weights', [x.get_shape()[1], output_shape], dtype=dtype)
     95  weights = vs.get_variable(
     96  'weights', [x.get_shape()[1], output_shape],
    105  summary.histogram('%s.weights' % scope_name, weights)
    107  return losses_ops.mean_squared_error_regressor(x, y, weights, bias
    [all...]
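A condensed sketch of the linear_regression subgraph those snippets come
from (TF1-style variables matching this contrib.learn vintage; summaries
and the loss op are omitted). The docstring's "zero-value initial
weights and bias" becomes explicit zero initializers:

    import tensorflow as tf

    def linear_regression(x, output_dim):
        weights = tf.get_variable('weights', [int(x.shape[1]), output_dim],
                                  initializer=tf.zeros_initializer())
        bias = tf.get_variable('bias', [output_dim],
                               initializer=tf.zeros_initializer())
        return tf.matmul(x, weights) + bias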
/external/tensorflow/tensorflow/tools/graph_transforms/
  fold_batch_norms.cc
     31  // ops with the Mul baked into the convolution weights, to save computation
     71  Tensor weights = GetNodeTensorAttr(weights_node, "value");
     75  // there are columns in the weights.
     77  const int64 weights_cols = weights.shape().dim_size(weights_cols_index);
     85  // Multiply the original weights by the scale vector.
     86  auto weights_matrix = weights.flat_inner_dims<float>();
     87  Tensor scaled_weights(DT_FLOAT, weights.shape());
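The multiply on line 85 scales every column of the convolution weights
by a per-output-channel factor, so the trailing Mul node can be removed
from the graph. The arithmetic in NumPy (a sketch of the transform, not
the C++ code; shapes are illustrative):

    import numpy as np

    def fold_scale(weights, scale):
        # weights: (..., out_channels); scale: (out_channels,)
        return weights * scale   # broadcasts over the last axis

    w = np.ones((3, 3, 8, 16), np.float32)
    folded = fold_scale(w, np.linspace(0.5, 1.5, 16).astype(np.float32))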
/external/tensorflow/tensorflow/contrib/lite/kernels/
  embedding_lookup_sparse.cc
     30  // squares of the weights.
     36  // Tensor[3]: Weights to use for aggregation, float.
     50  // For instance, if params is a 10x20 matrix and ids, weights are:
     96  TfLiteTensor* weights = GetInput(context, node, 3);
     97  TF_LITE_ENSURE_EQ(context, NumDimensions(weights), 1);
     98  TF_LITE_ENSURE_EQ(context, weights->type, kTfLiteFloat32);
    103  SizeOfDimension(weights, 0));
    145  TfLiteTensor* weights = GetInput(context, node, 3);
    222  const float w = weights->data.f[i];
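Comment line 30 refers to the 'sqrtn' combiner: the weighted sum of the
gathered rows is divided by sqrt(sum of the squares of the weights). A
NumPy sketch of that aggregation, simplified to a single output row
(the ids and weights below are made up):

    import numpy as np

    def lookup_sparse(params, ids, weights, combiner='sqrtn'):
        acc = (params[ids] * weights[:, None]).sum(axis=0)
        if combiner == 'sqrtn':
            acc /= np.sqrt((weights ** 2).sum())
        elif combiner == 'mean':
            acc /= weights.sum()
        return acc

    params = np.arange(200.0).reshape(10, 20)   # the 10x20 matrix from line 50
    lookup_sparse(params, np.array([1, 3]), np.array([2.0, 0.5]))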
/external/tensorflow/tensorflow/contrib/quantize/python/
  fold_batch_norms.py
     83  # new weights = old weights * gamma / sqrt(variance + epsilon)
    100  # The shape of depthwise weights is different, so we need to reshape the
    103  weights = match.weight_tensor
    117  weights = math_ops.multiply(
    118  correction_scale, weights, name='correction_mult')
    121  weights, multiplier_tensor, name='mul_fold')
    293  a) The weights are quantized after scaling by gamma/sigma_mv. This enables
    294  smoother training as the scaling on the weights changes slowly, rather than
    572  weights = op_below.inputs[1
    [all...]
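The comment on line 83 is the whole transform in one line. Spelled out
numerically (a sketch of the arithmetic, not the graph rewrite itself):

    import numpy as np

    def fold_bn(weights, gamma, variance, epsilon=0.001):
        # new weights = old weights * gamma / sqrt(variance + epsilon)
        return weights * gamma / np.sqrt(variance + epsilon)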
/external/tensorflow/tensorflow/contrib/crf/
  README.md
     38  weights = tf.get_variable("weights", [num_features, num_tags])
     40  matricized_unary_scores = tf.matmul(matricized_x_t, weights)
/external/tensorflow/tensorflow/contrib/labeled_tensor/
  README.md
     37  weights = lt.LabeledTensor(tf.constant([0.1, 0.3, 0.6]),
     47  image * weights
     48  lt.matmul(image[0, :, :], weights)
/external/tensorflow/tensorflow/contrib/linear_optimizer/python/
  sdca_estimator_test.py
     41  'weights': constant_op.constant([[1.0], [1.0]])
     51  weight_column_name='weights')
     86  'weights': constant_op.constant([[1.0], [1.0], [1.0]])
     99  weight_column_name='weights',
    119  'weights':
    130  weight_column_name='weights')
    214  'weights':
    232  weight_column_name='weights')
    250  weights = [[3.0], [-1.2], [0.5]]
    251  y = np.dot(x, weights)
    [all...]
/external/tensorflow/tensorflow/python/estimator/
  warm_starting_util_test.py
    112  _, weights = self._create_prev_run_var(
    117  prev_val = np.concatenate([weights[0], weights[1]], axis=0)
    148  _, weights = self._create_prev_run_var(
    153  prev_val = np.concatenate([weights[0], weights[1]], axis=0)
    314  "linear_model/sc_int/weights", shape=[10, 1], initializer=ones())
    324  # Without warm-starting, the weights should be initialized using default
    337  # Verify weights were correctly warm-started.
    347  "linear_model/sc_hash/weights", shape=[15, 1], initializer=norms()
    [all...]
/external/tensorflow/tensorflow/contrib/eager/python/
  metrics_impl.py
    285  def call(self, values, weights=None):
    289  If the weights were specified as [1, 1, 0, 0] then the mean would be 2.
    293  weights: Optional weighting of each example. Defaults to 1.
    298  if weights is None:
    304  weights = math_ops.cast(weights, self.dtype)
    305  self.denom.assign_add(math_ops.reduce_sum(weights))
    306  values = math_ops.cast(values, self.dtype) * weights
    308  if weights is None:
    310  return values, weights
    [all...]
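The bookkeeping in call() reduces to two accumulators: sum(values *
weights) and sum(weights). A plain-Python sketch (not the eager metric
class), with values chosen so the docstring's example holds -- weights
[1, 1, 0, 0] give a mean of 2:

    import numpy as np

    class Mean:
        def __init__(self):
            self.numer = 0.0
            self.denom = 0.0

        def __call__(self, values, weights=None):
            values = np.asarray(values, dtype=float)
            w = (np.ones_like(values) if weights is None
                 else np.asarray(weights, dtype=float))
            self.numer += float((values * w).sum())
            self.denom += float(w.sum())

        def result(self):
            return self.numer / self.denom

    m = Mean()
    m([1, 3, 5, 7], weights=[1, 1, 0, 0])
    m.result()   # 2.0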
/external/icu/android_icu4j/src/main/java/android/icu/impl/coll/
  CollationKeys.java
    268  // Secondary level: Compress up to 33 common weights as 05..25 or 25..45.
    274  // Case level, lowerFirst: Compress up to 7 common weights as 1..7 or 7..13.
    280  // Case level, upperFirst: Compress up to 13 common weights as 3..15.
    286  // Tertiary level only (no case): Compress up to 97 common weights as 05..65 or 65..C5.
    292  // Tertiary with case, lowerFirst: Compress up to 33 common weights as 05..25 or 25..45.
    298  // Tertiary with case, upperFirst: Compress up to 33 common weights as 85..A5 or A5..C5.
    304  // Quaternary level: Compress up to 113 common weights as 1C..8C or 8C..FC.
    309  // Primary weights shifted to quaternary level must be encoded with
    389  // Shifted primary weights are lower than the common weight.
    495  // Append reverse weights. The level will be re-reversed later
    [all...]
/external/icu/icu4j/main/classes/collate/src/com/ibm/icu/impl/coll/
  CollationKeys.java
    264  // Secondary level: Compress up to 33 common weights as 05..25 or 25..45.
    270  // Case level, lowerFirst: Compress up to 7 common weights as 1..7 or 7..13.
    276  // Case level, upperFirst: Compress up to 13 common weights as 3..15.
    282  // Tertiary level only (no case): Compress up to 97 common weights as 05..65 or 65..C5.
    288  // Tertiary with case, lowerFirst: Compress up to 33 common weights as 05..25 or 25..45.
    294  // Tertiary with case, upperFirst: Compress up to 33 common weights as 85..A5 or A5..C5.
    300  // Quaternary level: Compress up to 113 common weights as 1C..8C or 8C..FC.
    305  // Primary weights shifted to quaternary level must be encoded with
    385  // Shifted primary weights are lower than the common weight.
    491  // Append reverse weights. The level will be re-reversed later
    [all...]
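One plausible reading of the compression those comments describe (a
sketch of the scheme, not the ICU implementation): a run of up to N
common weights is folded into a single byte, offset from the low or the
high end of the reserved range depending on whether the weight that
follows the run sorts below or above the common weight. For the
secondary level, N = 33 and the range is 05..45:

    def compress_common_run(run_length, next_is_lower, low=0x05, high=0x45):
        # Encode a run of 1..33 commons as 05..25 (next weight lower)
        # or as 45..25 (next weight higher).
        assert 1 <= run_length <= 33
        if next_is_lower:
            return low + (run_length - 1)    # 05..25
        return high - (run_length - 1)       # 45..25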