    Searched full:weights (Results 301 - 325 of 1517)


  /external/tensorflow/tensorflow/contrib/gan/python/losses/python/
losses_impl_test.py 106 weights = array_ops.ones_like(logits, dtype=dtypes.float32)
108 loss = self._g_loss_fn(logits, weights=weights)
138 self._discriminator_gen_outputs, weights=self._weights)
153 weights=constant_op.constant(self._weights))
159 weights = constant_op.constant(self._weights)
162 real_weights=weights, generated_weights=weights)
362 loss = self._g_loss_fn(weights=self._weights, **self._generator_kwargs)
377 weights=constant_op.constant(self._weights), **self._generator_kwargs
    [all...]
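A minimal NumPy sketch of what the `weights` argument does in these loss tests: it scales each example's loss before reduction. The helper name and the sum-of-weights normalization below are my assumptions, not the library's exact reduction.

    import numpy as np

    def weighted_sigmoid_xent(logits, labels, weights):
        # Numerically stable per-example loss: max(x, 0) - x*z + log(1 + exp(-|x|)).
        losses = np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))
        # Scale each example by its weight, then normalize by the total weight.
        return np.sum(weights * losses) / np.sum(weights)

    logits = np.array([2.0, -1.0, 0.5])
    labels = np.array([1.0, 0.0, 1.0])
    weights = np.ones_like(logits)  # uniform weights, as in the test above
    print(weighted_sigmoid_xent(logits, labels, weights))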
  /external/tensorflow/tensorflow/python/layers/
convolutional_test.py 275 weights = variables.trainable_variables()
276 # Check the names of weights in order.
277 self.assertTrue('kernel' in weights[0].name)
278 self.assertTrue('bias' in weights[1].name)
280 weights = sess.run(weights)
281 # Check that the kernel weights got initialized to ones (from scope)
282 self.assertAllClose(weights[0], np.ones((3, 3, 3, 32)))
284 self.assertAllClose(weights[1], np.zeros((32)))
654 weights = variables.trainable_variables(
    [all...]
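A runnable sketch of the pattern this test checks, assuming TF 1.x (tf.layers plus sessions); the input shape and the ones/zeros initializers are illustrative:

    import numpy as np
    import tensorflow as tf  # assumes TF 1.x, matching this source tree

    x = tf.placeholder(tf.float32, (None, 28, 28, 3))  # input size is illustrative
    y = tf.layers.conv2d(x, filters=32, kernel_size=3,
                         kernel_initializer=tf.ones_initializer(),
                         bias_initializer=tf.zeros_initializer())

    weights = tf.trainable_variables()
    # Check the names of weights in order, as the test does.
    assert 'kernel' in weights[0].name and 'bias' in weights[1].name

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        kernel, bias = sess.run(weights)
        np.testing.assert_allclose(kernel, np.ones((3, 3, 3, 32)))
        np.testing.assert_allclose(bias, np.zeros(32))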
core.py 51 argument (if not `None`), `kernel` is a weights matrix created by the layer,
61 If `None` (default), weights are initialized using the default
69 norm constraints or value constraints for layer weights). The function
78 share weights, but to avoid mistakes we require reuse=True in such cases.
79 reuse: Boolean, whether to reuse the weights of a previous layer
199 argument (if not `None`), `kernel` is a weights matrix created by the layer,
210 If `None` (default), weights are initialized using the default
218 norm constraints or value constraints for layer weights). The function
227 reuse: Boolean, whether to reuse the weights of a previous layer
  /external/tensorflow/tensorflow/contrib/cudnn_rnn/ops/
cudnn_rnn_ops.cc 45 save and restoration should be converted to and from the canonical weights and
60 weights: the canonical form of weights that can be used for saving
112 params: a 1-D tensor that contains the weights and biases in an opaque layout.
212 Compute the backprop of both data and weights in a RNN.
237 .Output("weights: num_params * T")
264 Retrieves a set of weights from the opaque params buffer that can be saved and
278 .Input("weights: num_params * T")
294 Writes a set of weights into the opaque params buffer so they can be used in
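The cuDNN layout itself is opaque, but the canonical-to-params conversion these ops describe amounts to packing per-layer weight and bias tensors into one flat buffer and slicing them back out. A conceptual NumPy sketch (the shapes are hypothetical and this is not cuDNN's actual layout):

    import numpy as np

    # Hypothetical canonical weights: per-layer matrices and bias vectors.
    canonical = [np.random.randn(4, 4), np.random.randn(4), np.random.randn(4, 4)]
    shapes = [w.shape for w in canonical]

    # "Canonical -> opaque": flatten everything into one 1-D params buffer.
    params = np.concatenate([w.ravel() for w in canonical])

    # "Opaque -> canonical": slice the buffer back out using the saved shapes.
    restored, offset = [], 0
    for shape in shapes:
        size = int(np.prod(shape))
        restored.append(params[offset:offset + size].reshape(shape))
        offset += size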
  /external/tensorflow/tensorflow/contrib/factorization/python/ops/
factorization_ops.py 59 W: weight matrix. Note that the (element-wise) square root of the weights
91 The sum_weights tensor contains the normalized sum of weights
222 each inner list are the weights for the rows of the corresponding row
226 all row weights and w_ij = unobserved_weight + row_weights *
231 use_factors_weights_cache: When True, the factors and weights will be
233 that the weights cache is initialized through `worker_init`, and the
235 `initialize_{col/row}_update_op`. In the case where the weights are
238 weights cache to take effect.
348 wt_init: init value for the weight. If None, weights are not created. This
352 num_shards: number of shards for the weights
    [all...]
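Reading the (truncated) docstring above, the weight of an observed entry appears to be w_ij = unobserved_weight + row_weights[i] * col_weights[j]; a small NumPy sketch with made-up values:

    import numpy as np

    unobserved_weight = 0.1
    row_weights = np.array([1.0, 2.0])        # one weight per row
    col_weights = np.array([0.5, 1.0, 1.5])   # one weight per column

    # Weight for each observed entry (i, j):
    # w_ij = unobserved_weight + row_weights[i] * col_weights[j]
    W = unobserved_weight + np.outer(row_weights, col_weights)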
  /external/icu/icu4c/source/i18n/
collationweights.cpp 18 * This file contains code for allocating n collation element weights
41 /* helper functions for CE weights */
125 // We use only the lower 16 bits for secondary weights.
139 // We use only the lower 16 bits for tertiary weights.
146 // The other bits are used for case & quaternary weights.
303 // Note: The lowerEnd and upperStart weights are versions of
393 // See if the first few minLength and minLength+1 ranges have enough weights.
398 // Reduce the number of weights from the last minLength+1 range
400 // so that we use all weights in the minLength ranges.
424 // See if the minLength ranges have enough weights
    [all...]
  /external/tensorflow/tensorflow/examples/speech_commands/
models.py 137 [MatMul]<-(weights)
155 weights = tf.Variable(
158 logits = tf.matmul(fingerprint_input, weights) + bias
176 [Conv2D]<-(weights)
184 [Conv2D]<-(weights)
192 [MatMul]<-(weights)
285 [Conv2D]<-(weights)
291 [MatMul]<-(weights)
295 [MatMul]<-(weights)
299 [MatMul]<-(weights)
    [all...]
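The [MatMul]<-(weights) diagram corresponds to a single dense layer; a minimal TF 1.x sketch, where the sizes (3920 inputs, 12 labels) are illustrative assumptions:

    import tensorflow as tf  # TF 1.x style, as in models.py above

    fingerprint_size, label_count = 3920, 12  # illustrative sizes
    fingerprint_input = tf.placeholder(tf.float32, [None, fingerprint_size])

    weights = tf.Variable(
        tf.truncated_normal([fingerprint_size, label_count], stddev=0.001))
    bias = tf.Variable(tf.zeros([label_count]))
    logits = tf.matmul(fingerprint_input, weights) + bias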
  /external/tensorflow/tensorflow/tools/graph_transforms/
sparsify_gather_test.cc 88 Tensor weights(DT_FLOAT, TensorShape({4, 1}));
89 test::FillValues<float>(&weights, {0.2, 0.000001, 1.2, 0.001});
93 SetNodeTensorAttr<float>("value", weights, w_node);
130 TF_ASSERT_OK(writer.Add("w", weights));
309 Tensor weights(DT_FLOAT, TensorShape({4, 1}));
310 test::FillValues<float>(&weights, {0.2, 0.000001, 1.2, 0.001});
314 SetNodeTensorAttr<float>("value", weights, w_node1);
315 SetNodeTensorAttr<float>("value", weights, w_node2);
367 TF_ASSERT_OK(writer.Add("w1", weights));
368 TF_ASSERT_OK(writer.Add("w2", weights));
    [all...]
  /external/tensorflow/tensorflow/contrib/slim/python/slim/
learning_test.py 822 # First, train only the weights of the model.
827 weights = variables_lib2.get_variables_by_name('weights')
830 total_loss, optimizer, variables_to_train=weights)
    [all...]
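A sketch of the "train only the weights" pattern from this test, assuming TF 1.x with tf.contrib.slim; the tiny model is illustrative:

    import tensorflow as tf  # assumes TF 1.x
    slim = tf.contrib.slim

    x = tf.random_normal([8, 4])
    y = slim.fully_connected(x, 2)  # creates 'weights' and 'biases' variables
    total_loss = tf.reduce_mean(tf.square(y))

    # Train only the 'weights' variables; 'biases' stay frozen.
    weights = slim.get_variables_by_name('weights')
    optimizer = tf.train.GradientDescentOptimizer(0.1)
    train_op = slim.learning.create_train_op(
        total_loss, optimizer, variables_to_train=weights)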
  /external/tensorflow/tensorflow/python/keras/_impl/keras/
optimizers.py 92 self.weights = []
106 def set_weights(self, weights):
107 """Sets the weights of the optimizer, from Numpy arrays.
110 (otherwise the optimizer has no weights).
113 weights: a list of Numpy arrays. The number
115 number of the dimensions of the weights
122 params = self.weights
125 for pv, p, w in zip(param_values, params, weights):
134 """Returns the current value of the weights of the optimizer.
139 return K.batch_get_value(self.weights)
715 def weights(self): member in class:TFOptimizer
    [all...]
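A hedged round-trip through the two methods above, using the public tf.keras mirror of this API; the model and batch sizes are arbitrary, and the optimizer has no weights to read until it has created its slot variables:

    import numpy as np
    import tensorflow as tf  # assumes TF 1.x with tf.keras

    model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])
    model.compile(optimizer=tf.keras.optimizers.SGD(lr=0.1), loss='mse')
    # One update so the optimizer actually creates its weights (slot variables).
    model.train_on_batch(np.zeros((1, 4)), np.zeros((1, 2)))

    state = model.optimizer.get_weights()  # list of NumPy arrays
    model.optimizer.set_weights(state)     # counts and shapes must match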
  /external/tensorflow/tensorflow/contrib/lite/toco/graph_transformations/
fuse_binary_into_preceding_affine.cc 93 auto& weights = model->GetArray(weights_name); local
100 const Shape& weights_shape = weights.shape();
103 auto& weights_buffer = weights.GetMutableBuffer<ArrayDataType::kFloat>();
265 const auto& weights = model->GetArray(preceding_op->inputs[1]); local
277 if (!weights.buffer || !bias.buffer) {
279 "Not fusing %s because the preceding %s has non-constant weights or "
  /external/tensorflow/tensorflow/docs_src/extend/tool_developers/
index.md 64 editing, but can get large when there's numerical data like weights stored in
149 One confusing part about this is that the weights usually aren't stored inside
159 `Const` that has the numerical data for the weights stored in its attributes
168 script, is as `Const` ops containing the weights as `Tensors`. These are
175 This will give you an object representing the weights data. The data itself
180 converting between different frameworks. In TensorFlow, the filter weights for
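A sketch of pulling those weights out of the `Const` ops of a frozen graph, assuming TF 1.x; 'frozen_graph.pb' is a hypothetical path:

    import tensorflow as tf  # assumes TF 1.x
    from tensorflow.python.framework import tensor_util

    graph_def = tf.GraphDef()
    with open('frozen_graph.pb', 'rb') as f:  # hypothetical frozen-graph path
        graph_def.ParseFromString(f.read())

    for node in graph_def.node:
        if node.op == 'Const':
            # The numerical data lives in the 'value' attr as a TensorProto.
            weights = tensor_util.MakeNdarray(node.attr['value'].tensor)
            print(node.name, weights.shape)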
  /frameworks/base/core/java/com/android/internal/graphics/palette/
Target.java 203 * <p>The larger the weight, relative to the other weights, the more important that a color
216 * <p>The larger the weight, relative to the other weights, the more important that a color
229 * <p>The larger the weight, relative to the other weights, the more important that a
371 * <p>The larger the weight, relative to the other weights, the more important that a color
387 * <p>The larger the weight, relative to the other weights, the more important that a color
404 * <p>The larger the weight, relative to the other weights, the more important that a
  /frameworks/support/palette/src/main/java/androidx/palette/graphics/
Target.java 186 * <p>The larger the weight, relative to the other weights, the more important that a color
199 * <p>The larger the weight, relative to the other weights, the more important that a color
212 * <p>The larger the weight, relative to the other weights, the more important that a
360 * <p>The larger the weight, relative to the other weights, the more important that a color
377 * <p>The larger the weight, relative to the other weights, the more important that a color
395 * <p>The larger the weight, relative to the other weights, the more important that a
  /sdk/eclipse/plugins/com.android.ide.eclipse.adt/src/com/android/ide/common/layout/
LinearLayoutRule.java 81 LinearLayoutRule.class.getResource("weights.png"); //$NON-NLS-1$
170 // Weights
215 actions.add(RuleAction.createAction(ACTION_DISTRIBUTE, "Distribute Weights Evenly",
221 actions.add(RuleAction.createAction(ACTION_CLEAR, "Clear All Weights",
564 // Don't adjust widths/heights/weights when just moving within a single
594 // If you insert into a layout that already is using layout weights,
595 // and all the layout weights are the same (nonzero) value, then use
777 /** Map from nodes to preferred bounds of nodes where the weights have been cleared */
779 /** Total required size required by the siblings <b>without</b> weights */
781 /** List of nodes which should have their weights cleared */
    [all...]
  /external/python/cpython3/Doc/library/
random.rst 142 .. function:: choices(population, weights=None, *, cum_weights=None, k=1)
147 If a *weights* sequence is specified, selections are made according to the
148 relative weights. Alternatively, if a *cum_weights* sequence is given, the
149 selections are made according to the cumulative weights (perhaps computed
150 using :func:`itertools.accumulate`). For example, the relative weights
151 ``[10, 5, 30, 5]`` are equivalent to the cumulative weights
152 ``[10, 15, 45, 50]``. Internally, the relative weights are converted to
153 cumulative weights before making selections, so supplying the cumulative
154 weights saves work.
156 If neither *weights* nor *cum_weights* are specified, selections are made with equal probability.
    [all...]
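The equivalence described above, as a runnable example:

    import random

    population = ['a', 'b', 'c', 'd']

    # Relative weights [10, 5, 30, 5] ...
    x = random.choices(population, weights=[10, 5, 30, 5], k=3)

    # ... pick with the same distribution as cumulative weights [10, 15, 45, 50].
    y = random.choices(population, cum_weights=[10, 15, 45, 50], k=3)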
  /external/tensorflow/tensorflow/contrib/boosted_trees/lib/learner/batch/
ordinal_split_handler.py 106 min_node_weight: Minimum sum of weights of examples in each partition to
165 min_node_weight: Minimum sum of weights of examples in each partition to
199 hessians, empty_gradients, empty_hessians, weights,
213 weights: A dense float32 tensor with a weight for each example.
227 example_partition_ids, gradients, hessians, weights, empty_gradients,
305 min_node_weight: Minimum sum of weights of examples in each partition to
339 hessians, empty_gradients, empty_hessians, weights,
353 weights: A dense float32 tensor with a weight for each example.
369 example_partition_ids, gradients, hessians, weights, empty_gradients,
425 hessians, weights, empty_gradients, empty_hessians)
    [all...]
  /external/apache-commons-math/src/main/java/org/apache/commons/math/optimization/
DifferentiableMultivariateVectorialOptimizer.java 101 * @param weights weight for the least squares cost computation
110 double[] target, double[] weights,
  /external/libopus/silk/
NLSF_VQ_weights_laroia.c 41 /* Laroia low complexity NLSF weights */
43 opus_int16 *pNLSFW_Q_OUT, /* O Pointer to input vector weights [D] */
  /external/libxcam/modules/ocl/
cv_edgetaper.cpp 64 cv::Mat weights = expanded (cv::Rect (expanded.cols / 2 - image.cols / 2, expanded.rows / 2 - image.rows / 2, image.cols, image.rows)); local
65 coefficients = weights.clone ();
  /external/llvm/test/Analysis/BranchProbabilityInfo/
pr18705.ll 5 ; calcLoopBranchHeuristics should return early without setting the weights.
6 ; calcFloatingPointHeuristics, which is run later, sets the weights.
  /external/skia/include/private/
SkPathRef.h 79 * requisite points & weights.
81 * If 'verb' is kConic_Verb, 'weights' will return a pointer to the
82 * space for the conic weights (indexed normally).
86 SkScalar** weights = nullptr) {
87 return fPathRef->growForRepeatedVerb(verb, numVbs, weights);
424 * verb. If 'verb' is kConic_Verb, 'weights' will return a pointer to the
425 * uninitialized conic weights.
427 SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb, int numVbs, SkScalar** weights);
  /external/skqp/include/private/
SkPathRef.h 79 * requisite points & weights.
81 * If 'verb' is kConic_Verb, 'weights' will return a pointer to the
82 * space for the conic weights (indexed normally).
86 SkScalar** weights = nullptr) {
87 return fPathRef->growForRepeatedVerb(verb, numVbs, weights);
421 * verb. If 'verb' is kConic_Verb, 'weights' will return a pointer to the
422 * uninitialized conic weights.
424 SkPoint* growForRepeatedVerb(int /*SkPath::Verb*/ verb, int numVbs, SkScalar** weights);
  /external/swiftshader/third_party/LLVM/include/llvm/CodeGen/
MachineBranchProbabilityInfo.h 32 // weight to an edge that may have siblings with non-zero weights. This can
37 // Get sum of the block successors' weights.
  /external/swiftshader/third_party/PowerVR_SDK/Examples/Advanced/ChameleonMan/OGLES2/
SkinnedVertShader.vsh 4 to 4 bone indices (inBoneIndex) and bone weights (inBoneWeights).
9 weights which should always total 1. So if a vertex is affected by 2 bones
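In NumPy terms, the shader blends one transform per influencing bone, with the per-vertex weights totalling 1; the matrices and weights below are made up:

    import numpy as np

    # Hypothetical 4-bone setup: one 4x4 transform per bone.
    bone_matrices = [np.eye(4) for _ in range(4)]
    bone_indices = [0, 1, 2, 3]
    bone_weights = np.array([0.5, 0.3, 0.2, 0.0])  # should always total 1
    assert abs(bone_weights.sum() - 1.0) < 1e-6

    vertex = np.array([1.0, 2.0, 3.0, 1.0])  # homogeneous position
    blended = sum(w * bone_matrices[i] for i, w in zip(bone_indices, bone_weights))
    skinned = blended @ vertex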

