    Searched full:weights (Results 351 - 375 of 1517)


  /external/tensorflow/tensorflow/core/kernels/
candidate_sampler_ops.cc 223 std::vector<float> weights; variable
232 weights.push_back(-FLT_MAX);
250 2, TensorShape({static_cast<int>(weights.size())}), &out_weights));
255 out_weights->vec<float>()(i) = weights[i];
mfcc_mel_filterbank.cc 17 // weights to create a mel-frequency filter bank. For filter i centered at f_i,
131 // Check the sum of FFT bin weights for every mel band to identify
146 // weights sum. But given that the target gain at the center frequency
147 // is 1.0, if the total sum of weights is 0.5, we're in bad shape.
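The mfcc_mel_filterbank.cc comments describe triangular per-band weights over FFT bins and a sanity check on each band's weight sum (gain 1.0 at the center frequency; a total near 0.5 means the band is poorly covered by the FFT resolution). A rough NumPy sketch of that idea follows; the mel formula, band layout, and threshold placement are assumptions, not the TensorFlow implementation itself.

    import numpy as np

    def hz_to_mel(f):
        # Common mel approximation; the exact constants in mfcc_mel_filterbank.cc may differ.
        return 1127.0 * np.log1p(f / 700.0)

    def mel_filterbank(num_bands, fft_bins, sample_rate, low_hz=20.0, high_hz=4000.0):
        """Triangular weights [num_bands, fft_bins]; filter i is centered at edge i+1 on the mel scale."""
        bin_mel = hz_to_mel(np.arange(fft_bins) * (sample_rate / 2.0) / (fft_bins - 1))
        edges = np.linspace(hz_to_mel(low_hz), hz_to_mel(high_hz), num_bands + 2)
        weights = np.zeros((num_bands, fft_bins))
        for i in range(num_bands):
            left, center, right = edges[i], edges[i + 1], edges[i + 2]
            up = (bin_mel - left) / (center - left)
            down = (right - bin_mel) / (right - center)
            weights[i] = np.clip(np.minimum(up, down), 0.0, None)
        return weights

    weights = mel_filterbank(num_bands=20, fft_bins=257, sample_rate=16000)
    # The check the snippet alludes to: with a target gain of 1.0 at the center frequency,
    # a band whose FFT bin weights sum to about 0.5 or less is in bad shape.
    band_sums = weights.sum(axis=1)
    print(np.where(band_sums < 0.5)[0])   # indices of under-covered bands, if any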
  /external/tensorflow/tensorflow/python/debug/examples/
debug_mnist.py 81 # This Variable will hold the state of the weights for the layer
82 with tf.name_scope("weights"):
83 weights = weight_variable([input_dim, output_dim])
87 preactivate = tf.matmul(input_tensor, weights) + biases
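The debug_mnist.py lines are the usual fully connected layer: a weights variable of shape [input_dim, output_dim], a bias vector, and a matmul producing the preactivation. A plain NumPy rendering with made-up dimensions (the TF variable scoping and the weight_variable() helper are left out):

    import numpy as np

    input_dim, output_dim, batch = 784, 500, 32
    rng = np.random.default_rng(0)
    weights = rng.normal(scale=0.1, size=(input_dim, output_dim))   # the "weights" variable
    biases = np.full(output_dim, 0.1)
    input_tensor = rng.random((batch, input_dim))
    preactivate = input_tensor @ weights + biases   # tf.matmul(input_tensor, weights) + biases
    print(preactivate.shape)   # (32, 500)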
  /external/tensorflow/tensorflow/tools/api/golden/
tensorflow.-graph-keys.pbtxt 130 name: "WEIGHTS"
tensorflow.keras.layers.-bidirectional.pbtxt 97 name: "weights"
102 argspec: "args=[\'self\', \'layer\', \'merge_mode\', \'weights\'], varargs=None, keywords=kwargs, defaults=[\'concat\', \'None\'], "
194 argspec: "args=[\'self\', \'weights\'], varargs=None, keywords=None, defaults=None"
  /external/tensorflow/tensorflow/tools/docker/notebooks/
2_getting_started.ipynb     [all...]
  /external/trappy/trappy/stats/
Correlator.py 78 weights = []
90 weights.append(len(series_x[series_x != 0]) + len(series_y[series_y != 0]))
93 for weight, corr in zip(weights, corr_output):
96 total += (weight * corr) / sum(weights)
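Correlator.py weights each per-pair correlation by how many non-zero samples the pair contributes, then folds the results into a single weighted mean. A small pandas sketch of that aggregation; Series.corr stands in for the correlation routine and may not match trappy's own:

    import numpy as np
    import pandas as pd

    def weighted_aggregate_correlation(pairs):
        """pairs: list of (series_x, series_y). Weight each pair's correlation by its
        count of non-zero samples, as in the Correlator.py lines above."""
        weights, corrs = [], []
        for series_x, series_y in pairs:
            weights.append(len(series_x[series_x != 0]) + len(series_y[series_y != 0]))
            corrs.append(series_x.corr(series_y))
        total = 0.0
        for weight, corr in zip(weights, corrs):
            total += (weight * corr) / sum(weights)
        return total

    rng = np.random.default_rng(1)
    a = pd.Series(rng.random(100))
    b = a + 0.1 * pd.Series(rng.random(100))
    c = pd.Series(rng.random(10))
    d = pd.Series(rng.random(10))
    print(weighted_aggregate_correlation([(a, b), (c, d)]))   # dominated by the longer, correlated pair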
  /frameworks/ml/nn/runtime/test/specs/V1_0/
rnn.mod.py 24 weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size)) variable
34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
38 weights: [
  /frameworks/ml/nn/runtime/test/specs/V1_1/
rnn_relaxed.mod.py 24 weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size)) variable
34 model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
39 weights: [
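Both rnn.mod.py specs declare weights of shape {units, input_size} (plus recurrent weights, bias, and a hidden state) for NNAPI's basic RNN operation. A NumPy sketch of the single step those tensors drive; the ReLU activation is an assumption about what the test specs request:

    import numpy as np

    def basic_rnn_step(input_, state, weights, recurrent_weights, bias):
        """output = activation(input * W^T + state * R^T + b), with
        weights [units, input_size] and recurrent_weights [units, units]."""
        preact = input_ @ weights.T + state @ recurrent_weights.T + bias
        return np.maximum(preact, 0.0)   # ReLU (assumed)

    units, input_size, batch = 16, 8, 2
    rng = np.random.default_rng(0)
    weights = rng.normal(size=(units, input_size))
    recurrent_weights = rng.normal(size=(units, units))
    bias = np.zeros(units)
    state = np.zeros((batch, units))
    x = rng.random((batch, input_size))
    state = basic_rnn_step(x, state, weights, recurrent_weights, bias)
    print(state.shape)   # (2, 16)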
  /hardware/intel/common/utils/ituxd/src/com/intel/thermal/
ThermalSensorAttrib.java 75 Log.i(TAG, "mismatch in weights and order array, raw sensor temp will be used");
  /external/apache-commons-math/src/main/java/org/apache/commons/math/ode/nonstiff/
HighamHall54Integrator.java 47 /** Internal weights Butcher array. */
57 /** Propagation weights Butcher array. */
62 /** Error weights Butcher array. */
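HighamHall54Integrator.java names the three Butcher arrays an embedded Runge-Kutta method needs: internal weights for the stage combinations, propagation weights for the accepted step, and error weights for the step-size controller. The sketch below shows how the three sets are used in a generic embedded step; the coefficients are the compact Bogacki-Shampine 3(2) pair, not the Higham-Hall 5(4) tableau:

    import numpy as np

    A = [[],
         [1/2],
         [0, 3/4],
         [2/9, 1/3, 4/9]]                        # internal weights (stage coefficients)
    B = np.array([2/9, 1/3, 4/9, 0])             # propagation weights
    B_HAT = np.array([7/24, 1/4, 1/3, 1/8])      # lower-order weights; B - B_HAT act as error weights
    C = np.array([0, 1/2, 3/4, 1])

    def rk_embedded_step(f, t, y, h):
        k = []
        for i in range(len(C)):
            yi = y + h * sum(a * kj for a, kj in zip(A[i], k))
            k.append(f(t + C[i] * h, yi))
        k = np.array(k)
        y_next = y + h * (B @ k)                 # propagate with the high-order weights
        error = h * ((B - B_HAT) @ k)            # error estimate for step-size control
        return y_next, error

    y_next, err = rk_embedded_step(lambda t, y: -y, 0.0, np.array([1.0]), 0.1)
    print(y_next, err)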
  /external/apache-commons-math/src/main/java/org/apache/commons/math/optimization/fitting/
CurveFitter.java 131 double[] weights = new double[observations.size()]; local
135 weights[i] = point.getWeight();
141 optimizer.optimize(new TheoreticalValuesFunction(f), target, weights, initialGuess);
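CurveFitter.java gathers one weight per observation and hands target values, weights, and an initial guess to a least-squares optimizer. The same weighted fit in SciPy terms, assuming the usual mapping from Commons Math weights to curve_fit's sigma (sigma = 1/sqrt(weight), since the weights multiply squared residuals):

    import numpy as np
    from scipy.optimize import curve_fit

    def f(x, a, b):
        return a * np.exp(b * x)

    x = np.linspace(0, 1, 50)
    rng = np.random.default_rng(0)
    target = f(x, 2.0, 1.5) + 0.05 * rng.normal(size=x.size)
    weights = np.ones_like(x)
    weights[:10] = 10.0                           # trust the first observations more
    initial_guess = [1.0, 1.0]
    params, _ = curve_fit(f, x, target, p0=initial_guess, sigma=1.0 / np.sqrt(weights))
    print(params)                                 # roughly [2.0, 1.5]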
  /external/deqp/external/vulkancts/modules/vulkan/texture/
vktSampleVerifier.hpp 208 const tcu::Vec2& weights,
215 const tcu::Vec3& weights,
222 const tcu::Vec3& weights,
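The verifier declarations pass interpolation weights alongside the texel footprint: a Vec2 for 2D (bilinear) lookups and a Vec3 for the 3D variants. A toy bilinear blend showing what such a weight pair does; the function name and layout are illustrative only:

    import numpy as np

    def bilinear_sample(texels, weights):
        """Blend a 2x2 texel footprint with fractional weights (wx, wy)."""
        wx, wy = weights
        top = (1 - wx) * texels[0, 0] + wx * texels[0, 1]
        bottom = (1 - wx) * texels[1, 0] + wx * texels[1, 1]
        return (1 - wy) * top + wy * bottom

    texels = np.array([[0.0, 1.0],
                       [2.0, 3.0]])
    print(bilinear_sample(texels, (0.25, 0.5)))   # 1.25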
  /external/swiftshader/third_party/LLVM/lib/CodeGen/
CalcSpillWeights.cpp 29 "Calculate spill weights", false, false)
33 "Calculate spill weights", false, false)
44 DEBUG(dbgs() << "********** Compute Spill Weights **********\n"
  /external/tensorflow/tensorflow/contrib/boosted_trees/python/kernel_tests/
quantile_ops_test.py 54 | Instance | instance weights | Dense 0 | Sparse 0 | SparseM
116 weights = array_ops.placeholder(dtypes.float32)
120 example_weights=weights)
128 weights: [1] * i})
151 weights = array_ops.placeholder(dtypes.float32)
155 example_weights=weights)
175 weights: [1] * len(inputs)})
198 weights = array_ops.placeholder(dtypes.float32)
202 example_weights=weights)
207 weights: [1] * len(inputs)}
    [all...]
  /external/tensorflow/tensorflow/contrib/factorization/python/ops/
gmm.py 90 Can contain any combination of "w" for weights, "m" for means,
129 def weights(self): member in class:GMM
130 """Returns the cluster weights."""
factorization_ops_test.py 113 # test_rows: True to test row weights, False to test column weights.
212 # Using the specified projection weights for the 2 row feature vectors.
214 # weights and feature vectors are identical to that used in model
221 # weights will be those specified in model.
285 # Using the specified projection weights for the 3 column feature vectors.
287 # weights and feature vectors are identical to that used in model
294 # weights will be those specified in model.
387 # Using the specified projection weights for the 2 row feature vectors.
389 # weights and feature vectors are identical to that used in mode
    [all...]
  /external/tensorflow/tensorflow/python/ops/
nn_batchnorm_test.py 450 # weights as well as the input values.
596 non-constant weights and varying broadcasting situations. (It
603 weights = constant_op.constant(1, dtype=x.dtype)
605 # We want to assert gradients WRT weights as well as X!
606 extra_out_grads.append(weights)
607 return nn_impl.weighted_moments(x, axes, weights, keep_dims=keep_dims)
619 # 1:1 weights and inputs
624 # try broadcasting weights in all positions
647 weights_numpy = np.absolute( # weights must be positive
660 weights = array_ops.placeholder(dtype, shape=weights_shape
    [all...]
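The batch-norm test exercises weighted_moments(x, axes, weights) with broadcast, strictly positive weights, and asks for gradients with respect to the weights as well as the inputs. A NumPy approximation of the forward computation; whether it matches TensorFlow's normalization exactly is not guaranteed:

    import numpy as np

    def weighted_moments(x, axes, weights):
        """Weighted mean and variance along 'axes', with 'weights' broadcast against x."""
        w = np.broadcast_to(weights, x.shape).astype(x.dtype)
        wsum = w.sum(axis=axes, keepdims=True)
        mean = (w * x).sum(axis=axes, keepdims=True) / wsum
        var = (w * (x - mean) ** 2).sum(axis=axes, keepdims=True) / wsum
        return mean, var

    rng = np.random.default_rng(0)
    x = rng.random((4, 3))
    weights = np.abs(rng.normal(size=(1, 3)))     # weights must be positive; broadcast along axis 0
    mean, var = weighted_moments(x, axes=(0,), weights=weights)
    print(mean.shape, var.shape)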
  /external/tensorflow/tensorflow/tools/graph_transforms/
fuse_convolutions.cc 57 // We'll be reusing the old weights and pad dimensions.
104 // We'll be reusing the old weights.
168 // We'll be reusing the old weights and pad dimensions.
  /frameworks/base/libs/hwui/
FontRenderer.h 197 static void computeGaussianWeights(float* weights, int32_t radius);
198 static void horizontalBlur(float* weights, int32_t radius, const uint8_t* source, uint8_t* dest,
200 static void verticalBlur(float* weights, int32_t radius, const uint8_t* source, uint8_t* dest,
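FontRenderer.h splits the blur into a precomputed Gaussian weight table plus separable horizontal and vertical passes. A sketch of both pieces; the sigma-from-radius rule and the edge clamping are assumptions rather than the hwui implementation:

    import numpy as np

    def compute_gaussian_weights(radius, sigma=None):
        """Normalized 1D Gaussian kernel of length 2*radius + 1."""
        sigma = sigma or max(radius / 2.0, 1e-6)
        xs = np.arange(-radius, radius + 1, dtype=np.float64)
        w = np.exp(-(xs ** 2) / (2.0 * sigma ** 2))
        return w / w.sum()

    def horizontal_blur(weights, radius, source):
        """One separable pass: convolve each row with the weights (edge-clamped);
        a vertical pass would run the same weights down the columns."""
        padded = np.pad(source.astype(np.float64), ((0, 0), (radius, radius)), mode="edge")
        out = np.zeros(source.shape, dtype=np.float64)
        for k, w in enumerate(weights):
            out += w * padded[:, k:k + source.shape[1]]
        return np.clip(out, 0, 255).astype(np.uint8)

    img = np.arange(64, dtype=np.uint8).reshape(8, 8) * 4
    w = compute_gaussian_weights(radius=2)
    print(horizontal_blur(w, 2, img).shape)       # (8, 8)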
  /external/tensorflow/tensorflow/contrib/legacy_seq2seq/python/kernel_tests/
seq2seq_test.py 583 # within a variable scope that already has a weights tensor.
721 weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
726 weights,
735 weights,
744 weights,
761 weights = [constant_op.constant(1.0, shape=[2]) for i in range(3)]
764 logits, targets, weights, average_across_timesteps=True))
769 logits, targets, weights, average_across_timesteps=False)
774 # within a variable scope that already has a weights tensor.
783 # def SampleGRUSeq2Seq(enc_inp, dec_inp, weights, per_example_loss)
    [all...]
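The seq2seq tests build per-timestep weights (constant 1.0 here) and feed them to the sequence loss, toggling average_across_timesteps. A NumPy sketch of that weighted cross-entropy in the spirit of sequence_loss_by_example; the exact reduction order in the legacy implementation may differ:

    import numpy as np

    def sequence_loss_by_example(logits, targets, weights, average_across_timesteps=True):
        """logits/targets/weights are length-T lists; weights usually mask padding."""
        total, weight_sum = 0.0, 0.0
        for logit, target, weight in zip(logits, targets, weights):
            shifted = logit - logit.max(axis=1, keepdims=True)
            log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
            crossent = -log_probs[np.arange(len(target)), target]
            total += weight * crossent
            weight_sum += weight
        if average_across_timesteps:
            return total / np.maximum(weight_sum, 1e-12)
        return total

    T, batch, vocab = 3, 2, 5
    rng = np.random.default_rng(0)
    logits = [rng.normal(size=(batch, vocab)) for _ in range(T)]
    targets = [rng.integers(0, vocab, size=batch) for _ in range(T)]
    weights = [np.ones(batch) for _ in range(T)]  # the constant 1.0 weights used in the test
    print(sequence_loss_by_example(logits, targets, weights))   # per-example loss, shape (2,)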
  /external/tensorflow/tensorflow/contrib/legacy_seq2seq/python/ops/
seq2seq.py 33 - tied_rnn_seq2seq: The basic model with tied encoder and decoder weights.
250 output_projection: None or a pair (W, B) of output projection weights and
327 output_projection: None or a pair (W, B) of output projection weights and
441 output_projection: None or a pair (W, B) of output projection weights and
    [all...]
  /external/tensorflow/tensorflow/python/keras/_impl/keras/
models.py 63 - the model's weights
167 # Save optimizer weights.
168 symbolic_weights = getattr(model.optimizer, 'weights')
251 # set weights
283 # Set optimizer weights.
640 weights = self._gather_list_attr('non_trainable_weights')
643 return trainable_weights + weights
644 return weights
653 """Retrieves the weights of the model.
663 def set_weights(self, weights)
    [all...]
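models.py is the machinery behind Keras's get_weights()/set_weights() pair, along with the optimizer weights saved next to the model. A round trip through the public API, assuming a current tf.keras install rather than the old _impl path shown above:

    import numpy as np
    import tensorflow as tf

    def make_model():
        return tf.keras.Sequential([
            tf.keras.Input(shape=(4,)),
            tf.keras.layers.Dense(8, activation="relu"),
            tf.keras.layers.Dense(1),
        ])

    model = make_model()
    weights = model.get_weights()      # flat list of NumPy arrays: kernel, bias, kernel, bias
    twin = make_model()                # same architecture, fresh initialization
    twin.set_weights(weights)          # arrays are matched to variables positionally
    assert all(np.array_equal(a, b) for a, b in zip(weights, twin.get_weights()))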
  /external/apache-commons-math/src/main/java/org/apache/commons/math/optimization/general/
AbstractLeastSquaresOptimizer.java 255 * the reciprocal of the weights.
327 final double[] target, final double[] weights,
331 if (target.length != weights.length) {
333 target.length, weights.length);
345 residualsWeights = weights.clone();
  /external/icu/android_icu4j/src/main/java/android/icu/util/
LocalePriorityList.java 62 * If it is off (the default), then all weights are reset to 1.0 after reordering.
124 * the weights may be adjusted from those used to build the list.
198 * These store the input languages and weights, in chronological order,
223 * @param preserveWeights when true, the weights originally came
228 // Walk through the input list, collecting the items with the same weights.
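LocalePriorityList.java parses language/weight pairs, reorders them by descending weight, and (unless weights are preserved) resets every weight to 1.0 after reordering. A small stand-in parser for a "lang;q=weight" list illustrating that behaviour; the input syntax handling here is simplified relative to ICU's:

    from collections import OrderedDict

    def parse_locale_priority_list(spec, preserve_weights=False):
        """Return languages ordered by descending weight; reset weights to 1.0
        unless preserve_weights is True, as the ICU comments describe."""
        items = []
        for i, part in enumerate(p.strip() for p in spec.split(",") if p.strip()):
            if ";q=" in part:
                lang, q = part.split(";q=")
                weight = float(q)
            else:
                lang, weight = part, 1.0
            items.append((lang.strip(), weight, i))    # input order breaks ties
        items.sort(key=lambda t: (-t[1], t[2]))
        result = OrderedDict()
        for lang, weight, _ in items:
            result[lang] = weight if preserve_weights else 1.0
        return result

    print(parse_locale_priority_list("en;q=0.8, de, fr;q=0.9"))
    # 'de' first, then 'fr', then 'en', all reset to weight 1.0
    print(parse_locale_priority_list("en;q=0.8, de, fr;q=0.9", preserve_weights=True))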

