/frameworks/base/packages/FusedLocation/src/com/android/location/fused/
FusedLocationService.java | 17 package com.android.location.fused;
|
FusedLocationProvider.java | 17 package com.android.location.fused;
|
FusionEngine.java |
    17 package com.android.location.fused;
    46 private static final String FUSED = LocationProviderBase.FUSED_PROVIDER;
    235 mFusedLocation.setProvider(FUSED);
    288 s.append("fused=").append(mFusedLocation).append('\n');
|
/cts/tests/inputmethod/mockime/src/com/android/cts/mockime/ |
ImeEventStream.java |
    144 boolean fused) {
    147 fused ? event.getEnterWallTime() :
    153 sb.append(fused ? "" : event.isEnterEvent() ? "[" : "]");
    154 if (fused || event.isEnterEvent()) {
    174 final boolean fused = areEnterExitPairedMessages(latest, i);
    175 if (i == mCurrentPosition || (fused && ((i + 1) == mCurrentPosition))) {
    178 dumpEvent(sb, latest.mArray[fused ? ++i : i], fused);
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
logical_buffer_analysis.cc |
    33 for (auto* fused : instruction->fused_instructions()) {
    34 if (fused->opcode() == HloOpcode::kFusion) {
    35 GatherFusionInstructions(fused, fusion_instructions);
|
batchnorm_expander.cc |
    221 auto fused = computation_->CreateFusionInstruction( local
    225 sum = add(HloInstruction::CreateGetTupleElement(feature_shape, fused, 0));
    228 add(HloInstruction::CreateGetTupleElement(feature_shape, fused, 1));
    518 auto fused = computation_->CreateFusionInstruction( local
    523 add(HloInstruction::CreateGetTupleElement(feature_shape, fused, 0));
    526 add(HloInstruction::CreateGetTupleElement(feature_shape, fused, 1));
|
tuple_points_to_analysis.cc |
    130 for (auto* fused : instruction->fused_instructions()) {
    131 if (fused->opcode() == HloOpcode::kFusion) {
    132 GatherFusionInstructions(fused, fusion_instructions);
    553 for (auto* fused : instruction->fused_instructions()) {
    554 InstructionToString(fused, &output);
|
hlo_verifier.cc |
    622 "Instruction of fused computation does not match expected instruction "
    627 // Fused root instruction and fused parameters must all be owned by the fusion
    664 // Fused root must have no users.
    670 // All uses of fused instructions must be in the fusion computation, and every
    690 // Fused parameter instructions must be numbered contiguously and match up
    755 for (const auto& fused : instruction->fused_instructions()) {
    756 TF_RET_CHECK(fused->parent() ==
    758 << "Fused HLO was missing a parent: " << fused->ToString( [all...]
tuple_points_to_analysis_test.cc |
    654 // Run points-to analysis (should include fused instructions from 'fusion').
    709 fusion->fused_instructions().end(), [=](const HloInstruction* fused) {
    710 return fused->opcode() == HloOpcode::kParameter &&
    711 fusion->operand(fused->parameter_number()) == operand [all...]
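logical_buffer_analysis.cc and tuple_points_to_analysis.cc above share the same recursive walk: visit a fusion instruction, then descend into any fusion instructions nested inside its fused computation. A minimal sketch of that pattern in Python; the Instruction class and opcode strings here are hypothetical stand-ins for XLA's HloInstruction, not the real API:

    from dataclasses import dataclass, field
    from typing import List

    @dataclass
    class Instruction:
        opcode: str                                # e.g. "fusion", "add", "parameter"
        fused_instructions: List["Instruction"] = field(default_factory=list)

    def gather_fusion_instructions(instruction, out):
        """Collect `instruction` plus every fusion node nested inside it."""
        assert instruction.opcode == "fusion"
        out.append(instruction)
        for fused in instruction.fused_instructions:
            if fused.opcode == "fusion":
                gather_fusion_instructions(fused, out)   # recurse into nested fusions

    inner = Instruction("fusion", [Instruction("add")])
    outer = Instruction("fusion", [inner, Instruction("parameter")])
    result = []
    gather_fusion_instructions(outer, result)
    assert result == [outer, inner]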
/external/tensorflow/tensorflow/contrib/rnn/python/kernel_tests/ |
fused_rnn_cell_test.py |
    83 for basic, fused in zip(basic_wgrads, fused_static_wgrads):
    84 self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
    105 for basic, fused in zip(basic_wgrads, fused_dynamic_wgrads):
    106 self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
    137 with variable_scope.variable_scope("fused", initializer=initializer):
    149 if v.name.startswith("fused/")
    161 for basic, fused in zip(basic_wgrads, fused_wgrads):
    162 self.assertAllClose(basic, fused, rtol=1e-2, atol=1e-2)
|
lstm_ops_test.py |
    332 for basic, fused in zip(block_wgrads, fused_wgrads):
    333 self.assertAllClose(basic, fused, rtol=1e-6, atol=1e-6)
    350 for basic, fused in zip(block_wgrads, fused_wgrads):
    351 self.assertAllClose(basic, fused, rtol=1e-6, atol=1e-6)
    429 for fused, unfused in zip(fused_wgrads, unfused_wgrads):
    430 self.assertAllClose(fused, unfused, rtol=1e-6, atol=1e-6)
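Both RNN test files follow the same parity pattern: run the composed ("basic"/"block") implementation and the fused kernel on identical inputs, collect the weight gradients from each, and compare them pairwise. Note the tolerances: fused_rnn_cell_test.py allows rtol/atol of 1e-2 because per-step rounding differences accumulate across the unrolled sequence, while lstm_ops_test.py compares cell kernels at 1e-6. A minimal numpy sketch of that comparison loop; the helper name and toy data are mine, not from the tests:

    import numpy as np

    def assert_grads_close(basic_wgrads, fused_wgrads, rtol=1e-6, atol=1e-6):
        # pair up weight gradients from the two implementations and check
        # them elementwise, mirroring the assertAllClose loops above
        for basic, fused in zip(basic_wgrads, fused_wgrads):
            np.testing.assert_allclose(basic, fused, rtol=rtol, atol=atol)

    # toy usage: two gradient lists that agree exactly
    grads = [np.float32([0.25, -1.5]), np.float32([3.0, 0.125])]
    assert_grads_close(grads, [g.copy() for g in grads])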
|
/external/tensorflow/tensorflow/contrib/quantize/python/ |
fold_batch_norms_test.py |
    59 # Fused batch norm always has scaling enabled.
    78 fused_batch_norm: Bool, when true the batch norm is fused.
    99 scale=has_scaling, fused=fused_batch_norm),
    148 fused_batch_norm: Bool, when true the batch norm is fused.
    168 scale=has_scaling, fused=fused_batch_norm),
    213 fused_batch_norm: Bool, when true the batch norm is fused.
    231 scale=has_scaling, fused=fused_batch_norm),
    277 fused_batch_norm: Bool, when true the batch norm is fused.
    298 scale=has_scaling, fused=fused_batch_norm),
    354 fused_batch_norm: Bool, when true the batch norm is fused [all...]
graph_matcher_test.py | 43 [layers.batch_norm], fused=True, is_training=True, trainable=True):
|
quantize_parameterized_test.py |
    578 def _BatchNormParams(self, fused=False):
    579 return {'center': True, 'scale': True, 'decay': 1.0 - 0.003, 'fused': fused}
|
/prebuilts/go/darwin-x86/src/cmd/compile/internal/gc/testdata/ |
fp.go |
    242 // supports fused multiply-add instructions.
    244 {0.6046603, 0.9405091, 0.6645601, 1.2332485}, // fused multiply-add result: 1.2332486
    245 {0.67908466, 0.21855305, 0.20318687, 0.3516029}, // fused multiply-add result: 0.35160288
    246 {0.29311424, 0.29708257, 0.752573, 0.8396522}, // fused multiply-add result: 0.8396521
    247 {0.5305857, 0.2535405, 0.282081, 0.41660595}, // fused multiply-add result: 0.41660598
    248 {0.29711226, 0.89436173, 0.097454615, 0.36318043}, // fused multiply-add result: 0.36318046
    249 {0.6810783, 0.24151509, 0.31152245, 0.47601312}, // fused multiply-add result: 0.47601315
    250 {0.73023146, 0.18292491, 0.4283571, 0.5619346}, // fused multiply-add result: 0.56193465
    251 {0.89634174, 0.32208398, 0.7211478, 1.009845}, // fused multiply-add result: 1.0098451
    252 {0.6280982, 0.12675293, 0.2813303, 0.36094356}, // fused multiply-add result: 0.360943 [all...]
/prebuilts/go/linux-x86/src/cmd/compile/internal/gc/testdata/ |
fp.go |
    242 // supports fused multiply-add instructions.
    244 {0.6046603, 0.9405091, 0.6645601, 1.2332485}, // fused multiply-add result: 1.2332486
    245 {0.67908466, 0.21855305, 0.20318687, 0.3516029}, // fused multiply-add result: 0.35160288
    246 {0.29311424, 0.29708257, 0.752573, 0.8396522}, // fused multiply-add result: 0.8396521
    247 {0.5305857, 0.2535405, 0.282081, 0.41660595}, // fused multiply-add result: 0.41660598
    248 {0.29711226, 0.89436173, 0.097454615, 0.36318043}, // fused multiply-add result: 0.36318046
    249 {0.6810783, 0.24151509, 0.31152245, 0.47601312}, // fused multiply-add result: 0.47601315
    250 {0.73023146, 0.18292491, 0.4283571, 0.5619346}, // fused multiply-add result: 0.56193465
    251 {0.89634174, 0.32208398, 0.7211478, 1.009845}, // fused multiply-add result: 1.0098451
    252 {0.6280982, 0.12675293, 0.2813303, 0.36094356}, // fused multiply-add result: 0.360943 [all...]
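Each row in these fp.go tables holds x, y, and z as float32 values together with x*y+z computed with two roundings (once after the multiply, once after the add); the trailing comment records the single-rounding result a fused multiply-add instruction would produce, which differs in the last bit. A minimal Python sketch of the same comparison, using float64 intermediates to emulate single-precision FMA; since the product of two float32 values is exact in float64, one final rounding approximates a true FMA in all but rare double-rounding cases:

    import numpy as np

    def unfused(x, y, z):
        # round after the multiply, then again after the add (two roundings)
        return np.float32(x) * np.float32(y) + np.float32(z)

    def fused_approx(x, y, z):
        # compute in float64 and round once at the end; a float32 product
        # fits exactly in a float64, so this emulates a fused multiply-add
        # for almost all inputs
        return np.float32(np.float64(np.float32(x)) * np.float64(np.float32(y))
                          + np.float64(np.float32(z)))

    # first row of the fp.go table: the two results differ in the last bit
    x, y, z = 0.6046603, 0.9405091, 0.6645601
    print(unfused(x, y, z), fused_approx(x, y, z))   # per the table: 1.2332485 vs 1.2332486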
/external/tensorflow/tensorflow/python/layers/ |
normalization.py |
    97 fused: if `None` or `True`, use a faster, fused implementation if possible.
    139 fused=None,
    166 if fused is None:
    167 fused = True
    169 self.fused = fused
    219 if self.fused:
    220 # Currently fused batch norm doesn't support renorm. It also only supports
    224 self.fused = (not self.renorm an [all...]
normalization_test.py |
    42 def _simple_model(self, image, fused, freeze_mode):
    50 bn_layer = normalization_layers.BatchNormalization(fused=fused)
    286 bn = normalization_layers.BatchNormalization(axis=1, fused=True)
    519 axis=3, epsilon=epsilon, momentum=0.9, fused=True)
    560 axis=1, epsilon=epsilon, momentum=0.9, fused=True) [all...]
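As the docstring at normalization.py line 97 notes, `fused` defaults to `None`, which the layer resolves to the fused kernel when the configuration supports it (the comments at lines 220-224 show it falling back when renorm is enabled); passing `fused=True` requests it explicitly, as the tests above do. A minimal sketch against the TF 1.x API in this source tree; the shapes and hyperparameters are illustrative, not from the tests:

    import tensorflow as tf   # TensorFlow 1.x, matching this source tree

    x = tf.placeholder(tf.float32, [None, 28, 28, 64])   # 4-D NHWC activations
    # fused=True requests the single-kernel implementation; fused=None (the
    # default) lets the layer choose it automatically, falling back to the
    # composed ops when unsupported (e.g. renorm, non-4D inputs)
    bn = tf.layers.BatchNormalization(axis=3, momentum=0.9, fused=True)
    y = bn(x, training=True)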
/frameworks/compile/libbcc/include/bcc/ |
RSCompilerDriver.h | 155 const std::list<std::string>& fused,
|
/external/tensorflow/tensorflow/contrib/layers/python/layers/ |
layers_test.py | [all...]
/frameworks/compile/libbcc/lib/ |
RSCompilerDriver.cpp |
    308 const std::list<std::string>& fused,
    361 // Create fused kernels
    365 for (const std::string& nameOfFused : fused) {
    397 // Compile the new module with fused kernels
|
/packages/services/Car/tests/carservice_test/src/com/android/car/ |
VmsHalServiceSubscriptionEventTest.java | 142 int fused = v.get(VmsMessageWithLayerIntegerValuesIndex.LAYER_SUBTYPE); local
|
/build/soong/cc/config/ |
clang.go | 73 "-mno-fused-madd",
|
mips_device.go | 61 "-mno-fused-madd",
|
/external/tensorflow/tensorflow/compiler/tests/ |
image_ops_test.py |
    284 fused = False
    285 if not fused:
    286 # The tests are known to pass with the fused adjust_hue. We will enable
    287 # them when the fused implementation is the default.
    291 fused = False
|