/external/tensorflow/tensorflow/contrib/tensorrt/segment/ |
segment_test.cc |
  129 // add0 add1
  141 TF_Operation* add0 = Add(feed, feed, graph, s, "add0"); local
  145 TF_Operation* add2 = Add(add0, add1, graph, s, "add2");
  147 TF_Operation* add3 = Add(add0, add2, graph, s, "add3");
  160 MakeCandidateFn({"add0", "add1", "add2", "add3", "add4"}),
  166 std::vector<string> expected{"add0", "add1", "add2", "add3", "add4"};
  179 // add2 is not a TRT candidate so add0/add3 cannot be formed as a
  184 // add0 add1
  196 TF_Operation* add0 = Add(feed, feed, graph, s, "add0") local
  244 TF_Operation* add0 = Add(feed, feed, graph, s, "add0"); local
  318 TF_Operation* add0 = Add(feed, feed, graph, s, "add0"); local
  [all...] |
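The segment_test.cc matches build a small chain of Add nodes (add0 feeding both add2 and add3) through a test-local Add() helper whose body is not part of the snippet. A minimal sketch of what such a helper could look like with the public TensorFlow C API, assuming the "Add" op and letting the dtype be inferred from the inputs; the helper name and argument order simply mirror the snippet and may differ from the real test.

    #include "tensorflow/c/c_api.h"

    // Hypothetical reconstruction of the test's Add() helper: wire two existing
    // operations into a new "Add" node named `name`. Only illustrates the
    // C-API calls involved; the real helper in segment_test.cc may differ.
    static TF_Operation* Add(TF_Operation* l, TF_Operation* r,
                             TF_Graph* graph, TF_Status* s, const char* name) {
      TF_OperationDescription* desc = TF_NewOperation(graph, "Add", name);
      TF_AddInput(desc, TF_Output{l, 0});   // left operand, output slot 0
      TF_AddInput(desc, TF_Output{r, 0});   // right operand, output slot 0
      return TF_FinishOperation(desc, s);   // node is added to `graph`, errors land in `s`
    }

With a helper like this, add3 at line 147 depends on add0 both directly and through add2, which is exactly the diamond the segmenter test then hands to MakeCandidateFn.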
/external/vulkan-validation-layers/libs/glm/detail/ |
intrinsic_geometric.inl |
  56 __m128 add0 = _mm_add_ps(mul0, swp0);
  57 __m128 swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3));
  58 __m128 add1 = _mm_add_ps(add0, swp1);
  68 __m128 add0 = _mm_add_ps(mov0, mul0);
  69 __m128 swp1 = _mm_shuffle_ps(add0, add0, 1);
  70 __m128 add1 = _mm_add_ss(add0, swp1);
  137 __m128 add0 = _mm_add_ps(mul3, sqt0);
  138 __m128 mul4 = _mm_mul_ps(add0, N)
  [all...] |
intrinsic_common.inl |
  195 __m128 add0 = _mm_add_ps(x, or0);
  196 __m128 sub0 = _mm_sub_ps(add0, or0);
  205 __m128 add0 = _mm_add_ps(x, or0);
  206 __m128 sub0 = _mm_sub_ps(add0, or0);
  215 __m128 add0 = _mm_add_ps(rnd0, and0);
  216 return add0;
  260 __m128 add0 = _mm_add_ps(mul0, mul1);
  261 return add0;
|
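Lines 56-58 of intrinsic_geometric.inl are the tail of GLM's SSE dot product: multiply lane-wise, then fold the four products together with two shuffle-and-add steps. A minimal sketch of the full pattern follows; the mul0/swp0 setup before line 56 is not in the snippet, so those two statements are reconstructed from the visible names.

    #include <xmmintrin.h>  // SSE1 is enough for every intrinsic used here

    // Dot product of two 4-float vectors using the shuffle/add reduction the
    // matched lines suggest; the result is broadcast to all four lanes.
    static inline __m128 dot4_ps(__m128 v1, __m128 v2) {
      __m128 mul0 = _mm_mul_ps(v1, v2);                                   // per-lane products
      __m128 swp0 = _mm_shuffle_ps(mul0, mul0, _MM_SHUFFLE(2, 3, 0, 1));  // swap lanes within each pair
      __m128 add0 = _mm_add_ps(mul0, swp0);                               // [p0+p1, p0+p1, p2+p3, p2+p3]
      __m128 swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3));  // reverse lane order
      __m128 add1 = _mm_add_ps(add0, swp1);                               // full sum in every lane
      return add1;
    }

The second group of matches (68-70) is the same reduction finished with _mm_add_ss, so only lane 0 ends up holding the scalar result.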
/prebuilts/ndk/r16/sources/third_party/vulkan/src/libs/glm/detail/ |
intrinsic_geometric.inl |
  56 __m128 add0 = _mm_add_ps(mul0, swp0);
  57 __m128 swp1 = _mm_shuffle_ps(add0, add0, _MM_SHUFFLE(0, 1, 2, 3));
  58 __m128 add1 = _mm_add_ps(add0, swp1);
  68 __m128 add0 = _mm_add_ps(mov0, mul0);
  69 __m128 swp1 = _mm_shuffle_ps(add0, add0, 1);
  70 __m128 add1 = _mm_add_ss(add0, swp1);
  137 __m128 add0 = _mm_add_ps(mul3, sqt0);
  138 __m128 mul4 = _mm_mul_ps(add0, N)
  [all...] |
intrinsic_common.inl |
  195 __m128 add0 = _mm_add_ps(x, or0);
  196 __m128 sub0 = _mm_sub_ps(add0, or0);
  205 __m128 add0 = _mm_add_ps(x, or0);
  206 __m128 sub0 = _mm_sub_ps(add0, or0);
  215 __m128 add0 = _mm_add_ps(rnd0, and0);
  216 return add0;
  260 __m128 add0 = _mm_add_ps(mul0, mul1);
  261 return add0;
|
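Both copies of intrinsic_common.inl (the standalone GLM and the NDK prebuilt above) hit the same matches. Lines 195-196 and 205-206 are the classic add-then-subtract-a-magic-constant rounding trick for pre-SSE4.1 code, where or0 is 2^23 carrying x's sign. A sketch of that trick under that assumption, since the snippet does not show how or0 is built:

    #include <xmmintrin.h>

    // Round each lane of x to the nearest integer without SSE4.1's _mm_round_ps.
    // The sign mask and 2^23 constant are my reconstruction of `or0`; the trick
    // is only valid for |x| < 2^23.
    static inline __m128 round_nearest_ps(__m128 x) {
      const __m128 sgn_mask = _mm_set1_ps(-0.0f);        // 0x80000000 in every lane
      const __m128 magic    = _mm_set1_ps(8388608.0f);   // 2^23: floats lose their fraction here
      __m128 sgn0 = _mm_and_ps(sgn_mask, x);             // sign of x, nothing else
      __m128 or0  = _mm_or_ps(sgn0, magic);              // +/- 2^23 with x's sign
      __m128 add0 = _mm_add_ps(x, or0);                  // the fraction is rounded away (line 195)
      __m128 sub0 = _mm_sub_ps(add0, or0);               // restore the magnitude: x rounded (line 196)
      return sub0;
    }

The last pair of matches (260-261) is unrelated to rounding: judging by the mul0/mul1 names, it is the final add of a two-product blend in the mix/lerp style.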
/external/llvm/test/CodeGen/SystemZ/ |
frame-08.ll |
  41 %add0 = add i32 %l0, %l0
  42 %add1 = add i32 %l1, %add0
  54 store volatile i32 %add0, i32 *%ptr
  93 %add0 = add i32 %l0, %l0
  94 %add1 = add i32 %l1, %add0
  98 store volatile i32 %add0, i32 *%ptr
  144 %add0 = add i32 %l0, %l0
  145 %add1 = add i32 %l1, %add0
  157 store volatile i32 %add0, i32 *%ptr
  195 %add0 = add i32 %l
  [all...] |
frame-04.ll |
  49 %add0 = fadd fp128 %l0, %l0
  50 %add1 = fadd fp128 %l1, %add0
  57 store volatile fp128 %add0, fp128 *%ptr
  104 %add0 = fadd fp128 %l0, %l0
  105 %add1 = fadd fp128 %l1, %add0
  111 store volatile fp128 %add0, fp128 *%ptr
  147 %add0 = fadd fp128 %l0, %l0
  148 %add1 = fadd fp128 %l1, %add0
  152 store volatile fp128 %add0, fp128 *%ptr
  178 %add0 = fadd fp128 %l
  [all...] |
frame-05.ll |
  49 %add0 = add i32 %l0, %l0
  50 %add1 = add i32 %l1, %add0
  63 store volatile i32 %add0, i32 *%ptr
  116 %add0 = add i32 %l0, %l0
  117 %add1 = add i32 %l1, %add0
  129 store volatile i32 %add0, i32 *%ptr
  172 %add0 = add i32 %l0, %l0
  173 %add1 = add i32 %l1, %add0
  178 store volatile i32 %add0, i32 *%ptr
  207 %add0 = add i32 %l
  [all...] |
frame-06.ll |
  46 %add0 = add i64 %l0, %l0
  47 %add1 = add i64 %l1, %add0
  60 store volatile i64 %add0, i64 *%ptr
  113 %add0 = add i64 %l0, %l0
  114 %add1 = add i64 %l1, %add0
  126 store volatile i64 %add0, i64 *%ptr
  169 %add0 = add i64 %l0, %l0
  170 %add1 = add i64 %l1, %add0
  175 store volatile i64 %add0, i64 *%ptr
  204 %add0 = add i64 %l
  [all...] |
frame-02.ll |
  56 %add0 = fadd float %l0, %l0
  57 %add1 = fadd float %l1, %add0
  72 store volatile float %add0, float *%ptr
  137 %add0 = fadd float %l0, %l0
  138 %add1 = fadd float %l1, %add0
  152 store volatile float %add0, float *%ptr
  197 %add0 = fadd float %l0, %l0
  198 %add1 = fadd float %l1, %add0
  206 store volatile float %add0, float *%ptr
  240 %add0 = fadd float %l
  [all...] |
frame-03.ll |
  58 %add0 = fadd double %l0, %l0
  59 %add1 = fadd double %l1, %add0
  74 store volatile double %add0, double *%ptr
  139 %add0 = fadd double %l0, %l0
  140 %add1 = fadd double %l1, %add0
  154 store volatile double %add0, double *%ptr
  199 %add0 = fadd double %l0, %l0
  200 %add1 = fadd double %l1, %add0
  208 store volatile double %add0, double *%ptr
  242 %add0 = fadd double %l
  [all...] |
frame-07.ll |
  87 %add0 = fadd double %l0, %l0
  88 %add1 = fadd double %l1, %add0
  103 store volatile double %add0, double *%ptr
  216 %add0 = fadd double %l0, %l0
  217 %add1 = fadd double %l1, %add0
  232 store volatile double %add0, double *%ptr
|
frame-09.ll |
  80 %add0 = add i32 %l0, %l0
  81 %add1 = add i32 %l1, %add0
  93 store volatile i32 %add0, i32 *%ptr
|
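Every frame-NN.ll entry above repeats the same shape: each loaded value %lN feeds a chain of adds (%add0, %add1, ...) and is later written back with a volatile store, so all of the values stay live at once and the SystemZ backend has to spill or use its full callee-saved register set. A rough C++ analogue of that shape, with made-up names, just to show the kind of source such a frame test corresponds to:

    // Hypothetical source-level analogue of the frame-*.ll pattern: enough
    // simultaneously-live values that the prologue/epilogue must save and
    // restore callee-saved registers (or spill to the stack frame).
    void keep_many_live(volatile int* p) {
      int l0 = p[0], l1 = p[1], l2 = p[2], l3 = p[3], l4 = p[4];
      int add0 = l0 + l0;    // %add0 = add i32 %l0, %l0
      int add1 = l1 + add0;  // %add1 = add i32 %l1, %add0
      int add2 = l2 + add1;
      int add3 = l3 + add2;
      int add4 = l4 + add3;
      // volatile stores keep every addN observable, so none can be folded away
      p[0] = add0; p[1] = add1; p[2] = add2; p[3] = add3; p[4] = add4;
    }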
fp-add-01.ll |
  106 %add0 = fadd float %ret, %val0
  107 %add1 = fadd float %add0, %val1
|
fp-add-02.ll |
  107 %add0 = fadd double %ret, %val0
  108 %add1 = fadd double %add0, %val1
|
int-add-05.ll |
  128 %add0 = add i64 %ret, %val0
  129 %add1 = add i64 %add0, %val1
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
return.ll |
  24 %add0 = fadd double %a0, %b0
  28 %add2 = fadd double %add0, %add1
|
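The return.ll match is the shape the SLP vectorizer looks for: two independent fadds whose results are then combined (the definition of %add1 sits between the two matched lines and is not shown). A loose C++ analogue, with illustrative names, of the scalar code this corresponds to and that SLP can rewrite as a single <2 x double> add:

    // Two independent adds on adjacent values, then a combine: SLP can merge
    // add0/add1 into one vector fadd. Names are illustrative only.
    double sum_pairs(double a0, double a1, double b0, double b1) {
      double add0 = a0 + b0;   // %add0 = fadd double %a0, %b0
      double add1 = a1 + b1;   // the unshown %add1 presumably mirrors this
      return add0 + add1;      // %add2 = fadd double %add0, %add1
    }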
/external/swiftshader/third_party/LLVM/test/Transforms/SimplifyLibCalls/ |
StrSpn.ll |
  38 %add0 = add i64 %test1, %test3
  40 ret i64 %add0
|
/external/llvm/test/CodeGen/AMDGPU/ |
ds_read2_superreg.ll |
  41 ; CI-DAG: v_add_f32_e32 v[[ADD0:[0-9]+]], v[[REG_Z]], v[[REG_X]]
  43 ; CI: v_add_f32_e32 v[[ADD2:[0-9]+]], v[[ADD1]], v[[ADD0]]
  55 %add0 = fadd float %elt0, %elt2
  57 %add2 = fadd float %add0, %add1
  67 ; CI-DAG: v_add_f32_e32 v[[ADD0:[0-9]+]], v[[REG_Z]], v[[REG_X]]
  68 ; CI-DAG: v_add_f32_e32 v[[ADD1:[0-9]+]], v[[REG_Y]], v[[ADD0]]
  79 %add0 = fadd float %elt0, %elt2
  80 %add1 = fadd float %add0, %elt1
|
s_addk_i32.ll |
  23 %add0 = add i32 %a, 65
  25 store i32 %add0, i32 addrspace(1)* %out0
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
bfloat16_conversion_folding_test.cc |
  88 HloInstruction* add0 = builder.AddInstruction( local
  91 builder.AddInstruction(HloInstruction::CreateConvert(bf16_shape, add0));
  105 EXPECT_EQ(add0->shape().element_type(), BF16);
  107 EXPECT_EQ(add1->operand(0), add0);
|
bfloat16_normalization_test.cc |
  89 HloInstruction* add0 = builder.AddInstruction( local
  93 HloInstruction::CreateBinary(f32_shape, HloOpcode::kAdd, add0, c));
  101 EXPECT_EQ(add0->shape().element_type(), BF16);
|
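Both bfloat16 tests above use the same HloComputation::Builder idiom: create add0 in F32, feed it into a convert (or another add), run the pass, then check that add0's element type ended up as BF16. A minimal sketch of the graph those matches imply; the parameter shapes and names are assumptions, since the snippet only shows the add and the convert.

    #include "tensorflow/compiler/xla/service/hlo_computation.h"
    #include "tensorflow/compiler/xla/service/hlo_instruction.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Build an F32 add0 feeding a BF16 convert, the input the folding pass rewrites.
    // Shapes and parameter names are invented for the sketch.
    void BuildAddThenConvert() {
      xla::HloComputation::Builder builder("add_then_convert");
      xla::Shape f32_shape = xla::ShapeUtil::MakeShape(xla::F32, {2, 4});
      xla::Shape bf16_shape = xla::ShapeUtil::MakeShape(xla::BF16, {2, 4});
      xla::HloInstruction* a = builder.AddInstruction(
          xla::HloInstruction::CreateParameter(0, f32_shape, "a"));
      xla::HloInstruction* b = builder.AddInstruction(
          xla::HloInstruction::CreateParameter(1, f32_shape, "b"));
      xla::HloInstruction* add0 = builder.AddInstruction(      // line 88's add0
          xla::HloInstruction::CreateBinary(f32_shape, xla::HloOpcode::kAdd, a, b));
      builder.AddInstruction(                                  // line 91's convert
          xla::HloInstruction::CreateConvert(bf16_shape, add0));
      // After the folding pass runs, the test expects
      // add0->shape().element_type() == BF16 (line 105).
    }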
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
while_transformer_test.cc |
  70 auto add0 = builder.AddInstruction(HloInstruction::CreateBinary( local
  82 ? builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}))
  83 : builder.AddInstruction(HloInstruction::CreateTuple({add1, add0}));
|
/device/linaro/bootloader/edk2/IntelSiliconPkg/ |
IntelSiliconPkg.dec | 28 # {A9F8D54E-1107-4F0A-ADD0-4587E7A4A735}
|
/external/vulkan-validation-layers/libs/glm/gtx/ |
simd_quat.inl |
  129 __m128 add0 = _mm_dp_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), 0xff);
  135 __m128 add0 = _mm_add_ps(mul0, _mm_movehl_ps(mul0, mul0));
  136 add0 = _mm_add_ss(add0, _mm_shuffle_ps(add0, add0, 1));
  155 //__m128 xxyy = _mm_shuffle_ps(add0, add1, _MM_SHUFFLE(0, 0, 0, 0));
  165 _mm_store_ss(&x, add0);
|
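The simd_quat.inl matches show the same horizontal reduction written twice: line 129 uses SSE4.1's _mm_dp_ps (with a sign folded into the weight vector), while lines 135-136 are the pre-SSE4.1 fallback built from movehl/shuffle/add. A small sketch of the two paths side by side; the signed mask and the surrounding quaternion math are not reproduced here, so plain all-ones weights are assumed.

    #include <smmintrin.h>  // SSE4.1 for _mm_dp_ps; the earlier SSE headers come with it

    // SSE4.1 path: one dot-product instruction sums all four lanes of mul0.
    static inline float hsum_dp_ps(__m128 mul0) {
      __m128 add0 = _mm_dp_ps(mul0, _mm_set1_ps(1.0f), 0xff);  // line 129 uses a +/-1 weight vector instead
      return _mm_cvtss_f32(add0);
    }

    // Fallback path (lines 135-136): fold the high half onto the low half,
    // then fold lane 1 onto lane 0.
    static inline float hsum_fallback(__m128 mul0) {
      __m128 add0 = _mm_add_ps(mul0, _mm_movehl_ps(mul0, mul0)); // lanes {0+2, 1+3, ...}
      add0 = _mm_add_ss(add0, _mm_shuffle_ps(add0, add0, 1));    // lane 0 now holds the full sum
      return _mm_cvtss_f32(add0);
    }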