/prebuilts/ndk/r16/sources/third_party/vulkan/src/libs/glm/gtx/ |
simd_quat.inl |
    129  __m128 add0 = _mm_dp_ps(mul0, _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), 0xff);
    135  __m128 add0 = _mm_add_ps(mul0, _mm_movehl_ps(mul0, mul0));
    136  add0 = _mm_add_ss(add0, _mm_shuffle_ps(add0, add0, 1));
    155  //__m128 xxyy = _mm_shuffle_ps(add0, add1, _MM_SHUFFLE(0, 0, 0, 0));
    165  _mm_store_ss(&x, add0);
|
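The glm hits above show two SSE ways of reducing a four-lane product to a scalar: a single _mm_dp_ps on SSE4.1, and an add/shuffle cascade as the fallback. The following is a minimal sketch of both reduction patterns for a plain dot product, not the glm source itself; the quaternion code at line 129 additionally flips one lane's sign via _mm_set_ps(1.0f, -1.0f, 1.0f, 1.0f), which this sketch omits, and the helper names are hypothetical.

#include <smmintrin.h>  /* SSE4.1, for _mm_dp_ps; earlier SSE headers are pulled in */

/* SSE4.1 path: 0xff = multiply all four lanes, broadcast the sum to every lane. */
static inline float dot4_dpps(__m128 a, __m128 b)
{
    return _mm_cvtss_f32(_mm_dp_ps(a, b, 0xff));
}

/* SSE2 fallback: fold lanes 2,3 onto lanes 0,1, then fold lane 1 onto lane 0. */
static inline float dot4_sse2(__m128 a, __m128 b)
{
    __m128 mul0 = _mm_mul_ps(a, b);
    __m128 add0 = _mm_add_ps(mul0, _mm_movehl_ps(mul0, mul0));
    add0 = _mm_add_ss(add0, _mm_shuffle_ps(add0, add0, 1));
    float x;
    _mm_store_ss(&x, add0);
    return x;
}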
/external/llvm/test/CodeGen/SystemZ/ |
fp-move-02.ll |
    218  %add0 = fadd double %start, %double0
    219  %add1 = fadd double %add0, %double1
    251  %int0 = phi i64 [ 0, %entry ], [ %add0, %loop ]
    263  %add0 = add i64 %int0, %bias
    280  %conv0 = bitcast i64 %add0 to double
    360  %add0 = add i64 %conv0, %bias
    361  store volatile i64 %add0, i64 *@iptr
|
int-add-11.ll |
    158  %add0 = add i32 %val0, 127
    177  %new0 = phi i32 [ %val0, %entry ], [ %add0, %add ]
    241  %add0 = add i32 %val0, -128
    260  %new0 = phi i32 [ %val0, %entry ], [ %add0, %add ]
|
int-add-12.ll |
    157  %add0 = add i64 %val0, 127
    176  %new0 = phi i64 [ %val0, %entry ], [ %add0, %add ]
    240  %add0 = add i64 %val0, -128
    259  %new0 = phi i64 [ %val0, %entry ], [ %add0, %add ]
|
int-add-02.ll |
    163  %add0 = add i32 %ret, %val0
    164  %add1 = add i32 %add0, %val1
|
int-add-03.ll |
    168  %add0 = add i64 %ret, %ext0
    169  %add1 = add i64 %add0, %ext1
|
int-add-04.ll |
    168  %add0 = add i64 %ret, %ext0
    169  %add1 = add i64 %add0, %ext1
|
int-add-08.ll |
    136  %add0 = add i128 %ret, %val0
    137  %add1 = add i128 %add0, %val1
|
vec-combine-02.ll |
    395  %add0 = fadd float %elt0, %elt2
    397  %ret = fmul float %add0, %add1
    429  %add0 = add i32 %elt0, %elt2
    431  %ret = or i32 %add0, %add1
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
buffer_liveness_test.cc |
    494  auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
    511  builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
    523  // 1) (input_tuple_element, output_tuple_element) = ('tuple_element0', 'add0')
    526  // Tuple output element 'add0' does not depend on input 'tuple_element1'.
    558  auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
    573  builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
    585  // 1) (input_tuple_element, output_tuple_element) = ('tuple_element0', 'add0')
    588  // The first tuple element pair output 'add0', has no dependency on second
    595  // compute that all references to 'tuple_element0' are executed before 'add0'
    [all...]
reshape_mover_test.cc |
    518  // +- reshape3 - add0
    540  auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
    545  builder.AddInstruction(HloInstruction::CreateReshape(shape3, add0));
|
copy_insertion_test.cc |
    441  auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
    452  builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
    480  // add0 = Add(in0, 1)
    481  auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
    492  builder.AddInstruction(HloInstruction::CreateTuple({add0, data1, data2}));
    548  // add0 = Add(in0, 1)
    549  auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
    571  builder.AddInstruction(HloInstruction::CreateTuple({add0, nested_tuple}));
    573  builder.AddInstruction(HloInstruction::CreateTuple({add0, add1}));
    597  auto add0 = builder.AddInstruction(HloInstruction::CreateBinary
    [all...]
/art/compiler/optimizing/ |
scheduler_test.cc |
    226  HInstruction* add0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, i, c0);
    236  new (GetAllocator()) HArraySet(arr, add0, c0, DataType::Type::kInt32, 0);
    259  add0,
    312  loc2 = heap_location_collector.GetArrayHeapLocation(arr, add0);
|
load_store_analysis_test.cc |
    197  HInstruction* add0 = new (GetAllocator()) HAdd(DataType::Type::kInt32, index, c0);
    206  new (GetAllocator()) HArraySet(array, add0, c0, DataType::Type::kInt32, 0);
    220  entry->AddInstruction(add0);
    254  loc1 = heap_location_collector.GetArrayHeapLocation(array, add0);
|
/external/llvm/test/CodeGen/AMDGPU/ |
ds-sub-offset.ll |
    65  %add0 = add i32 123, %shl
    67  %ptr0 = inttoptr i32 %add0 to i32 addrspace(3)*
|
salu-to-valu.ll |
    334  %add0 = add i32 %elt0, %elt1
    335  %add1 = add i32 %add0, %elt2
    414  %add0 = add i32 %elt0, %elt1
    415  %add1 = add i32 %add0, %elt2
|
and.ll |
    231  %add0 = add i64 %and0, %c
    233  store volatile i64 %add0, i64 addrspace(1)* %out
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
loopfilter_neon.c |
    174  const uint8x8_t add0, const uint8x8_t add1,
    178  *sum = vaddw_u8(*sum, add0);
    184  const uint8x16_t add0,
    191  *sum0 = vaddw_u8(*sum0, vget_low_u8(add0));
    192  *sum1 = vaddw_u8(*sum1, vget_high_u8(add0));
    199  const uint8x8_t add0,
    202  filter_update_8(sub0, sub1, add0, add1, sum);
    207  const uint8x16_t sub0, const uint8x16_t sub1, const uint8x16_t add0,
    209  filter_update_16(sub0, sub1, add0, add1, sum0, sum1);
    215  const uint8x8_t add0, const uint8x8_t add1, const uint8x8_t in
    [all...]
highbd_loopfilter_neon.c |
    122  const uint16x8_t add0, const uint16x8_t add1,
    126  *sum = vaddq_u16(*sum, add0);
    132  const uint16x8_t add0,
    135  filter_update(sub0, sub1, add0, add1, sum);
    141  const uint16x8_t add0, const uint16x8_t add1, const uint16x8_t in,
    143  filter_update(sub0, sub1, add0, add1, sum);
|
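The libvpx NEON hits above all feed a widened 16-bit running sum: old 8-bit taps are dropped and new ones added with widening operations. Below is a sketch of that update pattern; it assumes the matching subtracts use vsubw_u8 (only the adds are visible in the hits), and filter_update_sketch is a hypothetical name, not the libvpx function.

#include <arm_neon.h>

/* Drop two old taps from the 16-bit accumulator and add two new ones. */
static inline void filter_update_sketch(const uint8x8_t sub0, const uint8x8_t sub1,
                                        const uint8x8_t add0, const uint8x8_t add1,
                                        uint16x8_t *sum) {
  *sum = vsubw_u8(*sum, sub0);  /* widening subtract: u16 lane minus u8 lane */
  *sum = vsubw_u8(*sum, sub1);
  *sum = vaddw_u8(*sum, add0);  /* widening add: u16 lane plus u8 lane */
  *sum = vaddw_u8(*sum, add1);
}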
/external/tensorflow/tensorflow/core/common_runtime/ |
constant_folding_test.cc |
    445  auto add0 = ops::Add(s.WithControlDependencies(c), rank, size);
    448  auto send0 = ops::_Send(s.WithOpName("send0"), add0, "send0", "sender", 0,
    587  auto add0 = ops::Add(s, rank, rank);
    588  auto send0 = ops::_Send(s.WithOpName("send0"), add0, "send0", "sender", 0,
|
/external/javassist/src/main/javassist/ |
CtNewMethod.java |
    413  * CtMethod addMethod = pool.getMethod("Sample", "add0");
    423  * public Object add0(Object[] args) {
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
fusion_merger_test.cc |
    144  auto add0 = builder.AddInstruction(HloInstruction::CreateBinary(
    147  data_shape_, HloOpcode::kMultiply, add0, one_vec));
|
/external/tensorflow/tensorflow/core/distributed_runtime/ |
executor_test.cc |
    327  auto add0 = test::graph::Add(g.get(), in0, in1);
    329  auto add2 = test::graph::Add(g.get(), add0, add1);
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
deblock_msa.c |
    661  v4i32 add0, add1, add2, add3;
    [all...]
 |
/external/vulkan-validation-layers/libs/glm/detail/ |
type_mat4x4.inl |
    680  typename tmat4x4<T, P>::col_type const Add0 = Mul0 + Mul1;
    686  typename tmat4x4<T, P>::col_type const Add2 = Add0 + Add1;
|