HomeSort by relevance Sort by last modified time
    Searched full:storeu (Results 1 - 25 of 33) sorted by null

1 2

  /external/llvm/test/CodeGen/X86/
pr18846.ll 58 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4577) #1
70 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4547) #1
71 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4545) #1
72 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4543) #1
80 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4439) #1
81 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4437) #1
86 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> zeroinitializer) #1
87 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4399) #1
88 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> %add.i4397) #1
89 call void @llvm.x86.avx.storeu.ps.256(i8* undef, <8 x float> undef) #1
    [all...]
sse-intrinsics-x86-upgrade.ll 22 call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
25 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
pmovext.ll 16 tail call void @llvm.x86.sse2.storeu.dq(i8* %5, <16 x i8> %6) nounwind
21 declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
vec-loadsingles-alignment.ll 30 tail call void @llvm.x86.avx.storeu.dq.256(i8* bitcast ([8 x i32]* @d to i8*), <32 x i8> %8)
34 declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
avx-intrinsics-x86-upgrade.ll 394 call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
397 declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
411 call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
414 declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
423 call void @llvm.x86.sse.storeu.ps(i8* %a0, <4 x float> %a1)
426 declare void @llvm.x86.sse.storeu.ps(i8*, <4 x float>) nounwind
444 call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
447 declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
461 call void @llvm.x86.avx.storeu.pd.256(i8* %a0, <4 x double> %a2)
464 declare void @llvm.x86.avx.storeu.pd.256(i8*, <4 x double>) nounwind
    [all...]
sse2-intrinsics-x86-upgrade.ll 101 call void @llvm.x86.sse2.storeu.dq(i8* %a0, <16 x i8> %a2)
104 declare void @llvm.x86.sse2.storeu.dq(i8*, <16 x i8>) nounwind
118 call void @llvm.x86.sse2.storeu.pd(i8* %a0, <2 x double> %a2)
121 declare void @llvm.x86.sse2.storeu.pd(i8*, <2 x double>) nounwind
avx512bwvl-intrinsics-upgrade.ll 124 declare void @llvm.x86.avx512.mask.storeu.b.128(i8*, <16 x i8>, i16)
133 call void @llvm.x86.avx512.mask.storeu.b.128(i8* %ptr1, <16 x i8> %x1, i16 %x2)
134 call void @llvm.x86.avx512.mask.storeu.b.128(i8* %ptr2, <16 x i8> %x1, i16 -1)
138 declare void @llvm.x86.avx512.mask.storeu.b.256(i8*, <32 x i8>, i32)
147 call void @llvm.x86.avx512.mask.storeu.b.256(i8* %ptr1, <32 x i8> %x1, i32 %x2)
148 call void @llvm.x86.avx512.mask.storeu.b.256(i8* %ptr2, <32 x i8> %x1, i32 -1)
152 declare void @llvm.x86.avx512.mask.storeu.w.128(i8*, <8 x i16>, i8)
161 call void @llvm.x86.avx512.mask.storeu.w.128(i8* %ptr1, <8 x i16> %x1, i8 %x2)
162 call void @llvm.x86.avx512.mask.storeu.w.128(i8* %ptr2, <8 x i16> %x1, i8 -1)
166 declare void @llvm.x86.avx512.mask.storeu.w.256(i8*, <16 x i16>, i16)
    [all...]
avx512-intrinsics-upgrade.ll 193 call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr, <16 x float> %data, i16 %mask)
194 call void @llvm.x86.avx512.mask.storeu.ps.512(i8* %ptr2, <16 x float> %data, i16 -1)
198 declare void @llvm.x86.avx512.mask.storeu.ps.512(i8*, <16 x float>, i16 )
207 call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr, <8 x double> %data, i8 %mask)
208 call void @llvm.x86.avx512.mask.storeu.pd.512(i8* %ptr2, <8 x double> %data, i8 -1)
212 declare void @llvm.x86.avx512.mask.storeu.pd.512(i8*, <8 x double>, i8)
249 call void @llvm.x86.avx512.mask.storeu.q.512(i8* %ptr1, <8 x i64> %x1, i8 %x2)
250 call void @llvm.x86.avx512.mask.storeu.q.512(i8* %ptr2, <8 x i64> %x1, i8 -1)
254 declare void @llvm.x86.avx512.mask.storeu.q.512(i8*, <8 x i64>, i8)
263 call void @llvm.x86.avx512.mask.storeu.d.512(i8* %ptr1, <16 x i32> %x1, i16 %x2)
    [all...]
avx512bw-intrinsics-upgrade.ll 5 declare void @llvm.x86.avx512.mask.storeu.b.512(i8*, <64 x i8>, i64)
23 call void @llvm.x86.avx512.mask.storeu.b.512(i8* %ptr1, <64 x i8> %x1, i64 %x2)
24 call void @llvm.x86.avx512.mask.storeu.b.512(i8* %ptr2, <64 x i8> %x1, i64 -1)
28 declare void @llvm.x86.avx512.mask.storeu.w.512(i8*, <32 x i16>, i32)
46 call void @llvm.x86.avx512.mask.storeu.w.512(i8* %ptr1, <32 x i16> %x1, i32 %x2)
47 call void @llvm.x86.avx512.mask.storeu.w.512(i8* %ptr2, <32 x i16> %x1, i32 -1)
    [all...]
avx512vl-intrinsics-upgrade.ll 448 declare void @llvm.x86.avx512.mask.storeu.pd.128(i8*, <2 x double>, i8)
457 call void @llvm.x86.avx512.mask.storeu.pd.128(i8* %ptr1, <2 x double> %x1, i8 %x2)
458 call void @llvm.x86.avx512.mask.storeu.pd.128(i8* %ptr2, <2 x double> %x1, i8 -1)
462 declare void @llvm.x86.avx512.mask.storeu.pd.256(i8*, <4 x double>, i8)
471 call void @llvm.x86.avx512.mask.storeu.pd.256(i8* %ptr1, <4 x double> %x1, i8 %x2)
472 call void @llvm.x86.avx512.mask.storeu.pd.256(i8* %ptr2, <4 x double> %x1, i8 -1)
504 declare void @llvm.x86.avx512.mask.storeu.ps.128(i8*, <4 x float>, i8)
513 call void @llvm.x86.avx512.mask.storeu.ps.128(i8* %ptr1, <4 x float> %x1, i8 %x2)
514 call void @llvm.x86.avx512.mask.storeu.ps.128(i8* %ptr2, <4 x float> %x1, i8 -1)
518 declare void @llvm.x86.avx512.mask.storeu.ps.256(i8*, <8 x float>, i8
    [all...]
avx2-intrinsics-x86-upgrade.ll 378 call void @llvm.x86.avx.storeu.dq.256(i8* %a0, <32 x i8> %a2)
381 declare void @llvm.x86.avx.storeu.dq.256(i8*, <32 x i8>) nounwind
  /external/python/cpython3/Modules/_blake2/impl/
blake2b.c 320 STOREU( &S->h[0], _mm_xor_si128( LOADU( &S->h[0] ), row1l ) );
321 STOREU( &S->h[2], _mm_xor_si128( LOADU( &S->h[2] ), row1h ) );
324 STOREU( &S->h[4], _mm_xor_si128( LOADU( &S->h[4] ), row2l ) );
325 STOREU( &S->h[6], _mm_xor_si128( LOADU( &S->h[6] ), row2h ) );
blake2s-round.h 20 #define STOREU(p,r) _mm_storeu_si128((__m128i *)(p), r)
blake2s.c 298 STOREU( &S->h[0], _mm_xor_si128( ff0, _mm_xor_si128( row1, row3 ) ) );
299 STOREU( &S->h[4], _mm_xor_si128( ff1, _mm_xor_si128( row2, row4 ) ) );
blake2b-round.h 20 #define STOREU(p,r) _mm_storeu_si128((__m128i *)(p), r)
  /external/llvm/lib/IR/
AutoUpgrade.cpp 264 Name.startswith("sse.storeu.") ||
265 Name.startswith("sse2.storeu.") ||
266 Name.startswith("avx.storeu.") ||
267 Name.startswith("avx512.mask.storeu.p") ||
268 Name.startswith("avx512.mask.storeu.b.") ||
269 Name.startswith("avx512.mask.storeu.w.") ||
270 Name.startswith("avx512.mask.storeu.d.") ||
271 Name.startswith("avx512.mask.storeu.q.") ||
783 } else if (IsX86 && (Name.startswith("sse.storeu.") ||
784 Name.startswith("sse2.storeu.") ||
    [all...]
  /prebuilts/gcc/linux-x86/host/x86_64-w64-mingw32-4.8/x86_64-w64-mingw32/include/
fvec.h 119 inline void storeu(float *p,const F32vec4 &a) { _mm_storeu_ps(p,a); } function
dvec.h 823 inline void storeu(double *p,const F64vec2 &a) { _mm_storeu_pd(p,a); } function
    [all...]