
Lines Matching full:align

15   %tmp1 = load %type1** %argtable, align 8
17 store <16 x i8> zeroinitializer, <16 x i8>* %tmp2, align 16
26 %tmp1 = load %type2** %argtable, align 8
28 store <8 x i8> zeroinitializer, <8 x i8>* %tmp2, align 8
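
The first matches (file lines 15-28) come from functions that load a pointer out of an argument table and then store a zero vector through it; the alignment on the store (16 vs. 8) is what steers the backend toward a 128-bit Q-register or a 64-bit D-register store. Below is a minimal self-contained sketch of the same shape, in the excerpts' legacy typed-pointer syntax; %illus.t1 and @zero_q16 are illustrative names, not taken from the file.

%illus.t1 = type { <16 x i8> }

define void @zero_q16(%illus.t1** %argtable) {
entry:
  ; load the 8-byte-aligned table entry, as in file line 15
  %tmp1 = load %illus.t1** %argtable, align 8
  %tmp2 = getelementptr inbounds %illus.t1* %tmp1, i64 0, i32 0
  ; a 16-byte-aligned zero store, eligible for a Q-register str
  store <16 x i8> zeroinitializer, <16 x i8>* %tmp2, align 16
  ret void
}
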
34 @globalArray64x2 = common global <2 x i64>* null, align 8
35 @globalArray32x4 = common global <4 x i32>* null, align 8
36 @globalArray16x8 = common global <8 x i16>* null, align 8
37 @globalArray8x16 = common global <16 x i8>* null, align 8
38 @globalArray64x1 = common global <1 x i64>* null, align 8
39 @globalArray32x2 = common global <2 x i32>* null, align 8
40 @globalArray16x4 = common global <4 x i16>* null, align 8
41 @globalArray8x8 = common global <8 x i8>* null, align 8
42 @floatglobalArray64x2 = common global <2 x double>* null, align 8
43 @floatglobalArray32x4 = common global <4 x float>* null, align 8
44 @floatglobalArray64x1 = common global <1 x double>* null, align 8
45 @floatglobalArray32x2 = common global <2 x float>* null, align 8
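
Each of the globals above (file lines 34-45) holds a pointer to an array of one vector type, and the matched load/store pairs that follow read an element out of a source array and write it through the corresponding global. A hedged sketch of one such function, reusing the @globalArray64x2 declaration above and inventing the function name and index:

@globalArray64x2 = common global <2 x i64>* null, align 8   ; as declared above

define void @copy64x2(<2 x i64>* %src) {
entry:
  %arrayidx = getelementptr inbounds <2 x i64>* %src, i64 1
  %tmp = load <2 x i64>* %arrayidx, align 16
  %tmp1 = load <2 x i64>** @globalArray64x2, align 8
  %arrayidx1 = getelementptr inbounds <2 x i64>* %tmp1, i64 1
  store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
  ret void
}
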
55 %tmp = load <2 x i64>* %arrayidx, align 16
56 %tmp1 = load <2 x i64>** @globalArray64x2, align 8
58 store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
69 %tmp = load <2 x i64>* %arrayidx, align 16
70 %tmp1 = load <2 x i64>** @globalArray64x2, align 8
72 store <2 x i64> %tmp, <2 x i64>* %arrayidx1, align 16
84 %tmp = load <4 x i32>* %arrayidx, align 16
85 %tmp1 = load <4 x i32>** @globalArray32x4, align 8
87 store <4 x i32> %tmp, <4 x i32>* %arrayidx1, align 16
98 %tmp = load <4 x i32>* %arrayidx, align 16
99 %tmp1 = load <4 x i32>** @globalArray32x4, align 8
101 store <4 x i32> %tmp, <4 x i32>* %arrayidx1, align 16
113 %tmp = load <8 x i16>* %arrayidx, align 16
114 %tmp1 = load <8 x i16>** @globalArray16x8, align 8
116 store <8 x i16> %tmp, <8 x i16>* %arrayidx1, align 16
127 %tmp = load <8 x i16>* %arrayidx, align 16
128 %tmp1 = load <8 x i16>** @globalArray16x8, align 8
130 store <8 x i16> %tmp, <8 x i16>* %arrayidx1, align 16
142 %tmp = load <16 x i8>* %arrayidx, align 16
143 %tmp1 = load <16 x i8>** @globalArray8x16, align 8
145 store <16 x i8> %tmp, <16 x i8>* %arrayidx1, align 16
156 %tmp = load <16 x i8>* %arrayidx, align 16
157 %tmp1 = load <16 x i8>** @globalArray8x16, align 8
159 store <16 x i8> %tmp, <16 x i8>* %arrayidx1, align 16
171 %tmp = load <1 x i64>* %arrayidx, align 8
172 %tmp1 = load <1 x i64>** @globalArray64x1, align 8
174 store <1 x i64> %tmp, <1 x i64>* %arrayidx1, align 8
185 %tmp = load <1 x i64>* %arrayidx, align 8
186 %tmp1 = load <1 x i64>** @globalArray64x1, align 8
188 store <1 x i64> %tmp, <1 x i64>* %arrayidx1, align 8
200 %tmp = load <2 x i32>* %arrayidx, align 8
201 %tmp1 = load <2 x i32>** @globalArray32x2, align 8
203 store <2 x i32> %tmp, <2 x i32>* %arrayidx1, align 8
214 %tmp = load <2 x i32>* %arrayidx, align 8
215 %tmp1 = load <2 x i32>** @globalArray32x2, align 8
217 store <2 x i32> %tmp, <2 x i32>* %arrayidx1, align 8
229 %tmp = load <4 x i16>* %arrayidx, align 8
230 %tmp1 = load <4 x i16>** @globalArray16x4, align 8
232 store <4 x i16> %tmp, <4 x i16>* %arrayidx1, align 8
243 %tmp = load <4 x i16>* %arrayidx, align 8
244 %tmp1 = load <4 x i16>** @globalArray16x4, align 8
246 store <4 x i16> %tmp, <4 x i16>* %arrayidx1, align 8
258 %tmp = load <8 x i8>* %arrayidx, align 8
259 %tmp1 = load <8 x i8>** @globalArray8x8, align 8
261 store <8 x i8> %tmp, <8 x i8>* %arrayidx1, align 8
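
The groups above repeat in pairs for every 128-bit (align 16) and 64-bit (align 8) vector type, which likely corresponds to a constant-index and a variable-index version of each function; the variable form exercises register-offset addressing for Q- and D-register loads and stores. A sketch of the variable-index shape, with made-up names:

@globalArray8x8 = common global <8 x i8>* null, align 8   ; as declared above

define void @copy8x8_idx(<8 x i8>* %src, i32 %n) {
entry:
  ; a runtime index forces register-offset (or add + ldr) addressing
  %idx = sext i32 %n to i64
  %arrayidx = getelementptr inbounds <8 x i8>* %src, i64 %idx
  %tmp = load <8 x i8>* %arrayidx, align 8
  %tmp1 = load <8 x i8>** @globalArray8x8, align 8
  %arrayidx1 = getelementptr inbounds <8 x i8>* %tmp1, i64 %idx
  store <8 x i8> %tmp, <8 x i8>* %arrayidx1, align 8
  ret void
}
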
267 @str = global [63 x i8] c"Test case for rdar://13258794: LDUR/STUR for D and Q registers\00", align 1
273 %0 = load <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
281 %0 = load <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i32>*), align 8
289 %0 = load <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i16>*), align 8
297 %0 = load <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i8>*), align 8
305 %0 = load <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
313 %0 = load <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i32>*), align 16
321 %0 = load <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i16>*), align 16
329 %0 = load <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <16 x i8>*), align 16
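
Per the @str comment at file line 267 (rdar://13258794), the loads above read from byte offset 3 into the string. That offset is not a multiple of the access size, so it cannot be encoded in the scaled-immediate ldr forms, and the backend should fall back to the unscaled LDUR for every D- and Q-register width. A complete-function sketch of one of them; the function name is illustrative:

@str = global [63 x i8] c"Test case for rdar://13258794: LDUR/STUR for D and Q registers\00", align 1   ; as at file line 267

define <2 x i64> @ldur_q() {
entry:
  ; offset 3 is not a multiple of 16, so no scaled-immediate ldr encoding
  ; exists; the expected selection is an unscaled "ldur q0, [xN, #3]"
  %0 = load <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
  ret <2 x i64> %0
}
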
338 %0 = load <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
339 store <1 x i64> %0, <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <1 x i64>*), align 8
348 %0 = load <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i32>*), align 8
349 store <2 x i32> %0, <2 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <2 x i32>*), align 8
358 %0 = load <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i16>*), align 8
359 store <4 x i16> %0, <4 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <4 x i16>*), align 8
368 %0 = load <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i8>*), align 8
369 store <8 x i8> %0, <8 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <8 x i8>*), align 8
378 %0 = load <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <2 x i64>*), align 16
379 store <2 x i64> %0, <2 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <2 x i64>*), align 16
388 %0 = load <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <4 x i32>*), align 16
389 store <4 x i32> %0, <4 x i32>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <4 x i32>*), align 16
398 %0 = load <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <8 x i16>*), align 16
399 store <8 x i16> %0, <8 x i16>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <8 x i16>*), align 16
408 %0 = load <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <16 x i8>*), align 16
409 store <16 x i8> %0, <16 x i8>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <16 x i8>*), align 16
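
The pairs above combine a load at offset 3 with a store at offset 4, exercising LDUR and STUR together in one function for each register width. A sketch of the D-register case, mirroring file lines 338-339:

define void @ldur_stur_d() {
entry:
  ; assumes the @str declaration shown earlier; load at offset 3, store
  ; at offset 4, expecting an ldur/stur pair on a D register
  %0 = load <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 3) to <1 x i64>*), align 8
  store <1 x i64> %0, <1 x i64>* bitcast (i8* getelementptr inbounds ([63 x i8]* @str, i64 0, i64 4) to <1 x i64>*), align 8
  ret void
}
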
423 %pix_sp0.0.copyload = load i8* %addr, align 1
435 %pix_sp0.0.copyload = load i8* %addr, align 1
447 %pix_sp0.0.copyload = load i16* %addr, align 1
459 %pix_sp0.0.copyload = load i16* %addr, align 1
471 %pix_sp0.0.copyload = load i32* %addr, align 1
483 %pix_sp0.0.copyload = load i32* %addr, align 1
494 %pix_sp0.0.copyload = load i64* %addr, align 1
504 %pix_sp0.0.copyload = load i64* %addr, align 1
517 %pix_sp0.0.copyload = load i8* %addr, align 1
529 %pix_sp0.0.copyload = load i8* %addr, align 1
541 %pix_sp0.0.copyload = load i16* %addr, align 1
553 %pix_sp0.0.copyload = load i16* %addr, align 1
565 %pix_sp0.0.copyload = load i32* %addr, align 1
577 %pix_sp0.0.copyload = load i32* %addr, align 1
588 %pix_sp0.0.copyload = load i64* %addr, align 1
598 %pix_sp0.0.copyload = load i64* %addr, align 1
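
The copyload matches are scalar i8/i16/i32/i64 loads with align 1; in tests of this kind the loaded scalar is typically inserted into a vector lane so the backend can select a SIMD-register load rather than an integer load plus a move. A hedged sketch of one plausible shape, assuming insertion into lane 0; @lane_load_sketch is an illustrative name:

define <8 x i8> @lane_load_sketch(i8* %addr) {
entry:
  ; unaligned byte load, as in the matches above
  %pix_sp0.0.copyload = load i8* %addr, align 1
  ; inserting it into lane 0 lets the backend pick a SIMD-register load
  ; (e.g. "ldr b0" or an ld1 lane form) over ldrb plus ins
  %vec = insertelement <8 x i8> undef, i8 %pix_sp0.0.copyload, i32 0
  ret <8 x i8> %vec
}
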