Lines matching the full token i64, excerpted with their original line numbers from an LLVM PowerPC codegen test that exercises 64-bit atomic operations.

9 target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
18 @sl = common global i64 0, align 8
19 @ul = common global i64 0, align 8
20 @sll = common global i64 0, align 8
21 @ull = common global i64 0, align 8
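
The datalayout above is big-endian with 64-bit pointers; its i64:32:64 entry gives i64 a 32-bit ABI alignment but a 64-bit preferred alignment, which is why the globals and memory accesses below carry an explicit align 8. The four i64 globals plausibly back long / unsigned long / long long / unsigned long long slots; restated in current IR with that (assumed) reading as comments:

    ; sketch only: the C-type mapping in the comments is inferred
    ; from the names, not stated anywhere in the test itself
    @sl  = common global i64 0, align 8   ; signed long
    @ul  = common global i64 0, align 8   ; unsigned long
    @sll = common global i64 0, align 8   ; signed long long
    @ull = common global i64 0, align 8   ; unsigned long long
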
35 %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
36 %11 = atomicrmw add i64* %10, i64 1 monotonic
37 %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
38 %13 = atomicrmw add i64* %12, i64 1 monotonic
49 %24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
50 %25 = atomicrmw sub i64* %24, i64 1 monotonic
51 %26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
52 %27 = atomicrmw sub i64* %26, i64 1 monotonic
63 %38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
64 %39 = atomicrmw or i64* %38, i64 1 monotonic
65 %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
66 %41 = atomicrmw or i64* %40, i64 1 monotonic
77 %52 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
78 %53 = atomicrmw xor i64* %52, i64 1 monotonic
79 %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
80 %55 = atomicrmw xor i64* %54, i64 1 monotonic
91 %66 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
92 %67 = atomicrmw and i64* %66, i64 1 monotonic
93 %68 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
94 %69 = atomicrmw and i64* %68, i64 1 monotonic
105 %80 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
106 %81 = atomicrmw nand i64* %80, i64 1 monotonic
107 %82 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
108 %83 = atomicrmw nand i64* %82, i64 1 monotonic
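
Each pair above bitcasts a global from i64* through i8* and back (a no-op round trip) and then issues a monotonic atomicrmw with the constant 1, discarding the result: the shape of a __sync_fetch_and_OP(&g, 1) expansion whose return value is unused. A minimal sketch of one such pair in current opaque-pointer IR, where the bitcast round trip disappears entirely (@op_ignore_add is an illustrative name, not from the test):

    @sl = common global i64 0, align 8

    define void @op_ignore_add() {
    entry:
      ; atomically sl += 1; the old value is returned but ignored
      %old = atomicrmw add ptr @sl, i64 1 monotonic
      ret void
    }
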
133 %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
134 %11 = atomicrmw add i64* %10, i64 11 monotonic
135 store i64 %11, i64* @sl, align 8
136 %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
137 %13 = atomicrmw add i64* %12, i64 11 monotonic
138 store i64 %13, i64* @ul, align 8
155 %24 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
156 %25 = atomicrmw sub i64* %24, i64 11 monotonic
157 store i64 %25, i64* @sl, align 8
158 %26 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
159 %27 = atomicrmw sub i64* %26, i64 11 monotonic
160 store i64 %27, i64* @ul, align 8
177 %38 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
178 %39 = atomicrmw or i64* %38, i64 11 monotonic
179 store i64 %39, i64* @sl, align 8
180 %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
181 %41 = atomicrmw or i64* %40, i64 11 monotonic
182 store i64 %41, i64* @ul, align 8
199 %52 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
200 %53 = atomicrmw xor i64* %52, i64 11 monotonic
201 store i64 %53, i64* @sl, align 8
202 %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
203 %55 = atomicrmw xor i64* %54, i64 11 monotonic
204 store i64 %55, i64* @ul, align 8
221 %66 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
222 %67 = atomicrmw and i64* %66, i64 11 monotonic
223 store i64 %67, i64* @sl, align 8
224 %68 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
225 %69 = atomicrmw and i64* %68, i64 11 monotonic
226 store i64 %69, i64* @ul, align 8
243 %80 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
244 %81 = atomicrmw nand i64* %80, i64 11 monotonic
245 store i64 %81, i64* @sl, align 8
246 %82 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
247 %83 = atomicrmw nand i64* %82, i64 11 monotonic
248 store i64 %83, i64* @ul, align 8
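
In this group the same operations run with the constant 11, and the old value returned by atomicrmw is written back over the global with a plain (non-atomic) store, matching a __sync_fetch_and_OP whose result is kept. A hedged sketch of one pair in current IR (@fetch_and_add is an illustrative name):

    @ul = common global i64 0, align 8

    define void @fetch_and_add() {
    entry:
      ; atomicrmw returns the value *before* the add; the test
      ; pattern then stores that old value back non-atomically
      %old = atomicrmw add ptr @ul, i64 11 monotonic
      store i64 %old, ptr @ul, align 8
      ret void
    }
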
290 %27 = zext i8 %26 to i64
291 %28 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
292 %29 = atomicrmw add i64* %28, i64 %27 monotonic
293 %30 = add i64 %29, %27
294 store i64 %30, i64* @sl, align 8
296 %32 = zext i8 %31 to i64
297 %33 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
298 %34 = atomicrmw add i64* %33, i64 %32 monotonic
299 %35 = add i64 %34, %32
300 store i64 %35, i64* @ul, align 8
334 %63 = zext i8 %62 to i64
335 %64 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
336 %65 = atomicrmw sub i64* %64, i64 %63 monotonic
337 %66 = sub i64 %65, %63
338 store i64 %66, i64* @sl, align 8
340 %68 = zext i8 %67 to i64
341 %69 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
342 %70 = atomicrmw sub i64* %69, i64 %68 monotonic
343 %71 = sub i64 %70, %68
344 store i64 %71, i64* @ul, align 8
378 %99 = zext i8 %98 to i64
379 %100 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
380 %101 = atomicrmw or i64* %100, i64 %99 monotonic
381 %102 = or i64 %101, %99
382 store i64 %102, i64* @sl, align 8
384 %104 = zext i8 %103 to i64
385 %105 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
386 %106 = atomicrmw or i64* %105, i64 %104 monotonic
387 %107 = or i64 %106, %104
388 store i64 %107, i64* @ul, align 8
422 %135 = zext i8 %134 to i64
423 %136 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
424 %137 = atomicrmw xor i64* %136, i64 %135 monotonic
425 %138 = xor i64 %137, %135
426 store i64 %138, i64* @sl, align 8
428 %140 = zext i8 %139 to i64
429 %141 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
430 %142 = atomicrmw xor i64* %141, i64 %140 monotonic
431 %143 = xor i64 %142, %140
432 store i64 %143, i64* @ul, align 8
466 %171 = zext i8 %170 to i64
467 %172 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
468 %173 = atomicrmw and i64* %172, i64 %171 monotonic
469 %174 = and i64 %173, %171
470 store i64 %174, i64* @sl, align 8
472 %176 = zext i8 %175 to i64
473 %177 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
474 %178 = atomicrmw and i64* %177, i64 %176 monotonic
475 %179 = and i64 %178, %176
476 store i64 %179, i64* @ul, align 8
516 %213 = zext i8 %212 to i64
517 %214 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
518 %215 = atomicrmw nand i64* %214, i64 %213 monotonic
519 %216 = xor i64 %215, -1
520 %217 = and i64 %216, %213
521 store i64 %217, i64* @sl, align 8
523 %219 = zext i8 %218 to i64
524 %220 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
525 %221 = atomicrmw nand i64* %220, i64 %219 monotonic
526 %222 = xor i64 %221, -1
527 %223 = and i64 %222, %219
528 store i64 %223, i64* @ul, align 8
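
Here the operand is a zext'd i8 and the operation is replayed on the returned old value, because atomicrmw yields the value before the update while an OP_and_fetch-style builtin must produce the value after it. Note the nand replay above computes (~old) & val via the xor/and pair, which appears to match GCC's pre-4.4 __sync_nand_and_fetch semantics rather than the ~(old & val) that LLVM's atomicrmw nand actually stores. A sketch of the add case in current IR (@add_and_fetch is an illustrative name):

    @sl = common global i64 0, align 8

    define i64 @add_and_fetch(i64 %v) {
    entry:
      ; replay the add on the returned OLD value to recover the
      ; NEW value that an add_and_fetch builtin must produce
      %old = atomicrmw add ptr @sl, i64 %v monotonic
      %new = add i64 %old, %v
      store i64 %new, ptr @sl, align 8
      ret i64 %new
    }
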
574 %31 = zext i8 %30 to i64
576 %33 = sext i8 %32 to i64
577 %34 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
578 %35 = cmpxchg i64* %34, i64 %31, i64 %33 monotonic
579 store i64 %35, i64* @sl, align 8
581 %37 = zext i8 %36 to i64
583 %39 = sext i8 %38 to i64
584 %40 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
585 %41 = cmpxchg i64* %40, i64 %37, i64 %39 monotonic
586 store i64 %41, i64* @ul, align 8
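
These cmpxchg instructions use the pre-LLVM-3.5 form: a single ordering and a plain i64 result, which is the loaded value and is stored back here. Current IR takes separate success/failure orderings and returns a {value, success} pair; a hedged equivalent of the value-returning variant (@val_cas is an illustrative name):

    @sl = common global i64 0, align 8

    define i64 @val_cas(i64 %expected, i64 %desired) {
    entry:
      ; field 0 of the result pair is the loaded value that the
      ; old single-ordering cmpxchg form returned directly
      %pair = cmpxchg ptr @sl, i64 %expected, i64 %desired monotonic monotonic
      %old  = extractvalue { i64, i1 } %pair, 0
      store i64 %old, ptr @sl, align 8
      ret i64 %old
    }
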
642 %91 = zext i8 %90 to i64
644 %93 = sext i8 %92 to i64
645 %94 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
646 %95 = cmpxchg i64* %94, i64 %91, i64 %93 monotonic
647 %96 = icmp eq i64 %95, %91
652 %100 = zext i8 %99 to i64
654 %102 = sext i8 %101 to i64
655 %103 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
656 %104 = cmpxchg i64* %103, i64 %100, i64 %102 monotonic
657 %105 = icmp eq i64 %104, %100
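
The boolean variant re-derives success by comparing the returned value against the expected one with icmp eq. In current IR the success bit comes back directly as field 1 of the cmpxchg result, making the icmp unnecessary (@bool_cas is an illustrative name):

    @ul = common global i64 0, align 8

    define i1 @bool_cas(i64 %expected, i64 %desired) {
    entry:
      ; field 1 of the pair is the success bit the test had to
      ; recompute with a separate icmp eq
      %pair = cmpxchg ptr @ul, i64 %expected, i64 %desired monotonic monotonic
      %ok   = extractvalue { i64, i1 } %pair, 1
      ret i1 %ok
    }
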
685 %10 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
686 %11 = atomicrmw xchg i64* %10, i64 1 monotonic
687 store i64 %11, i64* @sl, align 8
688 %12 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
689 %13 = atomicrmw xchg i64* %12, i64 1 monotonic
690 store i64 %13, i64* @ul, align 8
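
atomicrmw xchg swaps the constant 1 in and returns the previous contents, the __sync_lock_test_and_set shape. In current IR (@lock_test_and_set is an illustrative name):

    @sl = common global i64 0, align 8

    define i64 @lock_test_and_set() {
    entry:
      ; swap 1 into the global and hand back whatever was there,
      ; the classic test-and-set pattern (monotonic here)
      %old = atomicrmw xchg ptr @sl, i64 1 monotonic
      store i64 %old, ptr @sl, align 8
      ret i64 %old
    }
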
702 %18 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
703 store volatile i64 0, i64* %18, align 8
704 %19 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
705 store volatile i64 0, i64* %19, align 8
706 %20 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
707 store volatile i64 0, i64* %20, align 8
708 %21 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
709 store volatile i64 0, i64* %21, align 8
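
The section closes with volatile stores of zero to each global, the __sync_lock_release half of the locking pattern. A minimal sketch in current IR (@lock_release is an illustrative name; a modern lowering would more likely use a store atomic with release ordering):

    @ull = common global i64 0, align 8

    define void @lock_release() {
    entry:
      ; releasing the "lock" is just a volatile store of zero
      store volatile i64 0, ptr @ull, align 8
      ret void
    }
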