; RUN: llc < %s -mtriple=powerpc-apple-darwin -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=PPC32
; FIXME: -verify-machineinstrs currently fails on ppc64 (mismatched register/instruction).
; This is already checked for in Atomics-64.ll
; RUN: llc < %s -mtriple=powerpc64-apple-darwin | FileCheck %s --check-prefix=CHECK --check-prefix=PPC64

; In this file, we check that atomic load/store can make use of the indexed
; versions of the instructions.

; Indexed version of loads
define i8 @load_x_i8_seq_cst([100000 x i8]* %mem) {
; CHECK-LABEL: load_x_i8_seq_cst
; CHECK: sync
; CHECK: lbzx [[VAL:r[0-9]+]]
; CHECK-PPC32: lwsync
; CHECK-PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
; CHECK-PPC64: bne- [[CR]], .+4
; CHECK-PPC64: isync
  %ptr = getelementptr inbounds [100000 x i8], [100000 x i8]* %mem, i64 0, i64 90000
  %val = load atomic i8, i8* %ptr seq_cst, align 1
  ret i8 %val
}
define i16 @load_x_i16_acquire([100000 x i16]* %mem) {
; CHECK-LABEL: load_x_i16_acquire
; CHECK: lhzx [[VAL:r[0-9]+]]
; CHECK-PPC32: lwsync
; CHECK-PPC64: cmpw [[CR:cr[0-9]+]], [[VAL]], [[VAL]]
; CHECK-PPC64: bne- [[CR]], .+4
; CHECK-PPC64: isync
  %ptr = getelementptr inbounds [100000 x i16], [100000 x i16]* %mem, i64 0, i64 90000
  %val = load atomic i16, i16* %ptr acquire, align 2
  ret i16 %val
}
define i32 @load_x_i32_monotonic([100000 x i32]* %mem) {
; CHECK-LABEL: load_x_i32_monotonic
; CHECK: lwzx
; CHECK-NOT: sync
  %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %mem, i64 0, i64 90000
  %val = load atomic i32, i32* %ptr monotonic, align 4
  ret i32 %val
}
define i64 @load_x_i64_unordered([100000 x i64]* %mem) {
; CHECK-LABEL: load_x_i64_unordered
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: ldx
; CHECK-NOT: sync
  %ptr = getelementptr inbounds [100000 x i64], [100000 x i64]* %mem, i64 0, i64 90000
  %val = load atomic i64, i64* %ptr unordered, align 8
  ret i64 %val
}

; Indexed version of stores
define void @store_x_i8_seq_cst([100000 x i8]* %mem) {
; CHECK-LABEL: store_x_i8_seq_cst
; CHECK: sync
; CHECK: stbx
  %ptr = getelementptr inbounds [100000 x i8], [100000 x i8]* %mem, i64 0, i64 90000
  store atomic i8 42, i8* %ptr seq_cst, align 1
  ret void
}
define void @store_x_i16_release([100000 x i16]* %mem) {
; CHECK-LABEL: store_x_i16_release
; CHECK: lwsync
; CHECK: sthx
  %ptr = getelementptr inbounds [100000 x i16], [100000 x i16]* %mem, i64 0, i64 90000
  store atomic i16 42, i16* %ptr release, align 2
  ret void
}
define void @store_x_i32_monotonic([100000 x i32]* %mem) {
; CHECK-LABEL: store_x_i32_monotonic
; CHECK-NOT: sync
; CHECK: stwx
  %ptr = getelementptr inbounds [100000 x i32], [100000 x i32]* %mem, i64 0, i64 90000
  store atomic i32 42, i32* %ptr monotonic, align 4
  ret void
}
define void @store_x_i64_unordered([100000 x i64]* %mem) {
; CHECK-LABEL: store_x_i64_unordered
; CHECK-NOT: sync
; PPC32: __sync_
; PPC64-NOT: __sync_
; PPC64: stdx
  %ptr = getelementptr inbounds [100000 x i64], [100000 x i64]* %mem, i64 0, i64 90000
  store atomic i64 42, i64* %ptr unordered, align 8
  ret void
}