; RUN: llc < %s -march=x86-64 | FileCheck %s

; rdar://7103704

define void @sub1(i32* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: sub1:
; CHECK: subl
  %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0]
  ret void
}

define void @inc4(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: inc4:
; CHECK: incq
  %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
  ret void
}

declare i64 @llvm.atomic.load.add.i64.p0i64(i64* nocapture, i64) nounwind

define void @add8(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: add8:
; CHECK: addq $2
  %0 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0]
  ret void
}

define void @add4(i64* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add4:
; CHECK: addq
  %0 = sext i32 %v to i64 ; <i64> [#uses=1]
  %1 = tail call i64 @llvm.atomic.load.add.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0]
  ret void
}

define void @inc3(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: inc3:
; CHECK: incb
  %0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0]
  ret void
}

declare i8 @llvm.atomic.load.add.i8.p0i8(i8* nocapture, i8) nounwind

define void @add7(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: add7:
; CHECK: addb $2
  %0 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0]
  ret void
}

define void @add3(i8* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add3:
; CHECK: addb
  %0 = trunc i32 %v to i8 ; <i8> [#uses=1]
  %1 = tail call i8 @llvm.atomic.load.add.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0]
  ret void
}

define void @inc2(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: inc2:
; CHECK: incw
  %0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0]
  ret void
}

declare i16 @llvm.atomic.load.add.i16.p0i16(i16* nocapture, i16) nounwind

define void @add6(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: add6:
; CHECK: addw $2
  %0 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0]
  ret void
}

define void @add2(i16* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add2:
; CHECK: addw
  %0 = trunc i32 %v to i16 ; <i16> [#uses=1]
  %1 = tail call i16 @llvm.atomic.load.add.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0]
  ret void
}

define void @inc1(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: inc1:
; CHECK: incl
  %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0]
  ret void
}

declare i32 @llvm.atomic.load.add.i32.p0i32(i32* nocapture, i32) nounwind

define void @add5(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: add5:
; CHECK: addl $2
  %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0]
  ret void
}

define void @add1(i32* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: add1:
; CHECK: addl
  %0 = tail call i32 @llvm.atomic.load.add.i32.p0i32(i32* %p, i32 %v) ; <i32> [#uses=0]
  ret void
}

define void @dec4(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: dec4:
; CHECK: decq
  %0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 1) ; <i64> [#uses=0]
  ret void
}

declare i64 @llvm.atomic.load.sub.i64.p0i64(i64* nocapture, i64) nounwind

define void @sub8(i64* nocapture %p) nounwind ssp {
entry:
; CHECK: sub8:
; CHECK: subq $2
  %0 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 2) ; <i64> [#uses=0]
  ret void
}

define void @sub4(i64* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: sub4:
; CHECK: subq
  %0 = sext i32 %v to i64 ; <i64> [#uses=1]
  %1 = tail call i64 @llvm.atomic.load.sub.i64.p0i64(i64* %p, i64 %0) ; <i64> [#uses=0]
  ret void
}

define void @dec3(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: dec3:
; CHECK: decb
  %0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 1) ; <i8> [#uses=0]
  ret void
}

declare i8 @llvm.atomic.load.sub.i8.p0i8(i8* nocapture, i8) nounwind

define void @sub7(i8* nocapture %p) nounwind ssp {
entry:
; CHECK: sub7:
; CHECK: subb $2
  %0 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 2) ; <i8> [#uses=0]
  ret void
}

define void @sub3(i8* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: sub3:
; CHECK: subb
  %0 = trunc i32 %v to i8 ; <i8> [#uses=1]
  %1 = tail call i8 @llvm.atomic.load.sub.i8.p0i8(i8* %p, i8 %0) ; <i8> [#uses=0]
  ret void
}

define void @dec2(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: dec2:
; CHECK: decw
  %0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 1) ; <i16> [#uses=0]
  ret void
}

declare i16 @llvm.atomic.load.sub.i16.p0i16(i16* nocapture, i16) nounwind

define void @sub6(i16* nocapture %p) nounwind ssp {
entry:
; CHECK: sub6:
; CHECK: subw $2
  %0 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 2) ; <i16> [#uses=0]
  ret void
}

define void @sub2(i16* nocapture %p, i32 %v) nounwind ssp {
entry:
; CHECK: sub2:
; CHECK: negl
  %0 = trunc i32 %v to i16 ; <i16> [#uses=1]
  %1 = tail call i16 @llvm.atomic.load.sub.i16.p0i16(i16* %p, i16 %0) ; <i16> [#uses=0]
  ret void
}

define void @dec1(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: dec1:
; CHECK: decl
  %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 1) ; <i32> [#uses=0]
  ret void
}

declare i32 @llvm.atomic.load.sub.i32.p0i32(i32* nocapture, i32) nounwind

define void @sub5(i32* nocapture %p) nounwind ssp {
entry:
; CHECK: sub5:
; CHECK: subl $2
  %0 = tail call i32 @llvm.atomic.load.sub.i32.p0i32(i32* %p, i32 2) ; <i32> [#uses=0]
  ret void
}