; Check -O0 (FastISel) lowering of 8-bit atomic operations on x86 and x86-64.
; RUN: llc < %s -O0 -march=x86-64 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X64
; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32

@sc8 = external global i8

define void @atomic_fetch_add8() nounwind {
; X64:   atomic_fetch_add8
; X32:   atomic_fetch_add8
entry:
; 32-bit
  %t1 = atomicrmw add i8* @sc8, i8 1 acquire
; X64:       lock
; X64:       incb
; X32:       lock
; X32:       incb
  %t2 = atomicrmw add i8* @sc8, i8 3 acquire
; X64:       lock
; X64:       addb $3
; X32:       lock
; X32:       addb $3
  %t3 = atomicrmw add i8* @sc8, i8 5 acquire
; X64:       lock
; X64:       xaddb
; X32:       lock
; X32:       xaddb
  %t4 = atomicrmw add i8* @sc8, i8 %t3 acquire
; X64:       lock
; X64:       addb
; X32:       lock
; X32:       addb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_sub8() nounwind {
; X64:   atomic_fetch_sub8
; X32:   atomic_fetch_sub8
  %t1 = atomicrmw sub i8* @sc8, i8 1 acquire
; X64:       lock
; X64:       decb
; X32:       lock
; X32:       decb
  %t2 = atomicrmw sub i8* @sc8, i8 3 acquire
; X64:       lock
; X64:       subb $3
; X32:       lock
; X32:       subb $3
  %t3 = atomicrmw sub i8* @sc8, i8 5 acquire
; X64:       lock
; X64:       xaddb
; X32:       lock
; X32:       xaddb
  %t4 = atomicrmw sub i8* @sc8, i8 %t3 acquire
; X64:       lock
; X64:       subb
; X32:       lock
; X32:       subb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_and8() nounwind {
; X64:   atomic_fetch_and8
; X32:   atomic_fetch_and8
  %t1 = atomicrmw and i8* @sc8, i8 3 acquire
; X64:       lock
; X64:       andb $3
; X32:       lock
; X32:       andb $3
  %t2 = atomicrmw and i8* @sc8, i8 5 acquire
; X64:       andb
; X64:       lock
; X64:       cmpxchgb
; X32:       andb
; X32:       lock
; X32:       cmpxchgb
  %t3 = atomicrmw and i8* @sc8, i8 %t2 acquire
; X64:       lock
; X64:       andb
; X32:       lock
; X32:       andb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_or8() nounwind {
; X64:   atomic_fetch_or8
; X32:   atomic_fetch_or8
  %t1 = atomicrmw or i8* @sc8, i8 3 acquire
; X64:       lock
; X64:       orb $3
; X32:       lock
; X32:       orb $3
  %t2 = atomicrmw or i8* @sc8, i8 5 acquire
; X64:       orb
; X64:       lock
; X64:       cmpxchgb
; X32:       orb
; X32:       lock
; X32:       cmpxchgb
  %t3 = atomicrmw or i8* @sc8, i8 %t2 acquire
; X64:       lock
; X64:       orb
; X32:       lock
; X32:       orb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_xor8() nounwind {
; X64:   atomic_fetch_xor8
; X32:   atomic_fetch_xor8
  %t1 = atomicrmw xor i8* @sc8, i8 3 acquire
; X64:       lock
; X64:       xorb $3
; X32:       lock
; X32:       xorb $3
  %t2 = atomicrmw xor i8* @sc8, i8 5 acquire
; X64:       xorb
; X64:       lock
; X64:       cmpxchgb
; X32:       xorb
; X32:       lock
; X32:       cmpxchgb
  %t3 = atomicrmw xor i8* @sc8, i8 %t2 acquire
; X64:       lock
; X64:       xorb
; X32:       lock
; X32:       xorb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_nand8(i8 %x) nounwind {
; X64:   atomic_fetch_nand8
; X32:   atomic_fetch_nand8
  %t1 = atomicrmw nand i8* @sc8, i8 %x acquire
; X64:       andb
; X64:       notb
; X64:       lock
; X64:       cmpxchgb
; X32:       andb
; X32:       notb
; X32:       lock
; X32:       cmpxchgb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_max8(i8 %x) nounwind {
  %t1 = atomicrmw max i8* @sc8, i8 %x acquire
; X64:       cmpb
; X64:       cmov
; X64:       lock
; X64:       cmpxchgb

; X32:       cmpb
; X32:       cmov
; X32:       lock
; X32:       cmpxchgb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_min8(i8 %x) nounwind {
  %t1 = atomicrmw min i8* @sc8, i8 %x acquire
; X64:       cmpb
; X64:       cmov
; X64:       lock
; X64:       cmpxchgb

; X32:       cmpb
; X32:       cmov
; X32:       lock
; X32:       cmpxchgb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_umax8(i8 %x) nounwind {
  %t1 = atomicrmw umax i8* @sc8, i8 %x acquire
; X64:       cmpb
; X64:       cmov
; X64:       lock
; X64:       cmpxchgb

; X32:       cmpb
; X32:       cmov
; X32:       lock
; X32:       cmpxchgb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_umin8(i8 %x) nounwind {
  %t1 = atomicrmw umin i8* @sc8, i8 %x acquire
; X64:       cmpb
; X64:       cmov
; X64:       lock
; X64:       cmpxchgb
; X32:       cmpb
; X32:       cmov
; X32:       lock
; X32:       cmpxchgb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_cmpxchg8() nounwind {
  %t1 = cmpxchg i8* @sc8, i8 0, i8 1 acquire
; X64:       lock
; X64:       cmpxchgb
; X32:       lock
; X32:       cmpxchgb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_store8(i8 %x) nounwind {
  store atomic i8 %x, i8* @sc8 release, align 4
; X64-NOT:   lock
; X64:       movb
; X32-NOT:   lock
; X32:       movb
  ret void
; X64:       ret
; X32:       ret
}

define void @atomic_fetch_swap8(i8 %x) nounwind {
  %t1 = atomicrmw xchg i8* @sc8, i8 %x acquire
; X64-NOT:   lock
; X64:       xchgb
; X32-NOT:   lock
; X32:       xchgb
  ret void
; X64:       ret
; X32:       ret
}