; RUN: llc < %s -mattr=+atomics -asm-verbose=false -disable-wasm-fallthrough-return-opt -disable-wasm-explicit-locals | FileCheck %s

; Test loads and stores with custom alignment values.

target datalayout = "e-m:e-p:32:32-i64:64-n32:64-S128"
target triple = "wasm32-unknown-unknown"

;===----------------------------------------------------------------------------
; Loads
;===----------------------------------------------------------------------------

; CHECK-LABEL: ldi64_a1:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi64_a1(i64 *%p) {
  %v = load i64, i64* %p, align 1
  ret i64 %v
}

; CHECK-LABEL: ldi64_a2:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0):p2align=1{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi64_a2(i64 *%p) {
  %v = load i64, i64* %p, align 2
  ret i64 %v
}

; CHECK-LABEL: ldi64_a4:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0):p2align=2{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi64_a4(i64 *%p) {
  %v = load i64, i64* %p, align 4
  ret i64 %v
}

; 8 is the default alignment for i64 so no attribute is needed.

; CHECK-LABEL: ldi64_a8:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi64_a8(i64 *%p) {
  %v = load i64, i64* %p, align 8
  ret i64 %v
}

; The default alignment in LLVM is the same as the default alignment in wasm.

; CHECK-LABEL: ldi64:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi64(i64 *%p) {
  %v = load i64, i64* %p
  ret i64 %v
}

; 16 is greater than the default alignment so it is ignored.

; CHECK-LABEL: ldi64_a16:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi64_a16(i64 *%p) {
  %v = load i64, i64* %p, align 16
  ret i64 %v
}

;===----------------------------------------------------------------------------
; Extending loads
;===----------------------------------------------------------------------------

; CHECK-LABEL: ldi8_a1:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi8_a1(i8 *%p) {
  %v = load i8, i8* %p, align 1
  %w = zext i8 %v to i64
  ret i64 %w
}

; CHECK-LABEL: ldi8_a2:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load8_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi8_a2(i8 *%p) {
  %v = load i8, i8* %p, align 2
  %w = zext i8 %v to i64
  ret i64 %w
}

; CHECK-LABEL: ldi16_a1:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load16_u $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi16_a1(i16 *%p) {
  %v = load i16, i16* %p, align 1
  %w = zext i16 %v to i64
  ret i64 %w
}

; CHECK-LABEL: ldi16_a2:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load16_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi16_a2(i16 *%p) {
  %v = load i16, i16* %p, align 2
  %w = zext i16 %v to i64
  ret i64 %w
}

; CHECK-LABEL: ldi16_a4:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load16_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi16_a4(i16 *%p) {
  %v = load i16, i16* %p, align 4
  %w = zext i16 %v to i64
  ret i64 %w
}

; CHECK-LABEL: ldi32_a1:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi32_a1(i32 *%p) {
  %v = load i32, i32* %p, align 1
  %w = zext i32 %v to i64
  ret i64 %w
}

; CHECK-LABEL: ldi32_a2:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0):p2align=1{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi32_a2(i32 *%p) {
  %v = load i32, i32* %p, align 2
  %w = zext i32 %v to i64
  ret i64 %w
}

; CHECK-LABEL: ldi32_a4:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi32_a4(i32 *%p) {
  %v = load i32, i32* %p, align 4
  %w = zext i32 %v to i64
  ret i64 %w
}

; CHECK-LABEL: ldi32_a8:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load32_u $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi32_a8(i32 *%p) {
  %v = load i32, i32* %p, align 8
  %w = zext i32 %v to i64
  ret i64 %w
}
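
; The following sign-extending variant is an illustrative sketch rather than
; part of the original test: the function ldi16_a1_sext and its expected
; i64.load16_s output are assumptions made by analogy with ldi16_a1 above,
; where an under-aligned i16 load zero-extended to i64 selects i64.load16_u
; with p2align=0.

; CHECK-LABEL: ldi16_a1_sext:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.load16_s $push[[NUM:[0-9]+]]=, 0($0):p2align=0{{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi16_a1_sext(i16 *%p) {
  %v = load i16, i16* %p, align 1
  %w = sext i16 %v to i64
  ret i64 %w
}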

;===----------------------------------------------------------------------------
; Stores
;===----------------------------------------------------------------------------

; CHECK-LABEL: sti64_a1:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store 0($0):p2align=0, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti64_a1(i64 *%p, i64 %v) {
  store i64 %v, i64* %p, align 1
  ret void
}

; CHECK-LABEL: sti64_a2:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store 0($0):p2align=1, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti64_a2(i64 *%p, i64 %v) {
  store i64 %v, i64* %p, align 2
  ret void
}

; CHECK-LABEL: sti64_a4:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store 0($0):p2align=2, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti64_a4(i64 *%p, i64 %v) {
  store i64 %v, i64* %p, align 4
  ret void
}

; 8 is the default alignment for i64 so no attribute is needed.

; CHECK-LABEL: sti64_a8:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti64_a8(i64 *%p, i64 %v) {
  store i64 %v, i64* %p, align 8
  ret void
}

; The default alignment in LLVM is the same as the default alignment in wasm.

; CHECK-LABEL: sti64:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti64(i64 *%p, i64 %v) {
  store i64 %v, i64* %p
  ret void
}

; CHECK-LABEL: sti64_a16:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti64_a16(i64 *%p, i64 %v) {
  store i64 %v, i64* %p, align 16
  ret void
}

;===----------------------------------------------------------------------------
; Truncating stores
;===----------------------------------------------------------------------------

; CHECK-LABEL: sti8_a1:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store8 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti8_a1(i8 *%p, i64 %w) {
  %v = trunc i64 %w to i8
  store i8 %v, i8* %p, align 1
  ret void
}

; CHECK-LABEL: sti8_a2:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store8 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti8_a2(i8 *%p, i64 %w) {
  %v = trunc i64 %w to i8
  store i8 %v, i8* %p, align 2
  ret void
}

; CHECK-LABEL: sti16_a1:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store16 0($0):p2align=0, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti16_a1(i16 *%p, i64 %w) {
  %v = trunc i64 %w to i16
  store i16 %v, i16* %p, align 1
  ret void
}

; CHECK-LABEL: sti16_a2:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store16 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti16_a2(i16 *%p, i64 %w) {
  %v = trunc i64 %w to i16
  store i16 %v, i16* %p, align 2
  ret void
}

; CHECK-LABEL: sti16_a4:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store16 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti16_a4(i16 *%p, i64 %w) {
  %v = trunc i64 %w to i16
  store i16 %v, i16* %p, align 4
  ret void
}

; CHECK-LABEL: sti32_a1:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store32 0($0):p2align=0, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti32_a1(i32 *%p, i64 %w) {
  %v = trunc i64 %w to i32
  store i32 %v, i32* %p, align 1
  ret void
}

; CHECK-LABEL: sti32_a2:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store32 0($0):p2align=1, $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti32_a2(i32 *%p, i64 %w) {
  %v = trunc i64 %w to i32
  store i32 %v, i32* %p, align 2
  ret void
}

; CHECK-LABEL: sti32_a4:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store32 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti32_a4(i32 *%p, i64 %w) {
  %v = trunc i64 %w to i32
  store i32 %v, i32* %p, align 4
  ret void
}

; CHECK-LABEL: sti32_a8:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.store32 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti32_a8(i32 *%p, i64 %w) {
  %v = trunc i64 %w to i32
  store i32 %v, i32* %p, align 8
  ret void
}

;===----------------------------------------------------------------------------
; Atomic loads
;===----------------------------------------------------------------------------

; Wasm atomics have an alignment field, but it must always be the type's
; natural alignment.

; CHECK-LABEL: ldi64_atomic_a8:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi64_atomic_a8(i64 *%p) {
  %v = load atomic i64, i64* %p seq_cst, align 8
  ret i64 %v
}

; 16 is greater than the default alignment so it is ignored.

; CHECK-LABEL: ldi64_atomic_a16:
; CHECK-NEXT: .param i32{{$}}
; CHECK-NEXT: .result i64{{$}}
; CHECK-NEXT: i64.atomic.load $push[[NUM:[0-9]+]]=, 0($0){{$}}
; CHECK-NEXT: return $pop[[NUM]]{{$}}
define i64 @ldi64_atomic_a16(i64 *%p) {
  %v = load atomic i64, i64* %p seq_cst, align 16
  ret i64 %v
}

;===----------------------------------------------------------------------------
; Atomic stores
;===----------------------------------------------------------------------------

; CHECK-LABEL: sti64_atomic_a8:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti64_atomic_a8(i64 *%p, i64 %v) {
  store atomic i64 %v, i64* %p seq_cst, align 8
  ret void
}

; 16 is greater than the default alignment so it is ignored.

; CHECK-LABEL: sti64_atomic_a16:
; CHECK-NEXT: .param i32, i64{{$}}
; CHECK-NEXT: i64.atomic.store 0($0), $1{{$}}
; CHECK-NEXT: return{{$}}
define void @sti64_atomic_a16(i64 *%p, i64 %v) {
  store atomic i64 %v, i64* %p seq_cst, align 16
  ret void
}
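
; Recap of the encoding exercised above (a summary comment, not additional
; FileCheck directives): the p2align attribute is log2 of the LLVM alignment,
; it is omitted when the alignment equals the access's natural alignment, and
; alignments greater than natural are treated as natural. For the 64-bit
; accesses in this file:
;   align 1  -> p2align=0
;   align 2  -> p2align=1
;   align 4  -> p2align=2
;   align 8  -> natural alignment, no attribute
;   align 16 -> treated as natural, no attribute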