; RUN: llc -mtriple=aarch64-none-linux-gnu < %s | FileCheck %s

define i64 @test_inline_constraint_r(i64 %base, i32 %offset) {
; CHECK: test_inline_constraint_r:
  %val = call i64 asm "add $0, $1, $2, sxtw", "=r,r,r"(i64 %base, i32 %offset)
; CHECK: add {{x[0-9]+}}, {{x[0-9]+}}, {{w[0-9]+}}, sxtw
  ret i64 %val
}

define i16 @test_small_reg(i16 %lhs, i16 %rhs) {
; CHECK: test_small_reg:
  %val = call i16 asm sideeffect "add $0, $1, $2, sxth", "=r,r,r"(i16 %lhs, i16 %rhs)
; CHECK: add {{w[0-9]+}}, {{w[0-9]+}}, {{w[0-9]+}}, sxth
  ret i16 %val
}

define i64 @test_inline_constraint_r_imm(i64 %base, i32 %offset) {
; CHECK: test_inline_constraint_r_imm:
  %val = call i64 asm "add $0, $1, $2, sxtw", "=r,r,r"(i64 4, i32 12)
; CHECK: movz [[FOUR:x[0-9]+]], #4
; CHECK: movz [[TWELVE:w[0-9]+]], #12
; CHECK: add {{x[0-9]+}}, [[FOUR]], [[TWELVE]], sxtw
  ret i64 %val
}

; m is permitted to have a base/offset form. We don't do that
; currently though.
define i32 @test_inline_constraint_m(i32 *%ptr) {
; CHECK: test_inline_constraint_m:
  %val = call i32 asm "ldr $0, $1", "=r,m"(i32 *%ptr)
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
  ret i32 %val
}

@arr = global [8 x i32] zeroinitializer

; Q should *never* have base/offset form even if given the chance.
define i32 @test_inline_constraint_Q(i32 *%ptr) {
; CHECK: test_inline_constraint_Q:
  %val = call i32 asm "ldr $0, $1", "=r,Q"(i32* getelementptr([8 x i32]* @arr, i32 0, i32 1))
; CHECK: ldr {{w[0-9]+}}, [{{x[0-9]+}}]
  ret i32 %val
}

@dump = global fp128 zeroinitializer

; I is an immediate in the range [0, 4095] valid for an add/sub instruction.
define void @test_inline_constraint_I() {
; CHECK: test_inline_constraint_I:
  call void asm sideeffect "add x0, x0, $0", "I"(i32 0)
  call void asm sideeffect "add x0, x0, $0", "I"(i64 4095)
; CHECK: add x0, x0, #0
; CHECK: add x0, x0, #4095

  ret void
}

; Skip J (the negated add/sub immediate range) because it's useless.

; K is a logical (bitmask) immediate valid for a 32-bit instruction.
define void @test_inline_constraint_K() {
; CHECK: test_inline_constraint_K:
  call void asm sideeffect "and w0, w0, $0", "K"(i32 2863311530) ; = 0xaaaaaaaa
  call void asm sideeffect "and w0, w0, $0", "K"(i32 65535)
; CHECK: and w0, w0, #-1431655766
; CHECK: and w0, w0, #65535

  ret void
}

; L is a logical (bitmask) immediate valid for a 64-bit instruction.
define void @test_inline_constraint_L() {
; CHECK: test_inline_constraint_L:
  call void asm sideeffect "and x0, x0, $0", "L"(i64 4294967296) ; = 0x100000000
  call void asm sideeffect "and x0, x0, $0", "L"(i64 65535)
; CHECK: and x0, x0, #4294967296
; CHECK: and x0, x0, #65535

  ret void
}

; Skip M and N because we don't support MOV pseudo-instructions yet.
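; As a hedged sketch, an M test would presumably follow the K/L pattern once the
; MOV pseudo-instruction is supported (kept commented out so this file still
; compiles; the exact constant classification is an assumption), e.g.:
;   call void asm sideeffect "mov w0, $0", "M"(i32 65535)
; with N being the 64-bit equivalent.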

@var = global i32 0

; S is a symbolic reference to a symbol or label.
define void @test_inline_constraint_S() {
; CHECK: test_inline_constraint_S:
  call void asm sideeffect "adrp x0, $0", "S"(i32* @var)
  call void asm sideeffect "adrp x0, ${0:A}", "S"(i32* @var)
  call void asm sideeffect "add x0, x0, ${0:L}", "S"(i32* @var)
; CHECK: adrp x0, var
; CHECK: adrp x0, var
; CHECK: add x0, x0, #:lo12:var
  ret void
}

define i32 @test_inline_constraint_S_label(i1 %in) {
; CHECK: test_inline_constraint_S_label:
  call void asm sideeffect "adr x0, $0", "S"(i8* blockaddress(@test_inline_constraint_S_label, %loc))
; CHECK: adr x0, .Ltmp{{[0-9]+}}
  br i1 %in, label %loc, label %loc2
loc:
  ret i32 0
loc2:
  ret i32 42
}

; Y is the floating-point constant zero.
define void @test_inline_constraint_Y() {
; CHECK: test_inline_constraint_Y:
  call void asm sideeffect "fcmp s0, $0", "Y"(float 0.0)
; CHECK: fcmp s0, #0.0
  ret void
}

; Z is the integer constant zero.
define void @test_inline_constraint_Z() {
; CHECK: test_inline_constraint_Z:
  call void asm sideeffect "cmp w0, $0", "Z"(i32 0)
; CHECK: cmp w0, #0
  ret void
}
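
; The function below is an illustrative sketch that was not part of the original
; test: it shows how a "w" (FP/SIMD register) operand would be exercised in the
; same style. No CHECK lines are attached because the exact registers the
; backend prints for a float operand (s-registers is the assumption) have not
; been verified here.
define float @test_inline_constraint_w_sketch(float %lhs, float %rhs) {
  %val = call float asm "fadd $0, $1, $2", "=w,w,w"(float %lhs, float %rhs)
  ret float %val
}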