; RUN: llc -mtriple=aarch64-none-linux-gnu -verify-machineinstrs < %s | FileCheck %s

@lhs = global fp128 zeroinitializer
@rhs = global fp128 zeroinitializer

define fp128 @test_add() {
; CHECK-LABEL: test_add:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]

  %val = fadd fp128 %lhs, %rhs
; CHECK: bl __addtf3
  ret fp128 %val
}

define fp128 @test_sub() {
; CHECK-LABEL: test_sub:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]

  %val = fsub fp128 %lhs, %rhs
; CHECK: bl __subtf3
  ret fp128 %val
}

define fp128 @test_mul() {
; CHECK-LABEL: test_mul:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]

  %val = fmul fp128 %lhs, %rhs
; CHECK: bl __multf3
  ret fp128 %val
}

define fp128 @test_div() {
; CHECK-LABEL: test_div:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]

  %val = fdiv fp128 %lhs, %rhs
; CHECK: bl __divtf3
  ret fp128 %val
}

@var32 = global i32 0
@var64 = global i64 0

define void @test_fptosi() {
; CHECK-LABEL: test_fptosi:
  %val = load fp128* @lhs

  %val32 = fptosi fp128 %val to i32
  store i32 %val32, i32* @var32
; CHECK: bl __fixtfsi

  %val64 = fptosi fp128 %val to i64
  store i64 %val64, i64* @var64
; CHECK: bl __fixtfdi

  ret void
}

define void @test_fptoui() {
; CHECK-LABEL: test_fptoui:
  %val = load fp128* @lhs

  %val32 = fptoui fp128 %val to i32
  store i32 %val32, i32* @var32
; CHECK: bl __fixunstfsi

  %val64 = fptoui fp128 %val to i64
  store i64 %val64, i64* @var64
; CHECK: bl __fixunstfdi

  ret void
}

define void @test_sitofp() {
; CHECK-LABEL: test_sitofp:

  %src32 = load i32* @var32
  %val32 = sitofp i32 %src32 to fp128
  store volatile fp128 %val32, fp128* @lhs
; CHECK: bl __floatsitf

  %src64 = load i64* @var64
  %val64 = sitofp i64 %src64 to fp128
  store volatile fp128 %val64, fp128* @lhs
; CHECK: bl __floatditf

  ret void
}

define void @test_uitofp() {
; CHECK-LABEL: test_uitofp:

  %src32 = load i32* @var32
  %val32 = uitofp i32 %src32 to fp128
  store volatile fp128 %val32, fp128* @lhs
; CHECK: bl __floatunsitf

  %src64 = load i64* @var64
  %val64 = uitofp i64 %src64 to fp128
  store volatile fp128 %val64, fp128* @lhs
; CHECK: bl __floatunditf

  ret void
}

define i1 @test_setcc1() {
; CHECK-LABEL: test_setcc1:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]

; Technically, everything after the call to __letf2 is redundant, but we'll let
; LLVM have its fun for now.
  %val = fcmp ole fp128 %lhs, %rhs
; CHECK: bl __letf2
; CHECK: cmp w0, #0
; CHECK: csinc w0, wzr, wzr, gt

  ret i1 %val
; CHECK: ret
}

define i1 @test_setcc2() {
; CHECK-LABEL: test_setcc2:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]

; Technically, everything after the call to __letf2 is redundant, but we'll let
; LLVM have its fun for now.
  %val = fcmp ugt fp128 %lhs, %rhs
; CHECK: bl __unordtf2
; CHECK: mov x[[UNORDERED:[0-9]+]], x0

; CHECK: bl __gttf2
; CHECK: cmp w0, #0
; CHECK: csinc [[GT:w[0-9]+]], wzr, wzr, le
; CHECK: cmp w[[UNORDERED]], #0
; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq
; CHECK: orr w0, [[UNORDERED]], [[GT]]

  ret i1 %val
; CHECK: ret
}

define i32 @test_br_cc() {
; CHECK-LABEL: test_br_cc:

  %lhs = load fp128* @lhs
  %rhs = load fp128* @rhs
; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:lhs]
; CHECK: ldr q1, [{{x[0-9]+}}, #:lo12:rhs]

; olt == !uge, which LLVM unfortunately "optimizes" this to.
  %cond = fcmp olt fp128 %lhs, %rhs
; CHECK: bl __unordtf2
; CHECK: mov x[[UNORDERED:[0-9]+]], x0

; CHECK: bl __getf2
; CHECK: cmp w0, #0

; CHECK: csinc [[OGE:w[0-9]+]], wzr, wzr, lt
; CHECK: cmp w[[UNORDERED]], #0
; CHECK: csinc [[UNORDERED:w[0-9]+]], wzr, wzr, eq
; CHECK: orr [[UGE:w[0-9]+]], [[UNORDERED]], [[OGE]]
; CHECK: cbnz [[UGE]], [[RET29:.LBB[0-9]+_[0-9]+]]
  br i1 %cond, label %iftrue, label %iffalse

iftrue:
  ret i32 42
; CHECK-NEXT: BB#
; CHECK-NEXT: movz x0, #42
; CHECK-NEXT: b [[REALRET:.LBB[0-9]+_[0-9]+]]

iffalse:
  ret i32 29
; CHECK: [[RET29]]:
; CHECK-NEXT: movz x0, #29
; CHECK-NEXT: [[REALRET]]:
; CHECK: ret
}

define void @test_select(i1 %cond, fp128 %lhs, fp128 %rhs) {
; CHECK-LABEL: test_select:

  %val = select i1 %cond, fp128 %lhs, fp128 %rhs
  store fp128 %val, fp128* @lhs
; CHECK: cmp w0, #0
; CHECK: str q1, [sp]
; CHECK-NEXT: b.eq [[IFFALSE:.LBB[0-9]+_[0-9]+]]
; CHECK-NEXT: BB#
; CHECK-NEXT: str q0, [sp]
; CHECK-NEXT: [[IFFALSE]]:
; CHECK-NEXT: ldr q0, [sp]
; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]
  ret void
; CHECK: ret
}

@varfloat = global float 0.0
@vardouble = global double 0.0

define void @test_round() {
; CHECK-LABEL: test_round:

  %val = load fp128* @lhs

  %float = fptrunc fp128 %val to float
  store float %float, float* @varfloat
; CHECK: bl __trunctfsf2
; CHECK: str s0, [{{x[0-9]+}}, #:lo12:varfloat]

  %double = fptrunc fp128 %val to double
  store double %double, double* @vardouble
; CHECK: bl __trunctfdf2
; CHECK: str d0, [{{x[0-9]+}}, #:lo12:vardouble]

  ret void
}

define void @test_extend() {
; CHECK-LABEL: test_extend:

  %val = load fp128* @lhs

  %float = load float* @varfloat
  %fromfloat = fpext float %float to fp128
  store volatile fp128 %fromfloat, fp128* @lhs
; CHECK: bl __extendsftf2
; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]

  %double = load double* @vardouble
  %fromdouble = fpext double %double to fp128
  store volatile fp128 %fromdouble, fp128* @lhs
; CHECK: bl __extenddftf2
; CHECK: str q0, [{{x[0-9]+}}, #:lo12:lhs]

  ret void
; CHECK: ret
}

define fp128 @test_neg(fp128 %in) {
; CHECK: [[MINUS0:.LCPI[0-9]+_0]]:
; Make sure the weird hex constant below *is* -0.0
; CHECK-NEXT: fp128 -0

; CHECK-LABEL: test_neg:

; Could in principle be optimized to fneg which we can't select, this makes
; sure that doesn't happen.
  %ret = fsub fp128 0xL00000000000000008000000000000000, %in
; CHECK: str q0, [sp, #-16]
; CHECK-NEXT: ldr q1, [sp], #16
; CHECK: ldr q0, [{{x[0-9]+}}, #:lo12:[[MINUS0]]]
; CHECK: bl __subtf3

  ret fp128 %ret
; CHECK: ret
}