/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"

namespace art {

/* This file contains codegen for the Thumb ISA. */

/*
 * Determine whether value can be encoded as a Thumb2 floating point
 * immediate.  If not, return -1.  If so, return the encoded 8-bit value.
 */
static int EncodeImmSingle(int value) {
  int res;
  int bit_a = (value & 0x80000000) >> 31;
  int not_bit_b = (value & 0x40000000) >> 30;
  int bit_b = (value & 0x20000000) >> 29;
  int b_smear = (value & 0x3e000000) >> 25;
  int slice = (value & 0x01f80000) >> 19;
  int zeroes = (value & 0x0007ffff);
  if (zeroes != 0)
    return -1;
  if (bit_b) {
    if ((not_bit_b != 0) || (b_smear != 0x1f))
      return -1;
  } else {
    if ((not_bit_b != 1) || (b_smear != 0x0))
      return -1;
  }
  res = (bit_a << 7) | (bit_b << 6) | slice;
  return res;
}

/*
 * Determine whether value can be encoded as a Thumb2 floating point
 * immediate.  If not, return -1.  If so, return the encoded 8-bit value.
 */
static int EncodeImmDouble(int64_t value) {
  int res;
  int bit_a = (value & 0x8000000000000000ll) >> 63;
  int not_bit_b = (value & 0x4000000000000000ll) >> 62;
  int bit_b = (value & 0x2000000000000000ll) >> 61;
  int b_smear = (value & 0x3fc0000000000000ll) >> 54;
  int slice = (value & 0x003f000000000000ll) >> 48;
  uint64_t zeroes = (value & 0x0000ffffffffffffll);
  if (zeroes != 0)
    return -1;
  if (bit_b) {
    if ((not_bit_b != 0) || (b_smear != 0xff))
      return -1;
  } else {
    if ((not_bit_b != 1) || (b_smear != 0x0))
      return -1;
  }
  res = (bit_a << 7) | (bit_b << 6) | slice;
  return res;
}
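
/*
 * Both encoders above accept exactly the VFPExpandImm() immediates described
 * in the ARM ARM: values of the form +/- n/16 * 2^e with 16 <= n <= 31 and
 * -3 <= e <= 4.  Note that +2.0 encodes to 0, a fact LoadFPConstantValue
 * below relies on.
 */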

LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
  DCHECK(ARM_SINGLEREG(r_dest));
  if (value == 0) {
    // TODO: we need better info about the target CPU.  A vector exclusive or
    // would probably be better here if we could rely on its existence.
    // Load an immediate +2.0 (which encodes to 0).
    NewLIR2(kThumb2Vmovs_IMM8, r_dest, 0);
    // +0.0 = +2.0 - +2.0
    return NewLIR3(kThumb2Vsubs, r_dest, r_dest, r_dest);
  } else {
    int encoded_imm = EncodeImmSingle(value);
    if (encoded_imm >= 0) {
      return NewLIR2(kThumb2Vmovs_IMM8, r_dest, encoded_imm);
    }
  }
  LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
  if (data_target == NULL) {
    data_target = AddWordData(&literal_list_, value);
  }
  LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
                            r_dest, r15pc, 0, 0, 0, data_target);
  SetMemRefType(load_pc_rel, true, kLiteral);
  load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
  AppendLIR(load_pc_rel);
  return load_pc_rel;
}

static int LeadingZeros(uint32_t val) {
  uint32_t alt;
  int n;
  int count;

  count = 16;
  n = 32;
  do {
    alt = val >> count;
    if (alt != 0) {
      n = n - count;
      val = alt;
    }
    count >>= 1;
  } while (count);
  return n - val;
}

/*
 * Determine whether value can be encoded as a Thumb2 modified
 * immediate.  If not, return -1.  If so, return i:imm3:a:bcdefgh form.
 */
int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
  int z_leading;
  int z_trailing;
  uint32_t b0 = value & 0xff;

  /* Note: case of value==0 must use 0:000:0:0000000 encoding */
  if (value <= 0xFF)
    return b0;  // 0:000:a:bcdefgh
  if (value == ((b0 << 16) | b0))
    return (0x1 << 8) | b0;   /* 0:001:a:bcdefgh */
  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
    return (0x3 << 8) | b0;   /* 0:011:a:bcdefgh */
  b0 = (value >> 8) & 0xff;
  if (value == ((b0 << 24) | (b0 << 8)))
    return (0x2 << 8) | b0;   /* 0:010:a:bcdefgh */
  /* Can we do it with rotation? */
  z_leading = LeadingZeros(value);
  z_trailing = 32 - LeadingZeros(~value & (value - 1));
  /* A run of eight or fewer active bits? */
  if ((z_leading + z_trailing) < 24)
    return -1;  /* No - bail */
  /* Left-justify the constant, discarding msb (known to be 1) */
  value <<= z_leading + 1;
  /* Create bcdefgh */
  value >>= 25;
  /* Put it all together */
  return value | ((0x8 + z_leading) << 7);  /* [01000..11111]:bcdefgh */
}
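
/*
 * Worked example of the rotation case above: value == 0x00ab0000 gives
 * z_leading == 8 and z_trailing == 16, so the run of active bits fits in a
 * byte.  Left-justifying and dropping the leading 1 leaves bcdefgh == 0x2b,
 * and the result is 0x2b | ((8 + 8) << 7) == 0x82b, i.e. 0xab rotated right
 * by 16.
 */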

bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) {
  return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
}

bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) {
  return EncodeImmSingle(value) >= 0;
}

bool ArmMir2Lir::InexpensiveConstantLong(int64_t value) {
  return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
}

bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) {
  return EncodeImmDouble(value) >= 0;
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.
 *
 * No additional register clobbering operation performed. Use this version when
 * 1) r_dest is freshly returned from AllocTemp or
 * 2) The codegen is under fixed register usage
 */
LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) {
  LIR* res;
  int mod_imm;

  if (ARM_FPREG(r_dest)) {
    return LoadFPConstantValue(r_dest, value);
  }

  /* See if the value can be constructed cheaply */
  if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
    return NewLIR2(kThumbMovImm, r_dest, value);
  }
  /* Check Modified immediate special cases */
  mod_imm = ModifiedImmediate(value);
  if (mod_imm >= 0) {
    res = NewLIR2(kThumb2MovImmShift, r_dest, mod_imm);
    return res;
  }
  mod_imm = ModifiedImmediate(~value);
  if (mod_imm >= 0) {
    res = NewLIR2(kThumb2MvnImm12, r_dest, mod_imm);
    return res;
  }
  /* 16-bit immediate? */
  if ((value & 0xffff) == value) {
    res = NewLIR2(kThumb2MovImm16, r_dest, value);
    return res;
  }
  /* Do a low/high pair */
  res = NewLIR2(kThumb2MovImm16, r_dest, Low16Bits(value));
  NewLIR2(kThumb2MovImm16H, r_dest, High16Bits(value));
  return res;
}
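
/*
 * The final case above is the standard Thumb2 movw/movt pair (assuming
 * kThumb2MovImm16 and kThumb2MovImm16H assemble to movw/movt, as the names
 * suggest): movw zero-extends its 16-bit immediate and movt then fills in
 * the upper halfword, so any 32-bit constant is materialized in two
 * instructions without touching the literal pool.
 */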

LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
  LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly */);
  res->target = target;
  return res;
}

LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
  LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
                        ArmConditionEncoding(cc));
  branch->target = target;
  return branch;
}

LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) {
  ArmOpcode opcode = kThumbBkpt;
  switch (op) {
    case kOpBlx:
      opcode = kThumbBlxR;
      break;
    case kOpBx:
      opcode = kThumbBx;
      break;
    default:
      LOG(FATAL) << "Bad opcode " << op;
  }
  return NewLIR1(opcode, r_dest_src);
}

LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2,
                               int shift) {
  bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
  ArmOpcode opcode = kThumbBkpt;
  switch (op) {
    case kOpAdc:
      opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
      break;
    case kOpAnd:
      opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
      break;
    case kOpBic:
      opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
      break;
    case kOpCmn:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
      break;
    case kOpCmp:
      if (thumb_form)
        opcode = kThumbCmpRR;
      else if ((shift == 0) && !ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
        opcode = kThumbCmpHH;
      else if ((shift == 0) && ARM_LOWREG(r_dest_src1))
        opcode = kThumbCmpLH;
      else if (shift == 0)
        opcode = kThumbCmpHL;
      else
        opcode = kThumb2CmpRR;
      break;
    case kOpXor:
      opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
      break;
    case kOpMov:
      DCHECK_EQ(shift, 0);
      if (ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2))
        opcode = kThumbMovRR;
      else if (!ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
        opcode = kThumbMovRR_H2H;
      else if (ARM_LOWREG(r_dest_src1))
        opcode = kThumbMovRR_H2L;
      else
        opcode = kThumbMovRR_L2H;
      break;
    case kOpMul:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
      break;
    case kOpMvn:
      opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
      break;
    case kOpNeg:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
      break;
    case kOpOr:
      opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
      break;
    case kOpSbc:
      opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
      break;
    case kOpTst:
      opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
      break;
    case kOpLsl:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
      break;
    case kOpLsr:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
      break;
    case kOpAsr:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
      break;
    case kOpRor:
      DCHECK_EQ(shift, 0);
      opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
      break;
    case kOpAdd:
      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
      break;
    case kOpSub:
      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
      break;
    case kOp2Byte:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
    case kOp2Short:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
    case kOp2Char:
      DCHECK_EQ(shift, 0);
      return NewLIR4(kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  DCHECK_GE(static_cast<int>(opcode), 0);
  if (EncodingMap[opcode].flags & IS_BINARY_OP) {
    return NewLIR2(opcode, r_dest_src1, r_src2);
  } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    if (EncodingMap[opcode].field_loc[2].kind == kFmtShift) {
      return NewLIR3(opcode, r_dest_src1, r_src2, shift);
    } else {
      return NewLIR3(opcode, r_dest_src1, r_dest_src1, r_src2);
    }
  } else if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    return NewLIR4(opcode, r_dest_src1, r_dest_src1, r_src2, shift);
  } else {
    LOG(FATAL) << "Unexpected encoding operand count";
    return NULL;
  }
}

LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
  return OpRegRegShift(op, r_dest_src1, r_src2, 0);
}
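
/*
 * OpRegRegShift above implements the two-address form rd = rd op rs2: when
 * the EncodingMap flags select a three- or four-operand Thumb2 encoding,
 * the destination register is repeated as the first source, except for
 * encodings whose third operand is a shift descriptor (kFmtShift).
 */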

LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1,
                                  int r_src2, int shift) {
  ArmOpcode opcode = kThumbBkpt;
  bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
      ARM_LOWREG(r_src2);
  switch (op) {
    case kOpAdd:
      opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
      break;
    case kOpSub:
      opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
      break;
    case kOpRsub:
      opcode = kThumb2RsubRRR;
      break;
    case kOpAdc:
      opcode = kThumb2AdcRRR;
      break;
    case kOpAnd:
      opcode = kThumb2AndRRR;
      break;
    case kOpBic:
      opcode = kThumb2BicRRR;
      break;
    case kOpXor:
      opcode = kThumb2EorRRR;
      break;
    case kOpMul:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2MulRRR;
      break;
    case kOpOr:
      opcode = kThumb2OrrRRR;
      break;
    case kOpSbc:
      opcode = kThumb2SbcRRR;
      break;
    case kOpLsl:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2LslRRR;
      break;
    case kOpLsr:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2LsrRRR;
      break;
    case kOpAsr:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2AsrRRR;
      break;
    case kOpRor:
      DCHECK_EQ(shift, 0);
      opcode = kThumb2RorRRR;
      break;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
      break;
  }
  DCHECK_GE(static_cast<int>(opcode), 0);
  if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    return NewLIR4(opcode, r_dest, r_src1, r_src2, shift);
  } else {
    DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
    return NewLIR3(opcode, r_dest, r_src1, r_src2);
  }
}

LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) {
  return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
}
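
/*
 * OpRegRegImm below works through the immediate encodings from cheapest to
 * most general: 16-bit Thumb forms, then the Thumb2 add/sub immediates
 * (negating the constant and flipping add/sub when that helps), then
 * modified immediates, and as a last resort a constant materialized into a
 * scratch register combined with the three-register form.
 */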

LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
  LIR* res;
  bool neg = (value < 0);
  int abs_value = (neg) ? -value : value;
  ArmOpcode opcode = kThumbBkpt;
  ArmOpcode alt_opcode = kThumbBkpt;
  bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
  int mod_imm = ModifiedImmediate(value);
  int mod_imm_neg = ModifiedImmediate(-value);

  switch (op) {
    case kOpLsl:
      if (all_low_regs)
        return NewLIR3(kThumbLslRRI5, r_dest, r_src1, value);
      else
        return NewLIR3(kThumb2LslRRI5, r_dest, r_src1, value);
    case kOpLsr:
      if (all_low_regs)
        return NewLIR3(kThumbLsrRRI5, r_dest, r_src1, value);
      else
        return NewLIR3(kThumb2LsrRRI5, r_dest, r_src1, value);
    case kOpAsr:
      if (all_low_regs)
        return NewLIR3(kThumbAsrRRI5, r_dest, r_src1, value);
      else
        return NewLIR3(kThumb2AsrRRI5, r_dest, r_src1, value);
    case kOpRor:
      return NewLIR3(kThumb2RorRRI5, r_dest, r_src1, value);
    case kOpAdd:
      if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
          (value <= 1020) && ((value & 0x3) == 0)) {
        return NewLIR3(kThumbAddSpRel, r_dest, r_src1, value >> 2);
      } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
                 (value <= 1020) && ((value & 0x3) == 0)) {
        return NewLIR3(kThumbAddPcRel, r_dest, r_src1, value >> 2);
      }
      // Note: intentional fallthrough
    case kOpSub:
      if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
        if (op == kOpAdd)
          opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
        else
          opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
        return NewLIR3(opcode, r_dest, r_src1, abs_value);
      } else if ((abs_value & 0xff) == abs_value) {
        if (op == kOpAdd)
          opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
        else
          opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
        return NewLIR3(opcode, r_dest, r_src1, abs_value);
      }
      if (mod_imm_neg >= 0) {
        op = (op == kOpAdd) ? kOpSub : kOpAdd;
        mod_imm = mod_imm_neg;
      }
      if (op == kOpSub) {
        opcode = kThumb2SubRRI8;
        alt_opcode = kThumb2SubRRR;
      } else {
        opcode = kThumb2AddRRI8;
        alt_opcode = kThumb2AddRRR;
      }
      break;
    case kOpRsub:
      opcode = kThumb2RsubRRI8;
      alt_opcode = kThumb2RsubRRR;
      break;
    case kOpAdc:
      opcode = kThumb2AdcRRI8;
      alt_opcode = kThumb2AdcRRR;
      break;
    case kOpSbc:
      opcode = kThumb2SbcRRI8;
      alt_opcode = kThumb2SbcRRR;
      break;
    case kOpOr:
      opcode = kThumb2OrrRRI8;
      alt_opcode = kThumb2OrrRRR;
      break;
    case kOpAnd:
      opcode = kThumb2AndRRI8;
      alt_opcode = kThumb2AndRRR;
      break;
    case kOpXor:
      opcode = kThumb2EorRRI8;
      alt_opcode = kThumb2EorRRR;
      break;
    case kOpMul:
      // TUNING: power of 2, shift & add
      mod_imm = -1;
      alt_opcode = kThumb2MulRRR;
      break;
    case kOpCmp:
      if (mod_imm >= 0) {
        res = NewLIR2(kThumb2CmpRI12, r_src1, mod_imm);
      } else {
        int r_tmp = AllocTemp();
        res = LoadConstant(r_tmp, value);
        OpRegReg(kOpCmp, r_src1, r_tmp);
        FreeTemp(r_tmp);
      }
      return res;
    default:
      LOG(FATAL) << "Bad opcode: " << op;
  }

  if (mod_imm >= 0) {
    return NewLIR3(opcode, r_dest, r_src1, mod_imm);
  } else {
    int r_scratch = AllocTemp();
    LoadConstant(r_scratch, value);
    if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
      res = NewLIR4(alt_opcode, r_dest, r_src1, r_scratch, 0);
    else
      res = NewLIR3(alt_opcode, r_dest, r_src1, r_scratch);
    FreeTemp(r_scratch);
    return res;
  }
}

/* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
  bool neg = (value < 0);
  int abs_value = (neg) ? -value : value;
  bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
  ArmOpcode opcode = kThumbBkpt;
  switch (op) {
    case kOpAdd:
      if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
        DCHECK_EQ((value & 0x3), 0);
        return NewLIR1(kThumbAddSpI7, value >> 2);
      } else if (short_form) {
        opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
      }
      break;
    case kOpSub:
      if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
        DCHECK_EQ((value & 0x3), 0);
        return NewLIR1(kThumbSubSpI7, value >> 2);
      } else if (short_form) {
        opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
      }
      break;
    case kOpCmp:
      if (short_form) {
        opcode = kThumbCmpRI8;
      }
      /* else not a short form - punt to OpRegRegImm below */
      break;
    default:
      /* Punt to OpRegRegImm - if bad case catch it there */
      short_form = false;
      break;
  }
  if (short_form) {
    return NewLIR2(opcode, r_dest_src1, abs_value);
  } else {
    return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
  }
}
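
/*
 * Wide constants try, in order: a VFP immediate move for doubles, a pair of
 * cheap 32-bit materializations for core registers, and otherwise a single
 * PC-relative 64-bit load from the literal pool.
 */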
LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
  LIR* res = NULL;
  int32_t val_lo = Low32Bits(value);
  int32_t val_hi = High32Bits(value);
  int target_reg = S2d(r_dest_lo, r_dest_hi);
  if (ARM_FPREG(r_dest_lo)) {
    if ((val_lo == 0) && (val_hi == 0)) {
      // TODO: we need better info about the target CPU.  A vector exclusive or
      // would probably be better here if we could rely on its existence.
      // Load an immediate +2.0 (which encodes to 0).
      NewLIR2(kThumb2Vmovd_IMM8, target_reg, 0);
      // +0.0 = +2.0 - +2.0
      res = NewLIR3(kThumb2Vsubd, target_reg, target_reg, target_reg);
    } else {
      int encoded_imm = EncodeImmDouble(value);
      if (encoded_imm >= 0) {
        res = NewLIR2(kThumb2Vmovd_IMM8, target_reg, encoded_imm);
      }
    }
  } else {
    if (InexpensiveConstantInt(val_lo) && InexpensiveConstantInt(val_hi)) {
      res = LoadConstantNoClobber(r_dest_lo, val_lo);
      LoadConstantNoClobber(r_dest_hi, val_hi);
    }
  }
  if (res == NULL) {
    // No short form - load from the literal pool.
    LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
    if (data_target == NULL) {
      data_target = AddWideData(&literal_list_, val_lo, val_hi);
    }
    if (ARM_FPREG(r_dest_lo)) {
      res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
                   target_reg, r15pc, 0, 0, 0, data_target);
    } else {
      res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
                   r_dest_lo, r_dest_hi, r15pc, 0, 0, data_target);
    }
    SetMemRefType(res, true, kLiteral);
    res->alias_info = reinterpret_cast<uintptr_t>(data_target);
    AppendLIR(res);
  }
  return res;
}

int ArmMir2Lir::EncodeShift(int code, int amount) {
  return ((amount & 0x1f) << 2) | code;
}
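
/*
 * EncodeShift packs a shift descriptor for the kFmtShift operand of the
 * Thumb2 three-register forms: the shift type goes in the low two bits and
 * the 5-bit amount sits above it.  For example, a scaled index is built
 * with EncodeShift(kArmLsl, scale), as in LoadBaseIndexed below.
 */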
LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
                                 int scale, OpSize size) {
  bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
  LIR* load;
  ArmOpcode opcode = kThumbBkpt;
  bool thumb_form = (all_low_regs && (scale == 0));
  int reg_ptr;

  if (ARM_FPREG(r_dest)) {
    if (ARM_SINGLEREG(r_dest)) {
      DCHECK((size == kWord) || (size == kSingle));
      opcode = kThumb2Vldrs;
      size = kSingle;
    } else {
      DCHECK(ARM_DOUBLEREG(r_dest));
      DCHECK((size == kLong) || (size == kDouble));
      DCHECK_EQ((r_dest & 0x1), 0);
      opcode = kThumb2Vldrd;
      size = kDouble;
    }
  } else {
    if (size == kSingle)
      size = kWord;
  }

  switch (size) {
    case kDouble:  // fall-through
    case kSingle:
      reg_ptr = AllocTemp();
      if (scale) {
        NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
                EncodeShift(kArmLsl, scale));
      } else {
        OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
      }
      load = NewLIR3(opcode, r_dest, reg_ptr, 0);
      FreeTemp(reg_ptr);
      return load;
    case kWord:
      opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
      break;
    case kUnsignedHalf:
      opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
      break;
    case kSignedHalf:
      opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
      break;
    case kUnsignedByte:
      opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
      break;
    case kSignedByte:
      opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  if (thumb_form)
    load = NewLIR3(opcode, r_dest, rBase, r_index);
  else
    load = NewLIR4(opcode, r_dest, rBase, r_index, scale);

  return load;
}

LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
                                  int scale, OpSize size) {
  bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
  LIR* store = NULL;
  ArmOpcode opcode = kThumbBkpt;
  bool thumb_form = (all_low_regs && (scale == 0));
  int reg_ptr;

  if (ARM_FPREG(r_src)) {
    if (ARM_SINGLEREG(r_src)) {
      DCHECK((size == kWord) || (size == kSingle));
      opcode = kThumb2Vstrs;
      size = kSingle;
    } else {
      DCHECK(ARM_DOUBLEREG(r_src));
      DCHECK((size == kLong) || (size == kDouble));
      DCHECK_EQ((r_src & 0x1), 0);
      opcode = kThumb2Vstrd;
      size = kDouble;
    }
  } else {
    if (size == kSingle)
      size = kWord;
  }

  switch (size) {
    case kDouble:  // fall-through
    case kSingle:
      reg_ptr = AllocTemp();
      if (scale) {
        NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
                EncodeShift(kArmLsl, scale));
      } else {
        OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
      }
      store = NewLIR3(opcode, r_src, reg_ptr, 0);
      FreeTemp(reg_ptr);
      return store;
    case kWord:
      opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
      break;
    case kUnsignedByte:
    case kSignedByte:
      opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  if (thumb_form)
    store = NewLIR3(opcode, r_src, rBase, r_index);
  else
    store = NewLIR4(opcode, r_src, rBase, r_index, scale);

  return store;
}
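
/*
 * The kSingle/kDouble cases above materialize base + (index << scale) into
 * a temp first because VLDR/VSTR only accept an immediate offset from a
 * base register - there is no register-offset VFP addressing mode.
 */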
/*
 * Load value from base + displacement.  Optionally perform null check
 * on base (which must have an associated s_reg and MIR).  If not
 * performing null check, incoming MIR can be null.
 */
LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
                                  int r_dest_hi, OpSize size, int s_reg) {
  LIR* load = NULL;
  ArmOpcode opcode = kThumbBkpt;
  bool short_form = false;
  bool thumb2Form = (displacement < 4092 && displacement >= 0);
  bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_dest));
  int encoded_disp = displacement;
  bool is64bit = false;
  bool already_generated = false;
  switch (size) {
    case kDouble:
    case kLong:
      is64bit = true;
      if (ARM_FPREG(r_dest)) {
        if (ARM_SINGLEREG(r_dest)) {
          DCHECK(ARM_FPREG(r_dest_hi));
          r_dest = S2d(r_dest, r_dest_hi);
        }
        opcode = kThumb2Vldrd;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;
        }
      } else {
        if (displacement <= 1020) {
          load = NewLIR4(kThumb2LdrdI8, r_dest, r_dest_hi, rBase, displacement >> 2);
        } else {
          load = LoadBaseDispBody(rBase, displacement, r_dest,
                                  -1, kWord, s_reg);
          LoadBaseDispBody(rBase, displacement + 4, r_dest_hi,
                           -1, kWord, INVALID_SREG);
        }
        already_generated = true;
      }
      break;
    case kSingle:
    case kWord:
      if (ARM_FPREG(r_dest)) {
        opcode = kThumb2Vldrs;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;
        }
        break;
      }
      if (ARM_LOWREG(r_dest) && (rBase == r15pc) &&
          (displacement <= 1020) && (displacement >= 0)) {
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbLdrPcRel;
      } else if (ARM_LOWREG(r_dest) && (rBase == r13sp) &&
                 (displacement <= 1020) && (displacement >= 0)) {
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbLdrSpRel;
      } else if (all_low_regs && displacement < 128 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x3), 0);
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbLdrRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2LdrRRI12;
      }
      break;
    case kUnsignedHalf:
      if (all_low_regs && displacement < 64 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x1), 0);
        short_form = true;
        encoded_disp >>= 1;
        opcode = kThumbLdrhRRI5;
      } else if (displacement < 4092 && displacement >= 0) {
        short_form = true;
        opcode = kThumb2LdrhRRI12;
      }
      break;
    case kSignedHalf:
      if (thumb2Form) {
        short_form = true;
        opcode = kThumb2LdrshRRI12;
      }
      break;
    case kUnsignedByte:
      if (all_low_regs && displacement < 32 && displacement >= 0) {
        short_form = true;
        opcode = kThumbLdrbRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2LdrbRRI12;
      }
      break;
    case kSignedByte:
      if (thumb2Form) {
        short_form = true;
        opcode = kThumb2LdrsbRRI12;
      }
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }

  if (!already_generated) {
    if (short_form) {
      load = NewLIR3(opcode, r_dest, rBase, encoded_disp);
    } else {
      int reg_offset = AllocTemp();
      LoadConstant(reg_offset, encoded_disp);
      load = LoadBaseIndexed(rBase, reg_offset, r_dest, 0, size);
      FreeTemp(reg_offset);
    }
  }

  // TODO: in future may need to differentiate Dalvik accesses w/ spills
  if (rBase == rARM_SP) {
    AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
  }
  return load;
}
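
/*
 * The wrappers below are the narrow and wide entry points; passing -1 as
 * the high register tells LoadBaseDispBody to take the 32-bit (or
 * narrower) path.
 */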
LIR* ArmMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
                              OpSize size, int s_reg) {
  return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg);
}

LIR* ArmMir2Lir::LoadBaseDispWide(int rBase, int displacement, int r_dest_lo,
                                  int r_dest_hi, int s_reg) {
  return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
}

LIR* ArmMir2Lir::StoreBaseDispBody(int rBase, int displacement,
                                   int r_src, int r_src_hi, OpSize size) {
  LIR* store = NULL;
  ArmOpcode opcode = kThumbBkpt;
  bool short_form = false;
  bool thumb2Form = (displacement < 4092 && displacement >= 0);
  bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_src));
  int encoded_disp = displacement;
  bool is64bit = false;
  bool already_generated = false;
  switch (size) {
    case kLong:
    case kDouble:
      is64bit = true;
      if (!ARM_FPREG(r_src)) {
        if (displacement <= 1020) {
          store = NewLIR4(kThumb2StrdI8, r_src, r_src_hi, rBase, displacement >> 2);
        } else {
          store = StoreBaseDispBody(rBase, displacement, r_src, -1, kWord);
          StoreBaseDispBody(rBase, displacement + 4, r_src_hi, -1, kWord);
        }
        already_generated = true;
      } else {
        if (ARM_SINGLEREG(r_src)) {
          DCHECK(ARM_FPREG(r_src_hi));
          r_src = S2d(r_src, r_src_hi);
        }
        opcode = kThumb2Vstrd;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;
        }
      }
      break;
    case kSingle:
    case kWord:
      if (ARM_FPREG(r_src)) {
        DCHECK(ARM_SINGLEREG(r_src));
        opcode = kThumb2Vstrs;
        if (displacement <= 1020) {
          short_form = true;
          encoded_disp >>= 2;
        }
        break;
      }
      if (ARM_LOWREG(r_src) && (rBase == r13sp) &&
          (displacement <= 1020) && (displacement >= 0)) {
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbStrSpRel;
      } else if (all_low_regs && displacement < 128 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x3), 0);
        short_form = true;
        encoded_disp >>= 2;
        opcode = kThumbStrRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrRRI12;
      }
      break;
    case kUnsignedHalf:
    case kSignedHalf:
      if (all_low_regs && displacement < 64 && displacement >= 0) {
        DCHECK_EQ((displacement & 0x1), 0);
        short_form = true;
        encoded_disp >>= 1;
        opcode = kThumbStrhRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrhRRI12;
      }
      break;
    case kUnsignedByte:
    case kSignedByte:
      if (all_low_regs && displacement < 32 && displacement >= 0) {
        short_form = true;
        opcode = kThumbStrbRRI5;
      } else if (thumb2Form) {
        short_form = true;
        opcode = kThumb2StrbRRI12;
      }
      break;
    default:
      LOG(FATAL) << "Bad size: " << size;
  }
  if (!already_generated) {
    if (short_form) {
      store = NewLIR3(opcode, r_src, rBase, encoded_disp);
    } else {
      int r_scratch = AllocTemp();
      LoadConstant(r_scratch, encoded_disp);
      store = StoreBaseIndexed(rBase, r_scratch, r_src, 0, size);
      FreeTemp(r_scratch);
    }
  }

  // TODO: In future, may need to differentiate Dalvik & spill accesses
  if (rBase == rARM_SP) {
    AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, is64bit);
  }
  return store;
}

LIR* ArmMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
                               OpSize size) {
  return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
}

LIR* ArmMir2Lir::StoreBaseDispWide(int rBase, int displacement,
                                   int r_src_lo, int r_src_hi) {
  return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
}
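
/*
 * OpFpRegCopy below also covers the core<->VFP moves: kThumb2Fmsr and
 * kThumb2Fmrs correspond to the classic fmsr/fmrs mnemonics, i.e.
 * vmov sN, rN and vmov rN, sN respectively.
 */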
LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) {
  int opcode;
  DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
  if (ARM_DOUBLEREG(r_dest)) {
    opcode = kThumb2Vmovd;
  } else {
    if (ARM_SINGLEREG(r_dest)) {
      opcode = ARM_SINGLEREG(r_src) ? kThumb2Vmovs : kThumb2Fmsr;
    } else {
      DCHECK(ARM_SINGLEREG(r_src));
      opcode = kThumb2Fmrs;
    }
  }
  LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
  if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
    res->flags.is_nop = true;
  }
  return res;
}

LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
  LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
  return NULL;
}

LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp) {
  LOG(FATAL) << "Unexpected use of OpMem for Arm";
  return NULL;
}

LIR* ArmMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
                                      int displacement, int r_src, int r_src_hi, OpSize size,
                                      int s_reg) {
  LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
  return NULL;
}

LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset) {
  LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
  return NULL;
}

LIR* ArmMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
                                     int displacement, int r_dest, int r_dest_hi, OpSize size,
                                     int s_reg) {
  LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
  return NULL;
}

}  // namespace art