1 /* 2 * Copyright (C) 2009 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 /* 18 * This file contains codegen for the Thumb ISA and is intended to be 19 * includes by: 20 * 21 * Codegen-$(TARGET_ARCH_VARIANT).c 22 * 23 */ 24 25 static int coreTemps[] = {r0, r1, r2, r3, r4PC, r7, r8, r9, r10, r11, r12}; 26 static int fpTemps[] = {fr16, fr17, fr18, fr19, fr20, fr21, fr22, fr23, 27 fr24, fr25, fr26, fr27, fr28, fr29, fr30, fr31}; 28 29 static int encodeImmSingle(int value) 30 { 31 int res; 32 int bitA = (value & 0x80000000) >> 31; 33 int notBitB = (value & 0x40000000) >> 30; 34 int bitB = (value & 0x20000000) >> 29; 35 int bSmear = (value & 0x3e000000) >> 25; 36 int slice = (value & 0x01f80000) >> 19; 37 int zeroes = (value & 0x0007ffff); 38 if (zeroes != 0) 39 return -1; 40 if (bitB) { 41 if ((notBitB != 0) || (bSmear != 0x1f)) 42 return -1; 43 } else { 44 if ((notBitB != 1) || (bSmear != 0x0)) 45 return -1; 46 } 47 res = (bitA << 7) | (bitB << 6) | slice; 48 return res; 49 } 50 51 static ArmLIR *loadFPConstantValue(CompilationUnit *cUnit, int rDest, 52 int value) 53 { 54 int encodedImm = encodeImmSingle(value); 55 assert(SINGLEREG(rDest)); 56 if (value == 0) { 57 // TODO: we need better info about the target CPU. a vector exclusive or 58 // would probably be better here if we could rely on its existance. 
59 // Load an immediate +2.0 (which encodes to 0) 60 newLIR2(cUnit, kThumb2Vmovs_IMM8, rDest, 0); 61 // +0.0 = +2.0 - +2.0 62 return newLIR3(cUnit, kThumb2Vsubs, rDest, rDest, rDest); 63 } else if (encodedImm >= 0) { 64 return newLIR2(cUnit, kThumb2Vmovs_IMM8, rDest, encodedImm); 65 } 66 ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 0); 67 if (dataTarget == NULL) { 68 dataTarget = addWordData(cUnit, &cUnit->literalList, value); 69 } 70 ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true); 71 loadPcRel->opcode = kThumb2Vldrs; 72 loadPcRel->generic.target = (LIR *) dataTarget; 73 loadPcRel->operands[0] = rDest; 74 loadPcRel->operands[1] = r15pc; 75 setupResourceMasks(loadPcRel); 76 setMemRefType(loadPcRel, true, kLiteral); 77 loadPcRel->aliasInfo = dataTarget->operands[0]; 78 dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel); 79 return loadPcRel; 80 } 81 82 static int leadingZeros(u4 val) 83 { 84 u4 alt; 85 int n; 86 int count; 87 88 count = 16; 89 n = 32; 90 do { 91 alt = val >> count; 92 if (alt != 0) { 93 n = n - count; 94 val = alt; 95 } 96 count >>= 1; 97 } while (count); 98 return n - val; 99 } 100 101 /* 102 * Determine whether value can be encoded as a Thumb2 modified 103 * immediate. If not, return -1. If so, return i:imm3:a:bcdefgh form. 104 */ 105 static int modifiedImmediate(u4 value) 106 { 107 int zLeading; 108 int zTrailing; 109 u4 b0 = value & 0xff; 110 111 /* Note: case of value==0 must use 0:000:0:0000000 encoding */ 112 if (value <= 0xFF) 113 return b0; // 0:000:a:bcdefgh 114 if (value == ((b0 << 16) | b0)) 115 return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */ 116 if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0)) 117 return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */ 118 b0 = (value >> 8) & 0xff; 119 if (value == ((b0 << 24) | (b0 << 8))) 120 return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */ 121 /* Can we do it with rotation? 
*/ 122 zLeading = leadingZeros(value); 123 zTrailing = 32 - leadingZeros(~value & (value - 1)); 124 /* A run of eight or fewer active bits? */ 125 if ((zLeading + zTrailing) < 24) 126 return -1; /* No - bail */ 127 /* left-justify the constant, discarding msb (known to be 1) */ 128 value <<= zLeading + 1; 129 /* Create bcdefgh */ 130 value >>= 25; 131 /* Put it all together */ 132 return value | ((0x8 + zLeading) << 7); /* [01000..11111]:bcdefgh */ 133 } 134 135 /* 136 * Load a immediate using a shortcut if possible; otherwise 137 * grab from the per-translation literal pool. 138 * 139 * No additional register clobbering operation performed. Use this version when 140 * 1) rDest is freshly returned from dvmCompilerAllocTemp or 141 * 2) The codegen is under fixed register usage 142 */ 143 static ArmLIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest, 144 int value) 145 { 146 ArmLIR *res; 147 int modImm; 148 149 if (FPREG(rDest)) { 150 return loadFPConstantValue(cUnit, rDest, value); 151 } 152 153 /* See if the value can be constructed cheaply */ 154 if (LOWREG(rDest) && (value >= 0) && (value <= 255)) { 155 return newLIR2(cUnit, kThumbMovImm, rDest, value); 156 } 157 /* Check Modified immediate special cases */ 158 modImm = modifiedImmediate(value); 159 if (modImm >= 0) { 160 res = newLIR2(cUnit, kThumb2MovImmShift, rDest, modImm); 161 return res; 162 } 163 modImm = modifiedImmediate(~value); 164 if (modImm >= 0) { 165 res = newLIR2(cUnit, kThumb2MvnImmShift, rDest, modImm); 166 return res; 167 } 168 /* 16-bit immediate? 
*/ 169 if ((value & 0xffff) == value) { 170 res = newLIR2(cUnit, kThumb2MovImm16, rDest, value); 171 return res; 172 } 173 /* No shortcut - go ahead and use literal pool */ 174 ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 0); 175 if (dataTarget == NULL) { 176 dataTarget = addWordData(cUnit, &cUnit->literalList, value); 177 } 178 ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true); 179 loadPcRel->opcode = kThumb2LdrPcRel12; 180 loadPcRel->generic.target = (LIR *) dataTarget; 181 loadPcRel->operands[0] = rDest; 182 setupResourceMasks(loadPcRel); 183 setMemRefType(loadPcRel, true, kLiteral); 184 loadPcRel->aliasInfo = dataTarget->operands[0]; 185 res = loadPcRel; 186 dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel); 187 188 /* 189 * To save space in the constant pool, we use the ADD_RRI8 instruction to 190 * add up to 255 to an existing constant value. 191 */ 192 if (dataTarget->operands[0] != value) { 193 opRegImm(cUnit, kOpAdd, rDest, value - dataTarget->operands[0]); 194 } 195 return res; 196 } 197 198 /* 199 * Load an immediate value into a fixed or temp register. Target 200 * register is clobbered, and marked inUse. 201 */ 202 static ArmLIR *loadConstant(CompilationUnit *cUnit, int rDest, int value) 203 { 204 if (dvmCompilerIsTemp(cUnit, rDest)) { 205 dvmCompilerClobber(cUnit, rDest); 206 dvmCompilerMarkInUse(cUnit, rDest); 207 } 208 return loadConstantNoClobber(cUnit, rDest, value); 209 } 210 211 /* 212 * Load a class pointer value into a fixed or temp register. Target 213 * register is clobbered, and marked inUse. 
214 */ 215 static ArmLIR *loadClassPointer(CompilationUnit *cUnit, int rDest, int value) 216 { 217 ArmLIR *res; 218 cUnit->hasClassLiterals = true; 219 if (dvmCompilerIsTemp(cUnit, rDest)) { 220 dvmCompilerClobber(cUnit, rDest); 221 dvmCompilerMarkInUse(cUnit, rDest); 222 } 223 ArmLIR *dataTarget = scanLiteralPool(cUnit->classPointerList, value, 0); 224 if (dataTarget == NULL) { 225 dataTarget = addWordData(cUnit, &cUnit->classPointerList, value); 226 /* Counts the number of class pointers in this translation */ 227 cUnit->numClassPointers++; 228 } 229 ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true); 230 loadPcRel->opcode = kThumb2LdrPcRel12; 231 loadPcRel->generic.target = (LIR *) dataTarget; 232 loadPcRel->operands[0] = rDest; 233 setupResourceMasks(loadPcRel); 234 setMemRefType(loadPcRel, true, kLiteral); 235 loadPcRel->aliasInfo = dataTarget->operands[0]; 236 res = loadPcRel; 237 dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel); 238 return res; 239 } 240 241 static ArmLIR *opNone(CompilationUnit *cUnit, OpKind op) 242 { 243 ArmOpcode opcode = kThumbBkpt; 244 switch (op) { 245 case kOpUncondBr: 246 opcode = kThumbBUncond; 247 break; 248 default: 249 assert(0); 250 } 251 return newLIR0(cUnit, opcode); 252 } 253 254 static ArmLIR *opCondBranch(CompilationUnit *cUnit, ArmConditionCode cc) 255 { 256 return newLIR2(cUnit, kThumb2BCond, 0 /* offset to be patched */, cc); 257 } 258 259 static ArmLIR *opImm(CompilationUnit *cUnit, OpKind op, int value) 260 { 261 ArmOpcode opcode = kThumbBkpt; 262 switch (op) { 263 case kOpPush: { 264 if ((value & 0xff00) == 0) { 265 opcode = kThumbPush; 266 } else if ((value & 0xff00) == (1 << r14lr)) { 267 /* Thumb push can handle lr, which is encoded by bit 8 */ 268 opcode = kThumbPush; 269 value = (value & 0xff) | (1<<8); 270 } else { 271 opcode = kThumb2Push; 272 } 273 break; 274 } 275 case kOpPop: { 276 if ((value & 0xff00) == 0) { 277 opcode = kThumbPop; 278 } else if ((value & 0xff00) == (1 << r15pc)) { 279 /* 
Thumb pop can handle pc, which is encoded by bit 8 */ 280 opcode = kThumbPop; 281 value = (value & 0xff) | (1<<8); 282 } else { 283 opcode = kThumb2Pop; 284 } 285 break; 286 } 287 default: 288 assert(0); 289 } 290 return newLIR1(cUnit, opcode, value); 291 } 292 293 static ArmLIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc) 294 { 295 ArmOpcode opcode = kThumbBkpt; 296 switch (op) { 297 case kOpBlx: 298 opcode = kThumbBlxR; 299 break; 300 default: 301 assert(0); 302 } 303 return newLIR1(cUnit, opcode, rDestSrc); 304 } 305 306 static ArmLIR *opRegRegShift(CompilationUnit *cUnit, OpKind op, int rDestSrc1, 307 int rSrc2, int shift) 308 { 309 bool thumbForm = ((shift == 0) && LOWREG(rDestSrc1) && LOWREG(rSrc2)); 310 ArmOpcode opcode = kThumbBkpt; 311 switch (op) { 312 case kOpAdc: 313 opcode = (thumbForm) ? kThumbAdcRR : kThumb2AdcRRR; 314 break; 315 case kOpAnd: 316 opcode = (thumbForm) ? kThumbAndRR : kThumb2AndRRR; 317 break; 318 case kOpBic: 319 opcode = (thumbForm) ? kThumbBicRR : kThumb2BicRRR; 320 break; 321 case kOpCmn: 322 assert(shift == 0); 323 opcode = (thumbForm) ? kThumbCmnRR : kThumb2CmnRR; 324 break; 325 case kOpCmp: 326 if (thumbForm) 327 opcode = kThumbCmpRR; 328 else if ((shift == 0) && !LOWREG(rDestSrc1) && !LOWREG(rSrc2)) 329 opcode = kThumbCmpHH; 330 else if ((shift == 0) && LOWREG(rDestSrc1)) 331 opcode = kThumbCmpLH; 332 else if (shift == 0) 333 opcode = kThumbCmpHL; 334 else 335 opcode = kThumb2CmpRR; 336 break; 337 case kOpXor: 338 opcode = (thumbForm) ? kThumbEorRR : kThumb2EorRRR; 339 break; 340 case kOpMov: 341 assert(shift == 0); 342 if (LOWREG(rDestSrc1) && LOWREG(rSrc2)) 343 opcode = kThumbMovRR; 344 else if (!LOWREG(rDestSrc1) && !LOWREG(rSrc2)) 345 opcode = kThumbMovRR_H2H; 346 else if (LOWREG(rDestSrc1)) 347 opcode = kThumbMovRR_H2L; 348 else 349 opcode = kThumbMovRR_L2H; 350 break; 351 case kOpMul: 352 assert(shift == 0); 353 opcode = (thumbForm) ? 
kThumbMul : kThumb2MulRRR; 354 break; 355 case kOpMvn: 356 opcode = (thumbForm) ? kThumbMvn : kThumb2MnvRR; 357 break; 358 case kOpNeg: 359 assert(shift == 0); 360 opcode = (thumbForm) ? kThumbNeg : kThumb2NegRR; 361 break; 362 case kOpOr: 363 opcode = (thumbForm) ? kThumbOrr : kThumb2OrrRRR; 364 break; 365 case kOpSbc: 366 opcode = (thumbForm) ? kThumbSbc : kThumb2SbcRRR; 367 break; 368 case kOpTst: 369 opcode = (thumbForm) ? kThumbTst : kThumb2TstRR; 370 break; 371 case kOpLsl: 372 assert(shift == 0); 373 opcode = (thumbForm) ? kThumbLslRR : kThumb2LslRRR; 374 break; 375 case kOpLsr: 376 assert(shift == 0); 377 opcode = (thumbForm) ? kThumbLsrRR : kThumb2LsrRRR; 378 break; 379 case kOpAsr: 380 assert(shift == 0); 381 opcode = (thumbForm) ? kThumbAsrRR : kThumb2AsrRRR; 382 break; 383 case kOpRor: 384 assert(shift == 0); 385 opcode = (thumbForm) ? kThumbRorRR : kThumb2RorRRR; 386 break; 387 case kOpAdd: 388 opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR; 389 break; 390 case kOpSub: 391 opcode = (thumbForm) ? 
kThumbSubRRR : kThumb2SubRRR; 392 break; 393 case kOp2Byte: 394 assert(shift == 0); 395 return newLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 8); 396 case kOp2Short: 397 assert(shift == 0); 398 return newLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 16); 399 case kOp2Char: 400 assert(shift == 0); 401 return newLIR4(cUnit, kThumb2Ubfx, rDestSrc1, rSrc2, 0, 16); 402 default: 403 assert(0); 404 break; 405 } 406 assert(opcode >= 0); 407 if (EncodingMap[opcode].flags & IS_BINARY_OP) 408 return newLIR2(cUnit, opcode, rDestSrc1, rSrc2); 409 else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) { 410 if (EncodingMap[opcode].fieldLoc[2].kind == kFmtShift) 411 return newLIR3(cUnit, opcode, rDestSrc1, rSrc2, shift); 412 else 413 return newLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2); 414 } else if (EncodingMap[opcode].flags & IS_QUAD_OP) 415 return newLIR4(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2, shift); 416 else { 417 assert(0); 418 return NULL; 419 } 420 } 421 422 static ArmLIR *opRegReg(CompilationUnit *cUnit, OpKind op, int rDestSrc1, 423 int rSrc2) 424 { 425 return opRegRegShift(cUnit, op, rDestSrc1, rSrc2, 0); 426 } 427 428 static ArmLIR *opRegRegRegShift(CompilationUnit *cUnit, OpKind op, 429 int rDest, int rSrc1, int rSrc2, int shift) 430 { 431 ArmOpcode opcode = kThumbBkpt; 432 bool thumbForm = (shift == 0) && LOWREG(rDest) && LOWREG(rSrc1) && 433 LOWREG(rSrc2); 434 switch (op) { 435 case kOpAdd: 436 opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR; 437 break; 438 case kOpSub: 439 opcode = (thumbForm) ? 
kThumbSubRRR : kThumb2SubRRR; 440 break; 441 case kOpAdc: 442 opcode = kThumb2AdcRRR; 443 break; 444 case kOpAnd: 445 opcode = kThumb2AndRRR; 446 break; 447 case kOpBic: 448 opcode = kThumb2BicRRR; 449 break; 450 case kOpXor: 451 opcode = kThumb2EorRRR; 452 break; 453 case kOpMul: 454 assert(shift == 0); 455 opcode = kThumb2MulRRR; 456 break; 457 case kOpOr: 458 opcode = kThumb2OrrRRR; 459 break; 460 case kOpSbc: 461 opcode = kThumb2SbcRRR; 462 break; 463 case kOpLsl: 464 assert(shift == 0); 465 opcode = kThumb2LslRRR; 466 break; 467 case kOpLsr: 468 assert(shift == 0); 469 opcode = kThumb2LsrRRR; 470 break; 471 case kOpAsr: 472 assert(shift == 0); 473 opcode = kThumb2AsrRRR; 474 break; 475 case kOpRor: 476 assert(shift == 0); 477 opcode = kThumb2RorRRR; 478 break; 479 default: 480 assert(0); 481 break; 482 } 483 assert(opcode >= 0); 484 if (EncodingMap[opcode].flags & IS_QUAD_OP) 485 return newLIR4(cUnit, opcode, rDest, rSrc1, rSrc2, shift); 486 else { 487 assert(EncodingMap[opcode].flags & IS_TERTIARY_OP); 488 return newLIR3(cUnit, opcode, rDest, rSrc1, rSrc2); 489 } 490 } 491 492 static ArmLIR *opRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest, 493 int rSrc1, int rSrc2) 494 { 495 return opRegRegRegShift(cUnit, op, rDest, rSrc1, rSrc2, 0); 496 } 497 498 static ArmLIR *opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest, 499 int rSrc1, int value) 500 { 501 ArmLIR *res; 502 bool neg = (value < 0); 503 int absValue = (neg) ? 
-value : value; 504 ArmOpcode opcode = kThumbBkpt; 505 ArmOpcode altOpcode = kThumbBkpt; 506 bool allLowRegs = (LOWREG(rDest) && LOWREG(rSrc1)); 507 int modImm = modifiedImmediate(value); 508 int modImmNeg = modifiedImmediate(-value); 509 510 switch(op) { 511 case kOpLsl: 512 if (allLowRegs) 513 return newLIR3(cUnit, kThumbLslRRI5, rDest, rSrc1, value); 514 else 515 return newLIR3(cUnit, kThumb2LslRRI5, rDest, rSrc1, value); 516 case kOpLsr: 517 if (allLowRegs) 518 return newLIR3(cUnit, kThumbLsrRRI5, rDest, rSrc1, value); 519 else 520 return newLIR3(cUnit, kThumb2LsrRRI5, rDest, rSrc1, value); 521 case kOpAsr: 522 if (allLowRegs) 523 return newLIR3(cUnit, kThumbAsrRRI5, rDest, rSrc1, value); 524 else 525 return newLIR3(cUnit, kThumb2AsrRRI5, rDest, rSrc1, value); 526 case kOpRor: 527 return newLIR3(cUnit, kThumb2RorRRI5, rDest, rSrc1, value); 528 case kOpAdd: 529 if (LOWREG(rDest) && (rSrc1 == r13sp) && 530 (value <= 1020) && ((value & 0x3)==0)) { 531 return newLIR3(cUnit, kThumbAddSpRel, rDest, rSrc1, 532 value >> 2); 533 } else if (LOWREG(rDest) && (rSrc1 == r15pc) && 534 (value <= 1020) && ((value & 0x3)==0)) { 535 return newLIR3(cUnit, kThumbAddPcRel, rDest, rSrc1, 536 value >> 2); 537 } 538 opcode = kThumb2AddRRI8; 539 altOpcode = kThumb2AddRRR; 540 // Note: intentional fallthrough 541 case kOpSub: 542 if (allLowRegs && ((absValue & 0x7) == absValue)) { 543 if (op == kOpAdd) 544 opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3; 545 else 546 opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3; 547 return newLIR3(cUnit, opcode, rDest, rSrc1, absValue); 548 } else if ((absValue & 0xff) == absValue) { 549 if (op == kOpAdd) 550 opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12; 551 else 552 opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12; 553 return newLIR3(cUnit, opcode, rDest, rSrc1, absValue); 554 } 555 if (modImmNeg >= 0) { 556 op = (op == kOpAdd) ? 
kOpSub : kOpAdd; 557 modImm = modImmNeg; 558 } 559 if (op == kOpSub) { 560 opcode = kThumb2SubRRI8; 561 altOpcode = kThumb2SubRRR; 562 } 563 break; 564 case kOpAdc: 565 opcode = kThumb2AdcRRI8; 566 altOpcode = kThumb2AdcRRR; 567 break; 568 case kOpSbc: 569 opcode = kThumb2SbcRRI8; 570 altOpcode = kThumb2SbcRRR; 571 break; 572 case kOpOr: 573 opcode = kThumb2OrrRRI8; 574 altOpcode = kThumb2OrrRRR; 575 break; 576 case kOpAnd: 577 opcode = kThumb2AndRRI8; 578 altOpcode = kThumb2AndRRR; 579 break; 580 case kOpXor: 581 opcode = kThumb2EorRRI8; 582 altOpcode = kThumb2EorRRR; 583 break; 584 case kOpMul: 585 //TUNING: power of 2, shift & add 586 modImm = -1; 587 altOpcode = kThumb2MulRRR; 588 break; 589 case kOpCmp: { 590 int modImm = modifiedImmediate(value); 591 ArmLIR *res; 592 if (modImm >= 0) { 593 res = newLIR2(cUnit, kThumb2CmpRI8, rSrc1, modImm); 594 } else { 595 int rTmp = dvmCompilerAllocTemp(cUnit); 596 res = loadConstant(cUnit, rTmp, value); 597 opRegReg(cUnit, kOpCmp, rSrc1, rTmp); 598 dvmCompilerFreeTemp(cUnit, rTmp); 599 } 600 return res; 601 } 602 default: 603 assert(0); 604 } 605 606 if (modImm >= 0) { 607 return newLIR3(cUnit, opcode, rDest, rSrc1, modImm); 608 } else { 609 int rScratch = dvmCompilerAllocTemp(cUnit); 610 loadConstant(cUnit, rScratch, value); 611 if (EncodingMap[altOpcode].flags & IS_QUAD_OP) 612 res = newLIR4(cUnit, altOpcode, rDest, rSrc1, rScratch, 0); 613 else 614 res = newLIR3(cUnit, altOpcode, rDest, rSrc1, rScratch); 615 dvmCompilerFreeTemp(cUnit, rScratch); 616 return res; 617 } 618 } 619 620 /* Handle Thumb-only variants here - otherwise punt to opRegRegImm */ 621 static ArmLIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1, 622 int value) 623 { 624 bool neg = (value < 0); 625 int absValue = (neg) ? 
-value : value; 626 bool shortForm = (((absValue & 0xff) == absValue) && LOWREG(rDestSrc1)); 627 ArmOpcode opcode = kThumbBkpt; 628 switch (op) { 629 case kOpAdd: 630 if ( !neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */ 631 assert((value & 0x3) == 0); 632 return newLIR1(cUnit, kThumbAddSpI7, value >> 2); 633 } else if (shortForm) { 634 opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8; 635 } 636 break; 637 case kOpSub: 638 if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */ 639 assert((value & 0x3) == 0); 640 return newLIR1(cUnit, kThumbSubSpI7, value >> 2); 641 } else if (shortForm) { 642 opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8; 643 } 644 break; 645 case kOpCmp: 646 if (LOWREG(rDestSrc1) && shortForm) 647 opcode = (shortForm) ? kThumbCmpRI8 : kThumbCmpRR; 648 else if (LOWREG(rDestSrc1)) 649 opcode = kThumbCmpRR; 650 else { 651 shortForm = false; 652 opcode = kThumbCmpHL; 653 } 654 break; 655 default: 656 /* Punt to opRegRegImm - if bad case catch it there */ 657 shortForm = false; 658 break; 659 } 660 if (shortForm) 661 return newLIR2(cUnit, opcode, rDestSrc1, absValue); 662 else { 663 return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value); 664 } 665 } 666 667 /* 668 * Determine whether value can be encoded as a Thumb2 floating point 669 * immediate. If not, return -1. If so return encoded 8-bit value. 
670 */ 671 static int encodeImmDoubleHigh(int value) 672 { 673 int res; 674 int bitA = (value & 0x80000000) >> 31; 675 int notBitB = (value & 0x40000000) >> 30; 676 int bitB = (value & 0x20000000) >> 29; 677 int bSmear = (value & 0x3fc00000) >> 22; 678 int slice = (value & 0x003f0000) >> 16; 679 int zeroes = (value & 0x0000ffff); 680 if (zeroes != 0) 681 return -1; 682 if (bitB) { 683 if ((notBitB != 0) || (bSmear != 0x1f)) 684 return -1; 685 } else { 686 if ((notBitB != 1) || (bSmear != 0x0)) 687 return -1; 688 } 689 res = (bitA << 7) | (bitB << 6) | slice; 690 return res; 691 } 692 693 static int encodeImmDouble(int valLo, int valHi) 694 { 695 int res = -1; 696 if (valLo == 0) 697 res = encodeImmDoubleHigh(valHi); 698 return res; 699 } 700 701 static ArmLIR *loadConstantValueWide(CompilationUnit *cUnit, int rDestLo, 702 int rDestHi, int valLo, int valHi) 703 { 704 int encodedImm = encodeImmDouble(valLo, valHi); 705 ArmLIR *res; 706 int targetReg = S2D(rDestLo, rDestHi); 707 if (FPREG(rDestLo)) { 708 if ((valLo == 0) && (valHi == 0)) { 709 // TODO: we need better info about the target CPU. a vector 710 // exclusive or would probably be better here if we could rely on 711 // its existance. 
712 // Load an immediate +2.0 (which encodes to 0) 713 newLIR2(cUnit, kThumb2Vmovd_IMM8, targetReg, 0); 714 // +0.0 = +2.0 - +2.0 715 res = newLIR3(cUnit, kThumb2Vsubd, targetReg, targetReg, targetReg); 716 } else if (encodedImm >= 0) { 717 res = newLIR2(cUnit, kThumb2Vmovd_IMM8, targetReg, encodedImm); 718 } else { 719 ArmLIR* dataTarget = scanLiteralPoolWide(cUnit->literalList, valLo, valHi); 720 if (dataTarget == NULL) { 721 dataTarget = addWideData(cUnit, &cUnit->literalList, valLo, valHi); 722 } 723 ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true); 724 loadPcRel->opcode = kThumb2Vldrd; 725 loadPcRel->generic.target = (LIR *) dataTarget; 726 loadPcRel->operands[0] = targetReg; 727 loadPcRel->operands[1] = r15pc; 728 setupResourceMasks(loadPcRel); 729 setMemRefType(loadPcRel, true, kLiteral); 730 // TODO: rework literal load disambiguation to more cleanly handle 64-bit loads 731 loadPcRel->aliasInfo = (uintptr_t)dataTarget; 732 dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel); 733 res = loadPcRel; 734 } 735 } else { 736 res = loadConstantNoClobber(cUnit, rDestLo, valLo); 737 loadConstantNoClobber(cUnit, rDestHi, valHi); 738 } 739 return res; 740 } 741 742 static int encodeShift(int code, int amount) { 743 return ((amount & 0x1f) << 2) | code; 744 } 745 746 static ArmLIR *loadBaseIndexed(CompilationUnit *cUnit, int rBase, 747 int rIndex, int rDest, int scale, OpSize size) 748 { 749 bool allLowRegs = LOWREG(rBase) && LOWREG(rIndex) && LOWREG(rDest); 750 ArmLIR *load; 751 ArmOpcode opcode = kThumbBkpt; 752 bool thumbForm = (allLowRegs && (scale == 0)); 753 int regPtr; 754 755 if (FPREG(rDest)) { 756 assert(SINGLEREG(rDest)); 757 assert((size == kWord) || (size == kSingle)); 758 opcode = kThumb2Vldrs; 759 size = kSingle; 760 } else { 761 if (size == kSingle) 762 size = kWord; 763 } 764 765 switch (size) { 766 case kSingle: 767 regPtr = dvmCompilerAllocTemp(cUnit); 768 if (scale) { 769 newLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex, 770 
encodeShift(kArmLsl, scale)); 771 } else { 772 opRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex); 773 } 774 load = newLIR3(cUnit, opcode, rDest, regPtr, 0); 775 #if defined(WITH_SELF_VERIFICATION) 776 if (cUnit->heapMemOp) 777 load->flags.insertWrapper = true; 778 #endif 779 return load; 780 case kWord: 781 opcode = (thumbForm) ? kThumbLdrRRR : kThumb2LdrRRR; 782 break; 783 case kUnsignedHalf: 784 opcode = (thumbForm) ? kThumbLdrhRRR : kThumb2LdrhRRR; 785 break; 786 case kSignedHalf: 787 opcode = (thumbForm) ? kThumbLdrshRRR : kThumb2LdrshRRR; 788 break; 789 case kUnsignedByte: 790 opcode = (thumbForm) ? kThumbLdrbRRR : kThumb2LdrbRRR; 791 break; 792 case kSignedByte: 793 opcode = (thumbForm) ? kThumbLdrsbRRR : kThumb2LdrsbRRR; 794 break; 795 default: 796 assert(0); 797 } 798 if (thumbForm) 799 load = newLIR3(cUnit, opcode, rDest, rBase, rIndex); 800 else 801 load = newLIR4(cUnit, opcode, rDest, rBase, rIndex, scale); 802 803 #if defined(WITH_SELF_VERIFICATION) 804 if (cUnit->heapMemOp) 805 load->flags.insertWrapper = true; 806 #endif 807 return load; 808 } 809 810 static ArmLIR *storeBaseIndexed(CompilationUnit *cUnit, int rBase, 811 int rIndex, int rSrc, int scale, OpSize size) 812 { 813 bool allLowRegs = LOWREG(rBase) && LOWREG(rIndex) && LOWREG(rSrc); 814 ArmLIR *store; 815 ArmOpcode opcode = kThumbBkpt; 816 bool thumbForm = (allLowRegs && (scale == 0)); 817 int regPtr; 818 819 if (FPREG(rSrc)) { 820 assert(SINGLEREG(rSrc)); 821 assert((size == kWord) || (size == kSingle)); 822 opcode = kThumb2Vstrs; 823 size = kSingle; 824 } else { 825 if (size == kSingle) 826 size = kWord; 827 } 828 829 switch (size) { 830 case kSingle: 831 regPtr = dvmCompilerAllocTemp(cUnit); 832 if (scale) { 833 newLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex, 834 encodeShift(kArmLsl, scale)); 835 } else { 836 opRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex); 837 } 838 store = newLIR3(cUnit, opcode, rSrc, regPtr, 0); 839 #if defined(WITH_SELF_VERIFICATION) 840 if (cUnit->heapMemOp) 
841 store->flags.insertWrapper = true; 842 #endif 843 return store; 844 case kWord: 845 opcode = (thumbForm) ? kThumbStrRRR : kThumb2StrRRR; 846 break; 847 case kUnsignedHalf: 848 case kSignedHalf: 849 opcode = (thumbForm) ? kThumbStrhRRR : kThumb2StrhRRR; 850 break; 851 case kUnsignedByte: 852 case kSignedByte: 853 opcode = (thumbForm) ? kThumbStrbRRR : kThumb2StrbRRR; 854 break; 855 default: 856 assert(0); 857 } 858 if (thumbForm) 859 store = newLIR3(cUnit, opcode, rSrc, rBase, rIndex); 860 else 861 store = newLIR4(cUnit, opcode, rSrc, rBase, rIndex, scale); 862 863 #if defined(WITH_SELF_VERIFICATION) 864 if (cUnit->heapMemOp) 865 store->flags.insertWrapper = true; 866 #endif 867 return store; 868 } 869 870 /* 871 * Load value from base + displacement. Optionally perform null check 872 * on base (which must have an associated sReg and MIR). If not 873 * performing null check, incoming MIR can be null. 874 */ 875 static ArmLIR *loadBaseDispBody(CompilationUnit *cUnit, MIR *mir, int rBase, 876 int displacement, int rDest, int rDestHi, 877 OpSize size, int sReg) 878 { 879 ArmLIR *res, *load; 880 ArmOpcode opcode = kThumbBkpt; 881 bool shortForm = false; 882 bool thumb2Form = (displacement < 4092 && displacement >= 0); 883 bool allLowRegs = (LOWREG(rBase) && LOWREG(rDest)); 884 int encodedDisp = displacement; 885 886 switch (size) { 887 case kDouble: 888 case kLong: 889 if (FPREG(rDest)) { 890 if (SINGLEREG(rDest)) { 891 assert(FPREG(rDestHi)); 892 rDest = S2D(rDest, rDestHi); 893 } 894 opcode = kThumb2Vldrd; 895 if (displacement <= 1020) { 896 shortForm = true; 897 encodedDisp >>= 2; 898 } 899 break; 900 } else { 901 res = loadBaseDispBody(cUnit, mir, rBase, displacement, rDest, 902 -1, kWord, sReg); 903 loadBaseDispBody(cUnit, NULL, rBase, displacement + 4, rDestHi, 904 -1, kWord, INVALID_SREG); 905 return res; 906 } 907 case kSingle: 908 case kWord: 909 if (FPREG(rDest)) { 910 opcode = kThumb2Vldrs; 911 if (displacement <= 1020) { 912 shortForm = true; 913 
encodedDisp >>= 2; 914 } 915 break; 916 } 917 if (LOWREG(rDest) && (rBase == r15pc) && 918 (displacement <= 1020) && (displacement >= 0)) { 919 shortForm = true; 920 encodedDisp >>= 2; 921 opcode = kThumbLdrPcRel; 922 } else if (LOWREG(rDest) && (rBase == r13sp) && 923 (displacement <= 1020) && (displacement >= 0)) { 924 shortForm = true; 925 encodedDisp >>= 2; 926 opcode = kThumbLdrSpRel; 927 } else if (allLowRegs && displacement < 128 && displacement >= 0) { 928 assert((displacement & 0x3) == 0); 929 shortForm = true; 930 encodedDisp >>= 2; 931 opcode = kThumbLdrRRI5; 932 } else if (thumb2Form) { 933 shortForm = true; 934 opcode = kThumb2LdrRRI12; 935 } 936 break; 937 case kUnsignedHalf: 938 if (allLowRegs && displacement < 64 && displacement >= 0) { 939 assert((displacement & 0x1) == 0); 940 shortForm = true; 941 encodedDisp >>= 1; 942 opcode = kThumbLdrhRRI5; 943 } else if (displacement < 4092 && displacement >= 0) { 944 shortForm = true; 945 opcode = kThumb2LdrhRRI12; 946 } 947 break; 948 case kSignedHalf: 949 if (thumb2Form) { 950 shortForm = true; 951 opcode = kThumb2LdrshRRI12; 952 } 953 break; 954 case kUnsignedByte: 955 if (allLowRegs && displacement < 32 && displacement >= 0) { 956 shortForm = true; 957 opcode = kThumbLdrbRRI5; 958 } else if (thumb2Form) { 959 shortForm = true; 960 opcode = kThumb2LdrbRRI12; 961 } 962 break; 963 case kSignedByte: 964 if (thumb2Form) { 965 shortForm = true; 966 opcode = kThumb2LdrsbRRI12; 967 } 968 break; 969 default: 970 assert(0); 971 } 972 973 if (shortForm) { 974 load = res = newLIR3(cUnit, opcode, rDest, rBase, encodedDisp); 975 } else { 976 int regOffset = dvmCompilerAllocTemp(cUnit); 977 res = loadConstant(cUnit, regOffset, encodedDisp); 978 load = loadBaseIndexed(cUnit, rBase, regOffset, rDest, 0, size); 979 dvmCompilerFreeTemp(cUnit, regOffset); 980 } 981 982 if (rBase == r5FP) { 983 annotateDalvikRegAccess(load, displacement >> 2, true /* isLoad */); 984 } 985 #if defined(WITH_SELF_VERIFICATION) 986 if 
(cUnit->heapMemOp)
        load->flags.insertWrapper = true;
#endif
    return load;
}

/*
 * Load a value of at most word size from [rBase + displacement] into rDest.
 * Convenience wrapper: forwards to loadBaseDispBody with no high register.
 */
static ArmLIR *loadBaseDisp(CompilationUnit *cUnit, MIR *mir, int rBase,
                            int displacement, int rDest, OpSize size,
                            int sReg)
{
    return loadBaseDispBody(cUnit, mir, rBase, displacement, rDest, -1,
                            size, sReg);
}

/*
 * Load a 64-bit value from [rBase + displacement] into the rDestLo/rDestHi
 * pair. Convenience wrapper: forwards to loadBaseDispBody as kLong.
 */
static ArmLIR *loadBaseDispWide(CompilationUnit *cUnit, MIR *mir, int rBase,
                                int displacement, int rDestLo, int rDestHi,
                                int sReg)
{
    return loadBaseDispBody(cUnit, mir, rBase, displacement, rDestLo, rDestHi,
                            kLong, sReg);
}


/*
 * Emit a store of rSrc (and rSrcHi for 64-bit data) to [rBase + displacement],
 * picking the shortest available encoding:
 *   - 16-bit Thumb str/strh/strb with a scaled 5-bit immediate when both
 *     registers are low and the displacement is small enough;
 *   - 32-bit Thumb2 forms with a 12-bit immediate (displacement in [0,4092));
 *   - VFP vstrs/vstrd with the displacement scaled by 4 (<= 1020);
 *   - otherwise, materialize the displacement in a scratch register and fall
 *     back to a register-offset store via storeBaseIndexed.
 * Returns the first LIR emitted (res), which is not always the store itself
 * on the long-form path.
 */
static ArmLIR *storeBaseDispBody(CompilationUnit *cUnit, int rBase,
                                 int displacement, int rSrc, int rSrcHi,
                                 OpSize size)
{
    ArmLIR *res, *store;
    ArmOpcode opcode = kThumbBkpt;
    bool shortForm = false;
    /* Thumb2 str-immediate forms take a 12-bit non-negative offset */
    bool thumb2Form = (displacement < 4092 && displacement >= 0);
    bool allLowRegs = (LOWREG(rBase) && LOWREG(rSrc));
    int encodedDisp = displacement;

    switch (size) {
        case kLong:
        case kDouble:
            if (!FPREG(rSrc)) {
                /* Core-register pair: split into two word stores */
                res = storeBaseDispBody(cUnit, rBase, displacement, rSrc,
                                        -1, kWord);
                storeBaseDispBody(cUnit, rBase, displacement + 4, rSrcHi,
                                  -1, kWord);
                return res;
            }
            if (SINGLEREG(rSrc)) {
                /* Fuse the single-precision pair into one double reg */
                assert(FPREG(rSrcHi));
                rSrc = S2D(rSrc, rSrcHi);
            }
            opcode = kThumb2Vstrd;
            /* vstrd encodes the offset in words (imm8 * 4, max 1020).
             * NOTE(review): a negative displacement also satisfies <= 1020
             * and would be right-shifted here — callers presumably only pass
             * non-negative, word-aligned offsets; confirm. */
            if (displacement <= 1020) {
                shortForm = true;
                encodedDisp >>= 2;
            }
            break;
        case kSingle:
        case kWord:
            if (FPREG(rSrc)) {
                assert(SINGLEREG(rSrc));
                opcode = kThumb2Vstrs;
                /* vstrs: word-scaled imm8 offset, max 1020 */
                if (displacement <= 1020) {
                    shortForm = true;
                    encodedDisp >>= 2;
                }
                break;
            }
            if (allLowRegs && displacement < 128 && displacement >= 0) {
                /* 16-bit str: 5-bit immediate scaled by 4 */
                assert((displacement & 0x3) == 0);
                shortForm = true;
                encodedDisp >>= 2;
                opcode = kThumbStrRRI5;
            } else if (thumb2Form) {
                shortForm = true;
                opcode = kThumb2StrRRI12;
            }

            break;
        case kUnsignedHalf:
        case kSignedHalf:
            if (allLowRegs && displacement < 64 && displacement >= 0) {
                /* 16-bit strh: 5-bit immediate scaled by 2 */
                assert((displacement & 0x1) == 0);
                shortForm = true;
                encodedDisp >>= 1;
                opcode = kThumbStrhRRI5;
            } else if (thumb2Form) {
                shortForm = true;
                opcode = kThumb2StrhRRI12;
            }
            break;
        case kUnsignedByte:
        case kSignedByte:
            if (allLowRegs && displacement < 32 && displacement >= 0) {
                /* 16-bit strb: unscaled 5-bit immediate */
                shortForm = true;
                opcode = kThumbStrbRRI5;
            } else if (thumb2Form) {
                shortForm = true;
                opcode = kThumb2StrbRRI12;
            }
            break;
        default:
            assert(0);
    }
    if (shortForm) {
        store = res = newLIR3(cUnit, opcode, rSrc, rBase, encodedDisp);
    } else {
        /* No immediate form fits: load the offset into a temp and use a
         * register-offset store instead. */
        int rScratch = dvmCompilerAllocTemp(cUnit);
        res = loadConstant(cUnit, rScratch, encodedDisp);
        store = storeBaseIndexed(cUnit, rBase, rScratch, rSrc, 0, size);
        dvmCompilerFreeTemp(cUnit, rScratch);
    }

    if (rBase == r5FP) {
        /* Frame-relative store: record which Dalvik vreg it touches */
        annotateDalvikRegAccess(store, displacement >> 2, false /* isLoad */);
    }
#if defined(WITH_SELF_VERIFICATION)
    if (cUnit->heapMemOp)
        store->flags.insertWrapper = true;
#endif
    return res;
}

/*
 * Store a value of at most word size. Wrapper over storeBaseDispBody
 * with no high source register.
 */
static ArmLIR *storeBaseDisp(CompilationUnit *cUnit, int rBase,
                             int displacement, int rSrc, OpSize size)
{
    return storeBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
}

/*
 * Store a 64-bit value from the rSrcLo/rSrcHi pair. Wrapper over
 * storeBaseDispBody as kLong.
 */
static ArmLIR *storeBaseDispWide(CompilationUnit *cUnit, int rBase,
                                 int displacement, int rSrcLo, int rSrcHi)
{
    return storeBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
}

/*
 * Emit a load-multiple of the registers in rMask from [rBase]. Uses the
 * 16-bit Thumb ldmia when the base is a low register and the mask only
 * names r0-r7; otherwise the Thumb2 form. Surrounded by scheduling
 * barriers so the multi-register access is not reordered.
 */
static ArmLIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask)
{
    ArmLIR *res;
    genBarrier(cUnit);
    if (LOWREG(rBase) && ((rMask & 0xff)==rMask)) {
        res = newLIR2(cUnit, kThumbLdmia, rBase, rMask);
    } else {
        res = newLIR2(cUnit, kThumb2Ldmia, rBase, rMask);
    }
#if defined(WITH_SELF_VERIFICATION)
    if (cUnit->heapMemOp)
        res->flags.insertWrapper = true;
#endif
    genBarrier(cUnit);
    return res;
}

/*
 * Emit a store-multiple of the registers in rMask to [rBase]. Encoding
 * selection and barrier placement mirror loadMultiple above.
 */
static ArmLIR *storeMultiple(CompilationUnit *cUnit, int rBase, int rMask)
{
    ArmLIR *res;
    genBarrier(cUnit);
    if (LOWREG(rBase) && ((rMask & 0xff)==rMask)) {
        res = newLIR2(cUnit, kThumbStmia, rBase, rMask);
    } else {
        res = newLIR2(cUnit, kThumb2Stmia, rBase, rMask);
    }
#if defined(WITH_SELF_VERIFICATION)
    if (cUnit->heapMemOp)
        res->flags.insertWrapper = true;
#endif
    genBarrier(cUnit);
    return res;
}

/* Store a 64-bit register pair to [base] (offset 0). */
static void storePair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
{
    storeBaseDispWide(cUnit, base, 0, lowReg, highReg);
}

/* Load a 64-bit register pair from [base] (offset 0). */
static void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
{
    loadBaseDispWide(cUnit, NULL, base, 0, lowReg, highReg, INVALID_SREG);
}

/*
 * Generate a register comparison to an immediate and branch. Caller
 * is responsible for setting branch target field.
 *
 * Fast path: compare-against-zero of a low register with an eq/ne
 * condition maps directly onto Thumb2 cbz/cbnz. Otherwise emit the
 * cheapest cmp (Thumb 8-bit immediate, Thumb2 modified immediate, or a
 * constant materialized into a temp) followed by a conditional branch.
 */
static ArmLIR *genCmpImmBranch(CompilationUnit *cUnit,
                               ArmConditionCode cond, int reg,
                               int checkValue)
{
    ArmLIR *branch;
    int modImm;
    if ((LOWREG(reg)) && (checkValue == 0) &&
       ((cond == kArmCondEq) || (cond == kArmCondNe))) {
        branch = newLIR2(cUnit,
                         (cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                         reg, 0);
    } else {
        modImm = modifiedImmediate(checkValue);
        if (LOWREG(reg) && ((checkValue & 0xff) == checkValue)) {
            newLIR2(cUnit, kThumbCmpRI8, reg, checkValue);
        } else if (modImm >= 0) {
            /* operand is the pre-encoded Thumb2 modified immediate */
            newLIR2(cUnit, kThumb2CmpRI8, reg, modImm);
        } else {
            int tReg = dvmCompilerAllocTemp(cUnit);
            loadConstant(cUnit, tReg, checkValue);
            opRegReg(cUnit, kOpCmp, reg, tReg);
        }
        branch = newLIR2(cUnit, kThumbBCond, 0, cond);
    }
    return branch;
}

/*
 * Build (but do not append) a register-to-register copy where at least
 * one side is a VFP register: vmovd for double/double, vmovs for
 * single/single, fmsr core->single, fmrs single->core. A same-register
 * copy is marked as a nop.
 */
static ArmLIR *fpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
{
    ArmLIR* res = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    res->operands[0] = rDest;
    res->operands[1] = rSrc;
    if (rDest == rSrc) {
        res->flags.isNop = true;
    } else {
        assert(DOUBLEREG(rDest) == DOUBLEREG(rSrc));
        if (DOUBLEREG(rDest)) {
            res->opcode = kThumb2Vmovd;
        } else {
            if (SINGLEREG(rDest)) {
                res->opcode = SINGLEREG(rSrc) ? kThumb2Vmovs : kThumb2Fmsr;
            } else {
                /* core destination <- single-precision source */
                assert(SINGLEREG(rSrc));
                res->opcode = kThumb2Fmrs;
            }
        }
        res->operands[0] = rDest;
        res->operands[1] = rSrc;
    }
    setupResourceMasks(res);
    return res;
}

/*
 * Build (but do not append) a register copy, dispatching to fpRegCopy
 * when either side is a VFP register and otherwise picking the Thumb
 * mov variant that matches the low/high register combination.
 */
static ArmLIR* genRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
{
    ArmLIR* res;
    ArmOpcode opcode;
    if (FPREG(rDest) || FPREG(rSrc))
        return fpRegCopy(cUnit, rDest, rSrc);
    res = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    if (LOWREG(rDest) && LOWREG(rSrc))
        opcode = kThumbMovRR;
    else if (!LOWREG(rDest) && !LOWREG(rSrc))
        opcode = kThumbMovRR_H2H;
    else if (LOWREG(rDest))
        opcode = kThumbMovRR_H2L;
    else
        opcode = kThumbMovRR_L2H;

    res->operands[0] = rDest;
    res->operands[1] = rSrc;
    res->opcode = opcode;
    setupResourceMasks(res);
    if (rDest == rSrc) {
        res->flags.isNop = true;
    }
    return res;
}

/* Build a register copy and append it to the instruction stream. */
static ArmLIR* genRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
{
    ArmLIR *res = genRegCopyNoInsert(cUnit, rDest, rSrc);
    dvmCompilerAppendLIR(cUnit, (LIR*)res);
    return res;
}

/*
 * Copy a 64-bit value between register pairs. VFP<->VFP uses a double
 * move; core<->VFP uses fmdrr/fmrrd; core<->core copies the two halves
 * in an order that won't clobber the source when the pairs overlap.
 */
static void genRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
                           int srcLo, int srcHi)
{
    bool destFP = FPREG(destLo) && FPREG(destHi);
    bool srcFP = FPREG(srcLo) && FPREG(srcHi);
    assert(FPREG(srcLo) == FPREG(srcHi));
    assert(FPREG(destLo) == FPREG(destHi));
    if (destFP) {
        if (srcFP) {
            genRegCopy(cUnit, S2D(destLo, destHi), S2D(srcLo, srcHi));
        } else {
            newLIR3(cUnit, kThumb2Fmdrr, S2D(destLo, destHi), srcLo, srcHi);
        }
    } else {
        if (srcFP) {
            newLIR3(cUnit, kThumb2Fmrrd, destLo, destHi, S2D(srcLo, srcHi));
        } else {
            // Handle overlap
            if (srcHi == destLo) {
                /* copying destLo first would overwrite srcHi */
                genRegCopy(cUnit, destHi, srcHi);
                genRegCopy(cUnit, destLo, srcLo);
            } else {
                genRegCopy(cUnit, destLo, srcLo);

                genRegCopy(cUnit, destHi, srcHi);
            }
        }
    }
}

#if defined(WITH_SELF_VERIFICATION)
/* Insert a push of r5FP and LR immediately before origLIR. */
static void genSelfVerificationPreBranch(CompilationUnit *cUnit,
                                         ArmLIR *origLIR) {
    ArmLIR *push = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    push->opcode = kThumbPush;
    /* Thumb push can handle LR (encoded at bit 8) */
    push->operands[0] = (1 << r5FP | 1 << 8);
    setupResourceMasks(push);
    dvmCompilerInsertLIRBefore((LIR *) origLIR, (LIR *) push);
}

/* Insert a pop of r5FP and LR immediately after origLIR. */
static void genSelfVerificationPostBranch(CompilationUnit *cUnit,
                                         ArmLIR *origLIR) {
    ArmLIR *pop = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    /* Thumb pop cannot store into LR - use Thumb2 here */
    pop->opcode = kThumb2Pop;
    pop->operands[0] = (1 << r5FP | 1 << r14lr);
    setupResourceMasks(pop);
    dvmCompilerInsertLIRAfter((LIR *) origLIR, (LIR *) pop);
}
#endif