/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This file contains codegen for the Thumb2 ISA and is intended to be
 * included by:
 *
 *        Codegen-$(TARGET_ARCH_VARIANT).c
 *
 */

static int coreTemps[] = {r0, r1, r2, r3, r4PC, r7, r8, r9, r10, r11, r12};
static int fpTemps[] = {fr16, fr17, fr18, fr19, fr20, fr21, fr22, fr23,
                        fr24, fr25, fr26, fr27, fr28, fr29, fr30, fr31};
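
/*
 * Determine whether value (a raw IEEE-754 single bit pattern) can be
 * encoded as the 8-bit VFP immediate used by kThumb2Vmovs_IMM8: bit 30
 * must be the complement of bit 29, bits 28..25 must be copies of bit 29,
 * and fraction bits 18..0 must be zero.  Returns -1 if not encodable,
 * otherwise the packed sign:bit29:bits[24:19] byte.
 *
 * Worked examples:
 *   1.0f = 0x3f800000 -> 0x70
 *   2.0f = 0x40000000 -> 0x00
 */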
static int encodeImmSingle(int value)
{
    int res;
    int bitA =    (value & 0x80000000) >> 31;
    int notBitB = (value & 0x40000000) >> 30;
    int bitB =    (value & 0x20000000) >> 29;
    int bSmear =  (value & 0x3e000000) >> 25;
    int slice =   (value & 0x01f80000) >> 19;
    int zeroes =  (value & 0x0007ffff);
    if (zeroes != 0)
        return -1;
    if (bitB) {
        if ((notBitB != 0) || (bSmear != 0x1f))
            return -1;
    } else {
        if ((notBitB != 1) || (bSmear != 0x0))
            return -1;
    }
    res = (bitA << 7) | (bitB << 6) | slice;
    return res;
}

static ArmLIR *loadFPConstantValue(CompilationUnit *cUnit, int rDest,
                                   int value)
{
    int encodedImm = encodeImmSingle(value);
    assert(SINGLEREG(rDest));
    if (encodedImm >= 0) {
        return newLIR2(cUnit, kThumb2Vmovs_IMM8, rDest, encodedImm);
    }
    ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
    if (dataTarget == NULL) {
        dataTarget = addWordData(cUnit, &cUnit->literalList, value);
    }
    ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    loadPcRel->opcode = kThumb2Vldrs;
    loadPcRel->generic.target = (LIR *) dataTarget;
    loadPcRel->operands[0] = rDest;
    loadPcRel->operands[1] = r15pc;
    setupResourceMasks(loadPcRel);
    setMemRefType(loadPcRel, true, kLiteral);
    loadPcRel->aliasInfo = dataTarget->operands[0];
    dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel);
    return loadPcRel;
}
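
/*
 * Count leading zeros via binary search: each step keeps the upper half of
 * val if it is non-zero.  On exit val is 0 or 1, so (n - val) is the
 * leading-zero count (32 for an input of 0).
 */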
static int leadingZeros(u4 val)
{
    u4 alt;
    int n;
    int count;

    count = 16;
    n = 32;
    do {
        alt = val >> count;
        if (alt != 0) {
            n = n - count;
            val = alt;
        }
        count >>= 1;
    } while (count);
    return n - val;
}

/*
 * Determine whether value can be encoded as a Thumb2 modified
 * immediate.  If not, return -1.  If so, return i:imm3:a:bcdefgh form.
 */
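/*
 * For reference, a few encodings this routine produces (returned
 * i:imm3:a:bcdefgh shown in hex):
 *   0x000000ab -> 0x0ab   (plain 8-bit value, no rotation)
 *   0x00ab00ab -> 0x1ab   (byte replicated in the low byte of each halfword)
 *   0xab00ab00 -> 0x2ab   (byte replicated in the high byte of each halfword)
 *   0xabababab -> 0x3ab   (byte replicated in all four bytes)
 *   0x00ab0000 -> 0x82b   (0xab rotated right by 16)
 */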
static int modifiedImmediate(u4 value)
{
    int zLeading;
    int zTrailing;
    u4 b0 = value & 0xff;

    /* Note: case of value==0 must use 0:000:0:0000000 encoding */
    if (value <= 0xFF)
        return b0;  // 0:000:a:bcdefgh
    if (value == ((b0 << 16) | b0))
        return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
    if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
        return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
    b0 = (value >> 8) & 0xff;
    if (value == ((b0 << 24) | (b0 << 8)))
        return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
    /* Can we do it with rotation? */
    zLeading = leadingZeros(value);
    zTrailing = 32 - leadingZeros(~value & (value - 1));
    /* A run of eight or fewer active bits? */
    if ((zLeading + zTrailing) < 24)
        return -1;  /* No - bail */
    /* Left-justify the constant, discarding msb (known to be 1) */
    value <<= zLeading + 1;
    /* Create bcdefgh */
    value >>= 25;
    /* Put it all together */
    return value | ((0x8 + zLeading) << 7); /* [01000..11111]:bcdefgh */
}

/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.
 *
 * No additional register clobbering operation performed.  Use this version
 * when:
 * 1) rDest is freshly returned from dvmCompilerAllocTemp, or
 * 2) the codegen is under fixed register usage.
 */
static ArmLIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest,
                                     int value)
{
    ArmLIR *res;
    int modImm;

    if (FPREG(rDest)) {
        return loadFPConstantValue(cUnit, rDest, value);
    }

    /* See if the value can be constructed cheaply */
    if (LOWREG(rDest) && (value >= 0) && (value <= 255)) {
        return newLIR2(cUnit, kThumbMovImm, rDest, value);
    }
    /* Check modified immediate special cases */
    modImm = modifiedImmediate(value);
    if (modImm >= 0) {
        res = newLIR2(cUnit, kThumb2MovImmShift, rDest, modImm);
        return res;
    }
    modImm = modifiedImmediate(~value);
    if (modImm >= 0) {
        res = newLIR2(cUnit, kThumb2MvnImmShift, rDest, modImm);
        return res;
    }
    /* 16-bit immediate? */
    if ((value & 0xffff) == value) {
        res = newLIR2(cUnit, kThumb2MovImm16, rDest, value);
        return res;
    }
    /* No shortcut - go ahead and use literal pool */
    ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 0);
    if (dataTarget == NULL) {
        dataTarget = addWordData(cUnit, &cUnit->literalList, value);
    }
    ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    loadPcRel->opcode = kThumb2LdrPcRel12;
    loadPcRel->generic.target = (LIR *) dataTarget;
    loadPcRel->operands[0] = rDest;
    setupResourceMasks(loadPcRel);
    setMemRefType(loadPcRel, true, kLiteral);
    loadPcRel->aliasInfo = dataTarget->operands[0];
    res = loadPcRel;
    dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel);

    /*
     * To save space in the constant pool, we use the ADD_RRI8 instruction to
     * add up to 255 to an existing constant value.
     */
    if (dataTarget->operands[0] != value) {
        opRegImm(cUnit, kOpAdd, rDest, value - dataTarget->operands[0]);
    }
    return res;
}

/*
 * Load an immediate value into a fixed or temp register.  Target
 * register is clobbered, and marked inUse.
 */
static ArmLIR *loadConstant(CompilationUnit *cUnit, int rDest, int value)
{
    if (dvmCompilerIsTemp(cUnit, rDest)) {
        dvmCompilerClobber(cUnit, rDest);
        dvmCompilerMarkInUse(cUnit, rDest);
    }
    return loadConstantNoClobber(cUnit, rDest, value);
}

/*
 * Load a class pointer value into a fixed or temp register.  Target
 * register is clobbered, and marked inUse.
 */
static ArmLIR *loadClassPointer(CompilationUnit *cUnit, int rDest, int value)
{
    ArmLIR *res;
    cUnit->hasClassLiterals = true;
    if (dvmCompilerIsTemp(cUnit, rDest)) {
        dvmCompilerClobber(cUnit, rDest);
        dvmCompilerMarkInUse(cUnit, rDest);
    }
    ArmLIR *dataTarget = scanLiteralPool(cUnit->classPointerList, value, 0);
    if (dataTarget == NULL) {
        dataTarget = addWordData(cUnit, &cUnit->classPointerList, value);
        /* Counts the number of class pointers in this translation */
        cUnit->numClassPointers++;
    }
    ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    loadPcRel->opcode = kThumb2LdrPcRel12;
    loadPcRel->generic.target = (LIR *) dataTarget;
    loadPcRel->operands[0] = rDest;
    setupResourceMasks(loadPcRel);
    setMemRefType(loadPcRel, true, kLiteral);
    loadPcRel->aliasInfo = dataTarget->operands[0];
    res = loadPcRel;
    dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel);
    return res;
}

static ArmLIR *opNone(CompilationUnit *cUnit, OpKind op)
{
    ArmOpcode opcode = kThumbBkpt;
    switch (op) {
        case kOpUncondBr:
            opcode = kThumbBUncond;
            break;
        default:
            assert(0);
    }
    return newLIR0(cUnit, opcode);
}

static ArmLIR *opCondBranch(CompilationUnit *cUnit, ArmConditionCode cc)
{
    return newLIR2(cUnit, kThumb2BCond, 0 /* offset to be patched */, cc);
}

static ArmLIR *opImm(CompilationUnit *cUnit, OpKind op, int value)
{
    ArmOpcode opcode = kThumbBkpt;
    switch (op) {
        case kOpPush: {
            if ((value & 0xff00) == 0) {
                opcode = kThumbPush;
            } else if ((value & 0xff00) == (1 << r14lr)) {
                /* Thumb push can handle lr, which is encoded by bit 8 */
                opcode = kThumbPush;
                value = (value & 0xff) | (1<<8);
            } else {
                opcode = kThumb2Push;
            }
            break;
        }
        case kOpPop: {
            if ((value & 0xff00) == 0) {
                opcode = kThumbPop;
            } else if ((value & 0xff00) == (1 << r15pc)) {
                /* Thumb pop can handle pc, which is encoded by bit 8 */
                opcode = kThumbPop;
                value = (value & 0xff) | (1<<8);
            } else {
                opcode = kThumb2Pop;
            }
            break;
        }
        default:
            assert(0);
    }
    return newLIR1(cUnit, opcode, value);
}

static ArmLIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc)
{
    ArmOpcode opcode = kThumbBkpt;
    switch (op) {
        case kOpBlx:
            opcode = kThumbBlxR;
            break;
        default:
            assert(0);
    }
    return newLIR1(cUnit, opcode, rDestSrc);
}
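
/*
 * Two-register ALU op with an optional shift applied to the second operand.
 * When both registers are low and no shift is requested the 16-bit Thumb
 * encoding is selected; otherwise the 32-bit Thumb2 form is used.
 */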
static ArmLIR *opRegRegShift(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
                             int rSrc2, int shift)
{
    bool thumbForm = ((shift == 0) && LOWREG(rDestSrc1) && LOWREG(rSrc2));
    ArmOpcode opcode = kThumbBkpt;
    switch (op) {
        case kOpAdc:
            opcode = (thumbForm) ? kThumbAdcRR : kThumb2AdcRRR;
            break;
        case kOpAnd:
            opcode = (thumbForm) ? kThumbAndRR : kThumb2AndRRR;
            break;
        case kOpBic:
            opcode = (thumbForm) ? kThumbBicRR : kThumb2BicRRR;
            break;
        case kOpCmn:
            assert(shift == 0);
            opcode = (thumbForm) ? kThumbCmnRR : kThumb2CmnRR;
            break;
        case kOpCmp:
            if (thumbForm)
                opcode = kThumbCmpRR;
            else if ((shift == 0) && !LOWREG(rDestSrc1) && !LOWREG(rSrc2))
                opcode = kThumbCmpHH;
            else if ((shift == 0) && LOWREG(rDestSrc1))
                opcode = kThumbCmpLH;
            else if (shift == 0)
                opcode = kThumbCmpHL;
            else
                opcode = kThumb2CmpRR;
            break;
        case kOpXor:
            opcode = (thumbForm) ? kThumbEorRR : kThumb2EorRRR;
            break;
        case kOpMov:
            assert(shift == 0);
            if (LOWREG(rDestSrc1) && LOWREG(rSrc2))
                opcode = kThumbMovRR;
            else if (!LOWREG(rDestSrc1) && !LOWREG(rSrc2))
                opcode = kThumbMovRR_H2H;
            else if (LOWREG(rDestSrc1))
                opcode = kThumbMovRR_H2L;
            else
                opcode = kThumbMovRR_L2H;
            break;
        case kOpMul:
            assert(shift == 0);
            opcode = (thumbForm) ? kThumbMul : kThumb2MulRRR;
            break;
        case kOpMvn:
            opcode = (thumbForm) ? kThumbMvn : kThumb2MnvRR;
            break;
        case kOpNeg:
            assert(shift == 0);
            opcode = (thumbForm) ? kThumbNeg : kThumb2NegRR;
            break;
        case kOpOr:
            opcode = (thumbForm) ? kThumbOrr : kThumb2OrrRRR;
            break;
        case kOpSbc:
            opcode = (thumbForm) ? kThumbSbc : kThumb2SbcRRR;
            break;
        case kOpTst:
            opcode = (thumbForm) ? kThumbTst : kThumb2TstRR;
            break;
        case kOpLsl:
            assert(shift == 0);
            opcode = (thumbForm) ? kThumbLslRR : kThumb2LslRRR;
            break;
        case kOpLsr:
            assert(shift == 0);
            opcode = (thumbForm) ? kThumbLsrRR : kThumb2LsrRRR;
            break;
        case kOpAsr:
            assert(shift == 0);
            opcode = (thumbForm) ? kThumbAsrRR : kThumb2AsrRRR;
            break;
        case kOpRor:
            assert(shift == 0);
            opcode = (thumbForm) ? kThumbRorRR : kThumb2RorRRR;
            break;
        case kOpAdd:
            opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR;
            break;
        case kOpSub:
            opcode = (thumbForm) ? kThumbSubRRR : kThumb2SubRRR;
            break;
        case kOp2Byte:
            assert(shift == 0);
            return newLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 8);
        case kOp2Short:
            assert(shift == 0);
            return newLIR4(cUnit, kThumb2Sbfx, rDestSrc1, rSrc2, 0, 16);
        case kOp2Char:
            assert(shift == 0);
            return newLIR4(cUnit, kThumb2Ubfx, rDestSrc1, rSrc2, 0, 16);
        default:
            assert(0);
            break;
    }
    assert(opcode >= 0);
    if (EncodingMap[opcode].flags & IS_BINARY_OP)
        return newLIR2(cUnit, opcode, rDestSrc1, rSrc2);
    else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
        if (EncodingMap[opcode].fieldLoc[2].kind == kFmtShift)
            return newLIR3(cUnit, opcode, rDestSrc1, rSrc2, shift);
        else
            return newLIR3(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2);
    } else if (EncodingMap[opcode].flags & IS_QUAD_OP)
        return newLIR4(cUnit, opcode, rDestSrc1, rDestSrc1, rSrc2, shift);
    else {
        assert(0);
        return NULL;
    }
}

static ArmLIR *opRegReg(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
                        int rSrc2)
{
    return opRegRegShift(cUnit, op, rDestSrc1, rSrc2, 0);
}

static ArmLIR *opRegRegRegShift(CompilationUnit *cUnit, OpKind op,
                                int rDest, int rSrc1, int rSrc2, int shift)
{
    ArmOpcode opcode = kThumbBkpt;
    bool thumbForm = (shift == 0) && LOWREG(rDest) && LOWREG(rSrc1) &&
                     LOWREG(rSrc2);
    switch (op) {
        case kOpAdd:
            opcode = (thumbForm) ? kThumbAddRRR : kThumb2AddRRR;
            break;
        case kOpSub:
            opcode = (thumbForm) ? kThumbSubRRR : kThumb2SubRRR;
            break;
        case kOpAdc:
            opcode = kThumb2AdcRRR;
            break;
        case kOpAnd:
            opcode = kThumb2AndRRR;
            break;
        case kOpBic:
            opcode = kThumb2BicRRR;
            break;
        case kOpXor:
            opcode = kThumb2EorRRR;
            break;
        case kOpMul:
            assert(shift == 0);
            opcode = kThumb2MulRRR;
            break;
        case kOpOr:
            opcode = kThumb2OrrRRR;
            break;
        case kOpSbc:
            opcode = kThumb2SbcRRR;
            break;
        case kOpLsl:
            assert(shift == 0);
            opcode = kThumb2LslRRR;
            break;
        case kOpLsr:
            assert(shift == 0);
            opcode = kThumb2LsrRRR;
            break;
        case kOpAsr:
            assert(shift == 0);
            opcode = kThumb2AsrRRR;
            break;
        case kOpRor:
            assert(shift == 0);
            opcode = kThumb2RorRRR;
            break;
        default:
            assert(0);
            break;
    }
    assert(opcode >= 0);
    if (EncodingMap[opcode].flags & IS_QUAD_OP)
        return newLIR4(cUnit, opcode, rDest, rSrc1, rSrc2, shift);
    else {
        assert(EncodingMap[opcode].flags & IS_TERTIARY_OP);
        return newLIR3(cUnit, opcode, rDest, rSrc1, rSrc2);
    }
}

static ArmLIR *opRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest,
                           int rSrc1, int rSrc2)
{
    return opRegRegRegShift(cUnit, op, rDest, rSrc1, rSrc2, 0);
}
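
/*
 * Register-register op with an immediate operand.  Immediates that fit one
 * of the short Thumb/Thumb2 immediate fields, or that are expressible as a
 * Thumb2 modified immediate, are encoded directly; anything else is
 * materialized into a scratch register with loadConstant() and the
 * register-register form (altOpcode) is used instead.
 */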
static ArmLIR *opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest,
                           int rSrc1, int value)
{
    ArmLIR *res;
    bool neg = (value < 0);
    int absValue = (neg) ? -value : value;
    ArmOpcode opcode = kThumbBkpt;
    ArmOpcode altOpcode = kThumbBkpt;
    bool allLowRegs = (LOWREG(rDest) && LOWREG(rSrc1));
    int modImm = modifiedImmediate(value);
    int modImmNeg = modifiedImmediate(-value);

    switch (op) {
        case kOpLsl:
            if (allLowRegs)
                return newLIR3(cUnit, kThumbLslRRI5, rDest, rSrc1, value);
            else
                return newLIR3(cUnit, kThumb2LslRRI5, rDest, rSrc1, value);
        case kOpLsr:
            if (allLowRegs)
                return newLIR3(cUnit, kThumbLsrRRI5, rDest, rSrc1, value);
            else
                return newLIR3(cUnit, kThumb2LsrRRI5, rDest, rSrc1, value);
        case kOpAsr:
            if (allLowRegs)
                return newLIR3(cUnit, kThumbAsrRRI5, rDest, rSrc1, value);
            else
                return newLIR3(cUnit, kThumb2AsrRRI5, rDest, rSrc1, value);
        case kOpRor:
            return newLIR3(cUnit, kThumb2RorRRI5, rDest, rSrc1, value);
        case kOpAdd:
            if (LOWREG(rDest) && (rSrc1 == r13sp) &&
                (value <= 1020) && ((value & 0x3) == 0)) {
                return newLIR3(cUnit, kThumbAddSpRel, rDest, rSrc1,
                               value >> 2);
            } else if (LOWREG(rDest) && (rSrc1 == r15pc) &&
                       (value <= 1020) && ((value & 0x3) == 0)) {
                return newLIR3(cUnit, kThumbAddPcRel, rDest, rSrc1,
                               value >> 2);
            }
            opcode = kThumb2AddRRI8;
            altOpcode = kThumb2AddRRR;
            // Note: intentional fallthrough
        case kOpSub:
            if (allLowRegs && ((absValue & 0x7) == absValue)) {
                if (op == kOpAdd)
                    opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
                else
                    opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
                return newLIR3(cUnit, opcode, rDest, rSrc1, absValue);
            } else if ((absValue & 0xff) == absValue) {
                if (op == kOpAdd)
                    opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
                else
                    opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
                return newLIR3(cUnit, opcode, rDest, rSrc1, absValue);
            }
            if (modImmNeg >= 0) {
                op = (op == kOpAdd) ? kOpSub : kOpAdd;
                modImm = modImmNeg;
            }
            if (op == kOpSub) {
                opcode = kThumb2SubRRI8;
                altOpcode = kThumb2SubRRR;
            }
            break;
        case kOpAdc:
            opcode = kThumb2AdcRRI8;
            altOpcode = kThumb2AdcRRR;
            break;
        case kOpSbc:
            opcode = kThumb2SbcRRI8;
            altOpcode = kThumb2SbcRRR;
            break;
        case kOpOr:
            opcode = kThumb2OrrRRI8;
            altOpcode = kThumb2OrrRRR;
            break;
        case kOpAnd:
            opcode = kThumb2AndRRI8;
            altOpcode = kThumb2AndRRR;
            break;
        case kOpXor:
            opcode = kThumb2EorRRI8;
            altOpcode = kThumb2EorRRR;
            break;
        case kOpMul:
            //TUNING: power of 2, shift & add
            modImm = -1;
            altOpcode = kThumb2MulRRR;
            break;
        case kOpCmp: {
            int modImm = modifiedImmediate(value);
            ArmLIR *res;
            if (modImm >= 0) {
                res = newLIR2(cUnit, kThumb2CmpRI8, rSrc1, modImm);
            } else {
                int rTmp = dvmCompilerAllocTemp(cUnit);
                res = loadConstant(cUnit, rTmp, value);
                opRegReg(cUnit, kOpCmp, rSrc1, rTmp);
                dvmCompilerFreeTemp(cUnit, rTmp);
            }
            return res;
        }
        default:
            assert(0);
    }

    if (modImm >= 0) {
        return newLIR3(cUnit, opcode, rDest, rSrc1, modImm);
    } else {
        int rScratch = dvmCompilerAllocTemp(cUnit);
        loadConstant(cUnit, rScratch, value);
        if (EncodingMap[altOpcode].flags & IS_QUAD_OP)
            res = newLIR4(cUnit, altOpcode, rDest, rSrc1, rScratch, 0);
        else
            res = newLIR3(cUnit, altOpcode, rDest, rSrc1, rScratch);
        dvmCompilerFreeTemp(cUnit, rScratch);
        return res;
    }
}

/* Handle Thumb-only variants here - otherwise punt to opRegRegImm */
static ArmLIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1,
                        int value)
{
    bool neg = (value < 0);
    int absValue = (neg) ? -value : value;
    bool shortForm = (((absValue & 0xff) == absValue) && LOWREG(rDestSrc1));
    ArmOpcode opcode = kThumbBkpt;
    switch (op) {
        case kOpAdd:
            if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
                assert((value & 0x3) == 0);
                return newLIR1(cUnit, kThumbAddSpI7, value >> 2);
            } else if (shortForm) {
                opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
            }
            break;
        case kOpSub:
            if (!neg && (rDestSrc1 == r13sp) && (value <= 508)) { /* sp */
                assert((value & 0x3) == 0);
                return newLIR1(cUnit, kThumbSubSpI7, value >> 2);
            } else if (shortForm) {
                opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
            }
            break;
        case kOpCmp:
            if (LOWREG(rDestSrc1) && shortForm)
                opcode = (shortForm) ? kThumbCmpRI8 : kThumbCmpRR;
            else if (LOWREG(rDestSrc1))
                opcode = kThumbCmpRR;
            else {
                shortForm = false;
                opcode = kThumbCmpHL;
            }
            break;
        default:
            /* Punt to opRegRegImm - if bad case catch it there */
            shortForm = false;
            break;
    }
    if (shortForm)
        return newLIR2(cUnit, opcode, rDestSrc1, absValue);
    else {
        return opRegRegImm(cUnit, op, rDestSrc1, rDestSrc1, value);
    }
}

/*
 * Determine whether value can be encoded as a Thumb2 floating point
 * immediate.  If not, return -1.  If so return encoded 8-bit value.
 */
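/*
 * Note: this variant looks only at the high word of an IEEE double; the low
 * word must be zero, which encodeImmDouble() checks before calling here.
 * Example: 1.0 has high word 0x3ff00000 and encodes as 0x70.
 */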
static int encodeImmDoubleHigh(int value)
{
    int res;
    int bitA =    (value & 0x80000000) >> 31;
    int notBitB = (value & 0x40000000) >> 30;
    int bitB =    (value & 0x20000000) >> 29;
    int bSmear =  (value & 0x3fc00000) >> 22;
    int slice =   (value & 0x003f0000) >> 16;
    int zeroes =  (value & 0x0000ffff);
    if (zeroes != 0)
        return -1;
    if (bitB) {
        /* bSmear is 8 bits wide here, so the smear check is against 0xff */
        if ((notBitB != 0) || (bSmear != 0xff))
            return -1;
    } else {
        if ((notBitB != 1) || (bSmear != 0x0))
            return -1;
    }
    res = (bitA << 7) | (bitB << 6) | slice;
    return res;
}

static int encodeImmDouble(int valLo, int valHi)
{
    int res = -1;
    if (valLo == 0)
        res = encodeImmDoubleHigh(valHi);
    return res;
}

static ArmLIR *loadConstantValueWide(CompilationUnit *cUnit, int rDestLo,
                                     int rDestHi, int valLo, int valHi)
{
    int encodedImm = encodeImmDouble(valLo, valHi);
    ArmLIR *res;
    if (FPREG(rDestLo) && (encodedImm >= 0)) {
        res = newLIR2(cUnit, kThumb2Vmovd_IMM8, S2D(rDestLo, rDestHi),
                      encodedImm);
    } else {
        res = loadConstantNoClobber(cUnit, rDestLo, valLo);
        loadConstantNoClobber(cUnit, rDestHi, valHi);
    }
    return res;
}

static int encodeShift(int code, int amount) {
    return ((amount & 0x1f) << 2) | code;
}
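
/*
 * Load a value of the given size from [rBase + (rIndex << scale)].  For FP
 * destinations the scaled address is first formed in a scratch register
 * (vldr takes only an immediate offset); core loads use the register-offset
 * ldr/ldrh/ldrb encodings directly.
 */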
static ArmLIR *loadBaseIndexed(CompilationUnit *cUnit, int rBase,
                               int rIndex, int rDest, int scale, OpSize size)
{
    bool allLowRegs = LOWREG(rBase) && LOWREG(rIndex) && LOWREG(rDest);
    ArmLIR *load;
    ArmOpcode opcode = kThumbBkpt;
    bool thumbForm = (allLowRegs && (scale == 0));
    int regPtr;

    if (FPREG(rDest)) {
        assert(SINGLEREG(rDest));
        assert((size == kWord) || (size == kSingle));
        opcode = kThumb2Vldrs;
        size = kSingle;
    } else {
        if (size == kSingle)
            size = kWord;
    }

    switch (size) {
        case kSingle:
            regPtr = dvmCompilerAllocTemp(cUnit);
            if (scale) {
                newLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex,
                        encodeShift(kArmLsl, scale));
            } else {
                opRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex);
            }
            load = newLIR3(cUnit, opcode, rDest, regPtr, 0);
#if defined(WITH_SELF_VERIFICATION)
            if (cUnit->heapMemOp)
                load->flags.insertWrapper = true;
#endif
            return load;
        case kWord:
            opcode = (thumbForm) ? kThumbLdrRRR : kThumb2LdrRRR;
            break;
        case kUnsignedHalf:
            opcode = (thumbForm) ? kThumbLdrhRRR : kThumb2LdrhRRR;
            break;
        case kSignedHalf:
            opcode = (thumbForm) ? kThumbLdrshRRR : kThumb2LdrshRRR;
            break;
        case kUnsignedByte:
            opcode = (thumbForm) ? kThumbLdrbRRR : kThumb2LdrbRRR;
            break;
        case kSignedByte:
            opcode = (thumbForm) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
            break;
        default:
            assert(0);
    }
    if (thumbForm)
        load = newLIR3(cUnit, opcode, rDest, rBase, rIndex);
    else
        load = newLIR4(cUnit, opcode, rDest, rBase, rIndex, scale);

#if defined(WITH_SELF_VERIFICATION)
    if (cUnit->heapMemOp)
        load->flags.insertWrapper = true;
#endif
    return load;
}

static ArmLIR *storeBaseIndexed(CompilationUnit *cUnit, int rBase,
                                int rIndex, int rSrc, int scale, OpSize size)
{
    bool allLowRegs = LOWREG(rBase) && LOWREG(rIndex) && LOWREG(rSrc);
    ArmLIR *store;
    ArmOpcode opcode = kThumbBkpt;
    bool thumbForm = (allLowRegs && (scale == 0));
    int regPtr;

    if (FPREG(rSrc)) {
        assert(SINGLEREG(rSrc));
        assert((size == kWord) || (size == kSingle));
        opcode = kThumb2Vstrs;
        size = kSingle;
    } else {
        if (size == kSingle)
            size = kWord;
    }

    switch (size) {
        case kSingle:
            regPtr = dvmCompilerAllocTemp(cUnit);
            if (scale) {
                newLIR4(cUnit, kThumb2AddRRR, regPtr, rBase, rIndex,
                        encodeShift(kArmLsl, scale));
            } else {
                opRegRegReg(cUnit, kOpAdd, regPtr, rBase, rIndex);
            }
            store = newLIR3(cUnit, opcode, rSrc, regPtr, 0);
#if defined(WITH_SELF_VERIFICATION)
            if (cUnit->heapMemOp)
                store->flags.insertWrapper = true;
#endif
            return store;
        case kWord:
            opcode = (thumbForm) ? kThumbStrRRR : kThumb2StrRRR;
            break;
        case kUnsignedHalf:
        case kSignedHalf:
            opcode = (thumbForm) ? kThumbStrhRRR : kThumb2StrhRRR;
            break;
        case kUnsignedByte:
        case kSignedByte:
            opcode = (thumbForm) ? kThumbStrbRRR : kThumb2StrbRRR;
            break;
        default:
            assert(0);
    }
    if (thumbForm)
        store = newLIR3(cUnit, opcode, rSrc, rBase, rIndex);
    else
        store = newLIR4(cUnit, opcode, rSrc, rBase, rIndex, scale);

#if defined(WITH_SELF_VERIFICATION)
    if (cUnit->heapMemOp)
        store->flags.insertWrapper = true;
#endif
    return store;
}

/*
 * Load value from base + displacement.  Optionally perform null check
 * on base (which must have an associated sReg and MIR).  If not
 * performing null check, incoming MIR can be null.
 */
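/*
 * The short forms with a shifted encodedDisp use offset fields that are
 * scaled by the access size (word offsets for ldr/vldrs/vldrd, halfword
 * offsets for ldrh); the Thumb2 RRI12 forms take an unscaled 12-bit byte
 * offset.  Displacements that fit neither pattern go through a scratch
 * register and loadBaseIndexed().
 */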
static ArmLIR *loadBaseDispBody(CompilationUnit *cUnit, MIR *mir, int rBase,
                                int displacement, int rDest, int rDestHi,
                                OpSize size, int sReg)
{
    ArmLIR *res, *load;
    ArmOpcode opcode = kThumbBkpt;
    bool shortForm = false;
    bool thumb2Form = (displacement < 4092 && displacement >= 0);
    bool allLowRegs = (LOWREG(rBase) && LOWREG(rDest));
    int encodedDisp = displacement;

    switch (size) {
        case kDouble:
        case kLong:
            if (FPREG(rDest)) {
                if (SINGLEREG(rDest)) {
                    assert(FPREG(rDestHi));
                    rDest = S2D(rDest, rDestHi);
                }
                opcode = kThumb2Vldrd;
                if (displacement <= 1020) {
                    shortForm = true;
                    encodedDisp >>= 2;
                }
                break;
            } else {
                res = loadBaseDispBody(cUnit, mir, rBase, displacement, rDest,
                                       -1, kWord, sReg);
                loadBaseDispBody(cUnit, NULL, rBase, displacement + 4, rDestHi,
                                 -1, kWord, INVALID_SREG);
                return res;
            }
        case kSingle:
        case kWord:
            if (FPREG(rDest)) {
                opcode = kThumb2Vldrs;
                if (displacement <= 1020) {
                    shortForm = true;
                    encodedDisp >>= 2;
                }
                break;
            }
            if (LOWREG(rDest) && (rBase == r15pc) &&
                (displacement <= 1020) && (displacement >= 0)) {
                shortForm = true;
                encodedDisp >>= 2;
                opcode = kThumbLdrPcRel;
            } else if (LOWREG(rDest) && (rBase == r13sp) &&
                       (displacement <= 1020) && (displacement >= 0)) {
                shortForm = true;
                encodedDisp >>= 2;
                opcode = kThumbLdrSpRel;
            } else if (allLowRegs && displacement < 128 && displacement >= 0) {
                assert((displacement & 0x3) == 0);
                shortForm = true;
                encodedDisp >>= 2;
                opcode = kThumbLdrRRI5;
            } else if (thumb2Form) {
                shortForm = true;
                opcode = kThumb2LdrRRI12;
            }
            break;
        case kUnsignedHalf:
            if (allLowRegs && displacement < 64 && displacement >= 0) {
                assert((displacement & 0x1) == 0);
                shortForm = true;
                encodedDisp >>= 1;
                opcode = kThumbLdrhRRI5;
            } else if (displacement < 4092 && displacement >= 0) {
                shortForm = true;
                opcode = kThumb2LdrhRRI12;
            }
            break;
        case kSignedHalf:
            if (thumb2Form) {
                shortForm = true;
                opcode = kThumb2LdrshRRI12;
            }
            break;
        case kUnsignedByte:
            if (allLowRegs && displacement < 32 && displacement >= 0) {
                shortForm = true;
                opcode = kThumbLdrbRRI5;
            } else if (thumb2Form) {
                shortForm = true;
                opcode = kThumb2LdrbRRI12;
            }
            break;
        case kSignedByte:
            if (thumb2Form) {
                shortForm = true;
                opcode = kThumb2LdrsbRRI12;
            }
            break;
        default:
            assert(0);
    }

    if (shortForm) {
        load = res = newLIR3(cUnit, opcode, rDest, rBase, encodedDisp);
    } else {
        int regOffset = dvmCompilerAllocTemp(cUnit);
        res = loadConstant(cUnit, regOffset, encodedDisp);
        load = loadBaseIndexed(cUnit, rBase, regOffset, rDest, 0, size);
        dvmCompilerFreeTemp(cUnit, regOffset);
    }

    if (rBase == r5FP) {
        annotateDalvikRegAccess(load, displacement >> 2, true /* isLoad */);
    }
#if defined(WITH_SELF_VERIFICATION)
    if (cUnit->heapMemOp)
        load->flags.insertWrapper = true;
#endif
    return load;
}

static ArmLIR *loadBaseDisp(CompilationUnit *cUnit, MIR *mir, int rBase,
                            int displacement, int rDest, OpSize size,
                            int sReg)
{
    return loadBaseDispBody(cUnit, mir, rBase, displacement, rDest, -1,
                            size, sReg);
}

static ArmLIR *loadBaseDispWide(CompilationUnit *cUnit, MIR *mir, int rBase,
                                int displacement, int rDestLo, int rDestHi,
                                int sReg)
{
    return loadBaseDispBody(cUnit, mir, rBase, displacement, rDestLo, rDestHi,
                            kLong, sReg);
}
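
/*
 * Store to base + displacement.  Mirrors the encoding selection in
 * loadBaseDispBody(): 64-bit core values are split into two word stores,
 * FP values use vstrs/vstrd.
 */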
static ArmLIR *storeBaseDispBody(CompilationUnit *cUnit, int rBase,
                                 int displacement, int rSrc, int rSrcHi,
                                 OpSize size)
{
    ArmLIR *res, *store;
    ArmOpcode opcode = kThumbBkpt;
    bool shortForm = false;
    bool thumb2Form = (displacement < 4092 && displacement >= 0);
    bool allLowRegs = (LOWREG(rBase) && LOWREG(rSrc));
    int encodedDisp = displacement;

    switch (size) {
        case kLong:
        case kDouble:
            if (!FPREG(rSrc)) {
                res = storeBaseDispBody(cUnit, rBase, displacement, rSrc,
                                        -1, kWord);
                storeBaseDispBody(cUnit, rBase, displacement + 4, rSrcHi,
                                  -1, kWord);
                return res;
            }
            if (SINGLEREG(rSrc)) {
                assert(FPREG(rSrcHi));
                rSrc = S2D(rSrc, rSrcHi);
            }
            opcode = kThumb2Vstrd;
            if (displacement <= 1020) {
                shortForm = true;
                encodedDisp >>= 2;
            }
            break;
        case kSingle:
        case kWord:
            if (FPREG(rSrc)) {
                assert(SINGLEREG(rSrc));
                opcode = kThumb2Vstrs;
                if (displacement <= 1020) {
                    shortForm = true;
                    encodedDisp >>= 2;
                }
                break;
            }
            if (allLowRegs && displacement < 128 && displacement >= 0) {
                assert((displacement & 0x3) == 0);
                shortForm = true;
                encodedDisp >>= 2;
                opcode = kThumbStrRRI5;
            } else if (thumb2Form) {
                shortForm = true;
                opcode = kThumb2StrRRI12;
            }
            break;
        case kUnsignedHalf:
        case kSignedHalf:
            if (allLowRegs && displacement < 64 && displacement >= 0) {
                assert((displacement & 0x1) == 0);
                shortForm = true;
                encodedDisp >>= 1;
                opcode = kThumbStrhRRI5;
            } else if (thumb2Form) {
                shortForm = true;
                opcode = kThumb2StrhRRI12;
            }
            break;
        case kUnsignedByte:
        case kSignedByte:
            if (allLowRegs && displacement < 32 && displacement >= 0) {
                shortForm = true;
                opcode = kThumbStrbRRI5;
            } else if (thumb2Form) {
                shortForm = true;
                opcode = kThumb2StrbRRI12;
            }
            break;
        default:
            assert(0);
    }
    if (shortForm) {
        store = res = newLIR3(cUnit, opcode, rSrc, rBase, encodedDisp);
    } else {
        int rScratch = dvmCompilerAllocTemp(cUnit);
        res = loadConstant(cUnit, rScratch, encodedDisp);
        store = storeBaseIndexed(cUnit, rBase, rScratch, rSrc, 0, size);
        dvmCompilerFreeTemp(cUnit, rScratch);
    }

    if (rBase == r5FP) {
        annotateDalvikRegAccess(store, displacement >> 2, false /* isLoad */);
    }
#if defined(WITH_SELF_VERIFICATION)
    if (cUnit->heapMemOp)
        store->flags.insertWrapper = true;
#endif
    return res;
}

static ArmLIR *storeBaseDisp(CompilationUnit *cUnit, int rBase,
                             int displacement, int rSrc, OpSize size)
{
    return storeBaseDispBody(cUnit, rBase, displacement, rSrc, -1, size);
}

static ArmLIR *storeBaseDispWide(CompilationUnit *cUnit, int rBase,
                                 int displacement, int rSrcLo, int rSrcHi)
{
    return storeBaseDispBody(cUnit, rBase, displacement, rSrcLo, rSrcHi, kLong);
}
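
/*
 * The genBarrier() calls bracketing the ldm/stm below keep the local
 * optimizer/scheduler from moving other memory operations across the block
 * transfer (an assumption based on how barriers are used elsewhere in this
 * backend).
 */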
static ArmLIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask)
{
    ArmLIR *res;
    genBarrier(cUnit);
    if (LOWREG(rBase) && ((rMask & 0xff) == rMask)) {
        res = newLIR2(cUnit, kThumbLdmia, rBase, rMask);
    } else {
        res = newLIR2(cUnit, kThumb2Ldmia, rBase, rMask);
    }
#if defined(WITH_SELF_VERIFICATION)
    if (cUnit->heapMemOp)
        res->flags.insertWrapper = true;
#endif
    genBarrier(cUnit);
    return res;
}

static ArmLIR *storeMultiple(CompilationUnit *cUnit, int rBase, int rMask)
{
    ArmLIR *res;
    genBarrier(cUnit);
    if (LOWREG(rBase) && ((rMask & 0xff) == rMask)) {
        res = newLIR2(cUnit, kThumbStmia, rBase, rMask);
    } else {
        res = newLIR2(cUnit, kThumb2Stmia, rBase, rMask);
    }
#if defined(WITH_SELF_VERIFICATION)
    if (cUnit->heapMemOp)
        res->flags.insertWrapper = true;
#endif
    genBarrier(cUnit);
    return res;
}

static void storePair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
{
    storeBaseDispWide(cUnit, base, 0, lowReg, highReg);
}

static void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
{
    loadBaseDispWide(cUnit, NULL, base, 0, lowReg, highReg, INVALID_SREG);
}

/*
 * Generate a register comparison to an immediate and branch.  Caller
 * is responsible for setting branch target field.
 */
static ArmLIR *genCmpImmBranch(CompilationUnit *cUnit,
                               ArmConditionCode cond, int reg,
                               int checkValue)
{
    ArmLIR *branch;
    int modImm;
    if ((LOWREG(reg)) && (checkValue == 0) &&
        ((cond == kArmCondEq) || (cond == kArmCondNe))) {
        branch = newLIR2(cUnit,
                         (cond == kArmCondEq) ? kThumb2Cbz : kThumb2Cbnz,
                         reg, 0);
    } else {
        modImm = modifiedImmediate(checkValue);
        if (LOWREG(reg) && ((checkValue & 0xff) == checkValue)) {
            newLIR2(cUnit, kThumbCmpRI8, reg, checkValue);
        } else if (modImm >= 0) {
            newLIR2(cUnit, kThumb2CmpRI8, reg, modImm);
        } else {
            int tReg = dvmCompilerAllocTemp(cUnit);
            loadConstant(cUnit, tReg, checkValue);
            opRegReg(cUnit, kOpCmp, reg, tReg);
        }
        branch = newLIR2(cUnit, kThumbBCond, 0, cond);
    }
    return branch;
}
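
/*
 * Build (but do not append) a copy involving FP registers: vmov.f64 for a
 * double-to-double copy, vmov.f32 for single-to-single, fmsr/fmrs when one
 * side is a core register.  Same-register copies are marked as nops.
 */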
static ArmLIR *fpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
{
    ArmLIR* res = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    res->operands[0] = rDest;
    res->operands[1] = rSrc;
    if (rDest == rSrc) {
        res->flags.isNop = true;
    } else {
        assert(DOUBLEREG(rDest) == DOUBLEREG(rSrc));
        if (DOUBLEREG(rDest)) {
            res->opcode = kThumb2Vmovd;
        } else {
            if (SINGLEREG(rDest)) {
                res->opcode = SINGLEREG(rSrc) ? kThumb2Vmovs : kThumb2Fmsr;
            } else {
                assert(SINGLEREG(rSrc));
                res->opcode = kThumb2Fmrs;
            }
        }
        res->operands[0] = rDest;
        res->operands[1] = rSrc;
    }
    setupResourceMasks(res);
    return res;
}

static ArmLIR* genRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
{
    ArmLIR* res;
    ArmOpcode opcode;
    if (FPREG(rDest) || FPREG(rSrc))
        return fpRegCopy(cUnit, rDest, rSrc);
    res = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    if (LOWREG(rDest) && LOWREG(rSrc))
        opcode = kThumbMovRR;
    else if (!LOWREG(rDest) && !LOWREG(rSrc))
        opcode = kThumbMovRR_H2H;
    else if (LOWREG(rDest))
        opcode = kThumbMovRR_H2L;
    else
        opcode = kThumbMovRR_L2H;

    res->operands[0] = rDest;
    res->operands[1] = rSrc;
    res->opcode = opcode;
    setupResourceMasks(res);
    if (rDest == rSrc) {
        res->flags.isNop = true;
    }
    return res;
}

static ArmLIR* genRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
{
    ArmLIR *res = genRegCopyNoInsert(cUnit, rDest, rSrc);
    dvmCompilerAppendLIR(cUnit, (LIR*)res);
    return res;
}

static void genRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
                           int srcLo, int srcHi)
{
    bool destFP = FPREG(destLo) && FPREG(destHi);
    bool srcFP = FPREG(srcLo) && FPREG(srcHi);
    assert(FPREG(srcLo) == FPREG(srcHi));
    assert(FPREG(destLo) == FPREG(destHi));
    if (destFP) {
        if (srcFP) {
            genRegCopy(cUnit, S2D(destLo, destHi), S2D(srcLo, srcHi));
        } else {
            newLIR3(cUnit, kThumb2Fmdrr, S2D(destLo, destHi), srcLo, srcHi);
        }
    } else {
        if (srcFP) {
            newLIR3(cUnit, kThumb2Fmrrd, destLo, destHi, S2D(srcLo, srcHi));
        } else {
            // Handle overlap: copy the high half first if it would be
            // clobbered by the low-half copy
            if (srcHi == destLo) {
                genRegCopy(cUnit, destHi, srcHi);
                genRegCopy(cUnit, destLo, srcLo);
            } else {
                genRegCopy(cUnit, destLo, srcLo);
                genRegCopy(cUnit, destHi, srcHi);
            }
        }
    }
}

#if defined(WITH_SELF_VERIFICATION)
static void genSelfVerificationPreBranch(CompilationUnit *cUnit,
                                         ArmLIR *origLIR) {
    ArmLIR *push = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    push->opcode = kThumbPush;
    /* Thumb push can handle LR (encoded at bit 8) */
    push->operands[0] = (1 << r5FP | 1 << 8);
    setupResourceMasks(push);
    dvmCompilerInsertLIRBefore((LIR *) origLIR, (LIR *) push);
}

static void genSelfVerificationPostBranch(CompilationUnit *cUnit,
                                          ArmLIR *origLIR) {
    ArmLIR *pop = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    /* Thumb pop cannot store into LR - use Thumb2 here */
    pop->opcode = kThumb2Pop;
    pop->operands[0] = (1 << r5FP | 1 << r14lr);
    setupResourceMasks(pop);
    dvmCompilerInsertLIRAfter((LIR *) origLIR, (LIR *) pop);
}
#endif