/*
 * This file was generated automatically by gen-mterp.py for 'armv5te'.
 *
 * --> DO NOT EDIT <--
 */

/* File: armv5te/header.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * ARMv5 definitions and declarations.
 */

/*
ARM EABI general notes:

r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly

r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0

Callee must save/restore r4+ (except r12) if it modifies them.  If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-q3) do not need to be.

Stack is "full descending".  Only the arguments that don't fit in the first 4
registers are placed on the stack.  "sp" points at the first stacked argument
(i.e. the 5th arg).

VFP: single-precision results in s0, double-precision results in d0.

In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/

/*
Mterp and ARM notes:

The following registers have fixed assignments:

  reg nick      purpose
  r4  rPC       interpreted program counter, used for fetching instructions
  r5  rFP       interpreted frame pointer, used for accessing locals and args
  r6  rGLUE     MterpGlue pointer
  r7  rINST     first 16-bit code unit of current instruction
  r8  rIBASE    interpreted instruction base pointer, used for computed goto

Macros are provided for common operations.  Each macro MUST emit only
one instruction to make instruction-counting easier.  They MUST NOT alter
unspecified registers or condition codes.
*/

/* single-purpose registers, given names for clarity */
#define rPC     r4
#define rFP     r5
#define rGLUE   r6
#define rINST   r7
#define rIBASE  r8

/* save/restore the PC and/or FP from the glue struct */
#define LOAD_PC_FROM_GLUE()     ldr     rPC, [rGLUE, #offGlue_pc]
#define SAVE_PC_TO_GLUE()       str     rPC, [rGLUE, #offGlue_pc]
#define LOAD_FP_FROM_GLUE()     ldr     rFP, [rGLUE, #offGlue_fp]
#define SAVE_FP_TO_GLUE()       str     rFP, [rGLUE, #offGlue_fp]
/* pc/fp are the first two fields of MterpGlue, so one ldm/stm covers both */
#define LOAD_PC_FP_FROM_GLUE()  ldmia   rGLUE, {rPC, rFP}
#define SAVE_PC_FP_TO_GLUE()    stmia   rGLUE, {rPC, rFP}

/*
 * "export" the PC to the stack frame, f/b/o future exception objects.  Must
 * be done *before* something calls dvmThrowException.
 *
 * In C this is "SAVEAREA_FROM_FP(fp)->xtra.currentPc = pc", i.e.
 * fp - sizeof(StackSaveArea) + offsetof(SaveArea, xtra.currentPc)
 *
 * It's okay to do this more than once.
 */
#define EXPORT_PC() \
    str     rPC, [rFP, #(-sizeofStackSaveArea + offStackSaveArea_currentPc)]

/*
 * Given a frame pointer, find the stack save area.
 *
 * In C this is "((StackSaveArea*)(_fp) -1)".
 */
#define SAVEAREA_FROM_FP(_reg, _fpreg) \
    sub     _reg, _fpreg, #sizeofStackSaveArea

/*
 * Fetch the next instruction from rPC into rINST.  Does not advance rPC.
 */
#define FETCH_INST()            ldrh    rINST, [rPC]

/*
 * Fetch the next instruction from the specified offset.  Advances rPC
 * to point to the next instruction.  "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss.  (This also implies that it must come after
 * EXPORT_PC().)
 */
#define FETCH_ADVANCE_INST(_count) ldrh rINST, [rPC, #(_count*2)]!

/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
#define PREFETCH_ADVANCE_INST(_dreg, _sreg, _count) \
    ldrh    _dreg, [_sreg, #(_count*2)]!

/*
 * Fetch the next instruction from an offset specified by _reg.  Updates
 * rPC to point to the next instruction.  "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
#define FETCH_ADVANCE_INST_RB(_reg) ldrh rINST, [rPC, _reg]!

/*
 * Fetch a half-word code unit from an offset past the current PC.  The
 * "_count" value is in 16-bit code units.  Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
#define FETCH(_reg, _count)     ldrh    _reg, [rPC, #(_count*2)]
#define FETCH_S(_reg, _count)   ldrsh   _reg, [rPC, #(_count*2)]

/*
 * Fetch one byte from an offset past the current PC.  Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
#define FETCH_B(_reg, _count, _byte) ldrb _reg, [rPC, #(_count*2+_byte)]

/*
 * Put the instruction's opcode field into the specified register.
 */
#define GET_INST_OPCODE(_reg)   and     _reg, rINST, #255

/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
#define GET_PREFETCHED_OPCODE(_oreg, _ireg)   and     _oreg, _ireg, #255

/*
 * Begin executing the opcode in _reg.  Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 *
 * The "lsl #6" matches the 64-byte stride between handlers established by
 * the ".balign 64" before each .L_OP_* label below.
 */
#define GOTO_OPCODE(_reg)       add     pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFEQ(_reg)  addeq   pc, rIBASE, _reg, lsl #6
#define GOTO_OPCODE_IFNE(_reg)  addne   pc, rIBASE, _reg, lsl #6

/*
 * Get/set the 32-bit value from a Dalvik register.
 */
#define GET_VREG(_reg, _vreg)   ldr     _reg, [rFP, _vreg, lsl #2]
#define SET_VREG(_reg, _vreg)   str     _reg, [rFP, _vreg, lsl #2]

#if defined(WITH_JIT)
#define GET_JIT_PROF_TABLE(_reg)    ldr     _reg,[rGLUE,#offGlue_pJitProfTable]
#define GET_JIT_THRESHOLD(_reg)     ldr     _reg,[rGLUE,#offGlue_jitThreshold]
#endif

/*
 * Convert a virtual register index into an address.
 */
#define VREG_INDEX_TO_ADDR(_reg, _vreg) \
    add     _reg, rFP, _vreg, lsl #2

/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "../common/asm-constants.h"

#if defined(WITH_JIT)
#include "../common/jit-config.h"
#endif

/* File: armv5te/platform.S */
/*
 * ===========================================================================
 *  CPU-version-specific defines
 * ===========================================================================
 */

/*
 * Macro for "LDR PC,xxx", which is not allowed pre-ARMv5.  Essentially a
 * one-way branch.
 *
 * May modify IP.  Does not modify LR.
 */
.macro  LDR_PC source
    ldr     pc, \source
.endm

/*
 * Macro for "MOV LR,PC / LDR PC,xxx", which is not allowed pre-ARMv5.
 * Jump to subroutine.
 *
 * May modify IP and LR.
 */
.macro  LDR_PC_LR source
    mov     lr, pc
    ldr     pc, \source
.endm

/*
 * Macro for "LDMFD SP!, {...regs...,PC}".
 *
 * May modify IP and LR.
 */
.macro  LDMFD_PC regs
    ldmfd   sp!, {\regs,pc}
.endm

/*
 * Macro for data memory barrier; not meaningful pre-ARMv6K.
 */
.macro  SMP_DMB
.endm

/* File: armv5te/entry.S */
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * Interpreter entry point.
 */

/*
 * We don't have formal stack frames, so gdb scans upward in the code
 * to find the start of the function (a label with the %function type),
 * and then looks at the next few instructions to figure out what
 * got pushed onto the stack.  From this it figures out how to restore
 * the registers, including PC, for the previous stack frame.  If gdb
 * sees a non-function label, it stops scanning, so either we need to
 * have nothing but assembler-local labels between the entry point and
 * the break, or we need to fake it out.
 *
 * When this is defined, we add some stuff to make gdb less confused.
 */
#define ASSIST_DEBUGGER 1

    .text
    .align  2
    .global dvmMterpStdRun
    .type   dvmMterpStdRun, %function

/*
 * On entry:
 *  r0  MterpGlue* glue
 *
 * This function returns a boolean "changeInterp" value.  The return comes
 * via a call to dvmMterpStdBail().
 */
dvmMterpStdRun:
#define MTERP_ENTRY1 \
    .save {r4-r10,fp,lr}; \
    stmfd   sp!, {r4-r10,fp,lr}         @ save 9 regs
#define MTERP_ENTRY2 \
    .pad    #4; \
    sub     sp, sp, #4                  @ align 64

    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2

    /* save stack pointer, add magic word for debuggerd */
    str     sp, [r0, #offGlue_bailPtr]  @ save SP for eventual return

    /* set up "named" registers, figure out entry point */
    mov     rGLUE, r0                   @ set rGLUE
    ldr     r1, [r0, #offGlue_entryPoint]   @ enum is 4 bytes in aapcs-EABI
    LOAD_PC_FP_FROM_GLUE()              @ load rPC and rFP from "glue"
    adr     rIBASE, dvmAsmInstructionStart  @ set rIBASE
    cmp     r1, #kInterpEntryInstr      @ usual case?
    bne     .Lnot_instr                 @ no, handle it

#if defined(WITH_JIT)
.LentryInstr:
    ldr     r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self
    /* Entry is always a possible trace start */
    GET_JIT_PROF_TABLE(r0)
    FETCH_INST()
    mov     r1, #0                      @ prepare the value for the new state
    str     r1, [r10, #offThread_inJitCodeCache] @ back to the interp land
    cmp     r0, #0                      @ is profiling disabled?
#if !defined(WITH_SELF_VERIFICATION)
    bne     common_updateProfile        @ profiling is enabled
#else
    ldr     r2, [r10, #offThread_shadowSpace]   @ to find out the jit exit state
    beq     1f                          @ profiling is disabled
    ldr     r3, [r2, #offShadowSpace_jitExitState]  @ jit exit state
    cmp     r3, #kSVSTraceSelect        @ hot trace following?
    moveq   r2, #kJitTSelectRequestHot  @ ask for trace selection
    beq     common_selectTrace          @ go build the trace
    cmp     r3, #kSVSNoProfile          @ don't profile the next instruction?
    beq     1f                          @ interpret the next instruction
    b       common_updateProfile        @ collect profiles
#endif
1:
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    /* start executing the instruction at rPC */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

.Lnot_instr:
    cmp     r1, #kInterpEntryReturn     @ were we returning from a method?
    beq     common_returnFromMethod

.Lnot_return:
    cmp     r1, #kInterpEntryThrow      @ were we throwing an exception?
    beq     common_exceptionThrown

#if defined(WITH_JIT)
.Lnot_throw:
    ldr     r10, [rGLUE, #offGlue_jitResumeNPC]
    ldr     r2, [rGLUE, #offGlue_jitResumeDPC]
    cmp     r1, #kInterpEntryResume     @ resuming after Jit single-step?
    bne     .Lbad_arg
    cmp     rPC, r2
    bne     .LentryInstr                @ must have branched, don't resume
#if defined(WITH_SELF_VERIFICATION)
    @ glue->entryPoint will be set in dvmSelfVerificationSaveState
    b       jitSVShadowRunStart         @ re-enter the translation after the
                                        @ single-stepped instruction
    @noreturn
#endif
    mov     r1, #kInterpEntryInstr
    str     r1, [rGLUE, #offGlue_entryPoint]
    bx      r10                         @ re-enter the translation
#endif

.Lbad_arg:
    ldr     r0, strBadEntryPoint
    @ r1 holds value of entryPoint
    bl      printf
    bl      dvmAbort
    .fnend


    .global dvmMterpStdBail
    .type   dvmMterpStdBail, %function

/*
 * Restore the stack pointer and PC from the save point established on entry.
 * This is essentially the same as a longjmp, but should be cheaper.  The
 * last instruction causes us to return to whoever called dvmMterpStdRun.
 *
 * We pushed some registers on the stack in dvmMterpStdRun, then saved
 * SP and LR.  Here we restore SP, restore the registers, and then restore
 * LR to PC.
 *
 * On entry:
 *  r0  MterpGlue* glue
 *  r1  bool changeInterp
 */
dvmMterpStdBail:
    ldr     sp, [r0, #offGlue_bailPtr]  @ sp<- saved SP
    mov     r0, r1                      @ return the changeInterp value
    add     sp, sp, #4                  @ un-align 64
    LDMFD_PC "r4-r10,fp"                @ restore 9 regs and return


/*
 * String references.
 */
strBadEntryPoint:
    .word   .LstrBadEntryPoint


    .global dvmAsmInstructionStart
    .type   dvmAsmInstructionStart, %function
dvmAsmInstructionStart = .L_OP_NOP
    .text

/* ------------------------------ */
    .balign 64
.L_OP_NOP: /* 0x00 */
/* File: armv5te/OP_NOP.S */
    FETCH_ADVANCE_INST(1)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    .type   dalvik_inst, %function
dalvik_inst:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
    .fnend
#endif

/* ------------------------------ */
    .balign 64
.L_OP_MOVE: /* 0x01 */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15                 @ r0<- A (mask off high nibble)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_FROM16: /* 0x02 */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_16: /* 0x03 */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE: /* 0x04 */
/* File: armv5te/OP_MOVE_WIDE.S */
    /* move-wide vA, vB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r2, r2, #15                 @ r2<- A (mask off high nibble)
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[B]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[A]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[B]
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_FROM16: /* 0x05 */
/* File: armv5te/OP_MOVE_WIDE_FROM16.S */
    /* move-wide/from16 vAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 1)                        @ r3<- BBBB
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_WIDE_16: /* 0x06 */
/* File: armv5te/OP_MOVE_WIDE_16.S */
    /* move-wide/16 vAAAA, vBBBB */
    /* NOTE: regs can overlap, e.g. "move v6,v7" or "move v7,v6" */
    FETCH(r3, 2)                        @ r3<- BBBB
    FETCH(r2, 1)                        @ r2<- AAAA
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BBBB]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AAAA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- fp[BBBB]
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AAAA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT: /* 0x07 */
/* File: armv5te/OP_MOVE_OBJECT.S */
/* File: armv5te/OP_MOVE.S */
    /* for move, move-object, long-to-int */
    /* op vA, vB */
    mov     r1, rINST, lsr #12          @ r1<- B from 15:12
    mov     r0, rINST, lsr #8           @ r0<- A from 11:8
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[B]
    and     r0, r0, #15                 @ r0<- A (mask off high nibble)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r2, r0)                    @ fp[A]<- r2
    GOTO_OPCODE(ip)                     @ execute next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT_FROM16: /* 0x08 */
/* File: armv5te/OP_MOVE_OBJECT_FROM16.S */
/* File: armv5te/OP_MOVE_FROM16.S */
    /* for: move/from16, move-object/from16 */
    /* op vAA, vBBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    mov     r0, rINST, lsr #8           @ r0<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_OBJECT_16: /* 0x09 */
/* File: armv5te/OP_MOVE_OBJECT_16.S */
/* File: armv5te/OP_MOVE_16.S */
    /* for: move/16, move-object/16 */
    /* op vAAAA, vBBBB */
    FETCH(r1, 2)                        @ r1<- BBBB
    FETCH(r0, 1)                        @ r0<- AAAA
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_VREG(r2, r1)                    @ r2<- fp[BBBB]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r0)                    @ fp[AAAA]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT: /* 0x0a */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_WIDE: /* 0x0b */
/* File: armv5te/OP_MOVE_RESULT_WIDE.S */
    /* move-result-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    ldmia   r3, {r0-r1}                 @ r0/r1<- retval.j
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r2, {r0-r1}                 @ fp[AA]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MOVE_RESULT_OBJECT: /* 0x0c */
/* File: armv5te/OP_MOVE_RESULT_OBJECT.S */
/* File: armv5te/OP_MOVE_RESULT.S */
    /* for: move-result, move-result-object */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r0, [rGLUE, #offGlue_retval]    @ r0<- glue->retval.i
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[AA]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_MOVE_EXCEPTION: /* 0x0d */
/* File: armv5te/OP_MOVE_EXCEPTION.S */
    /* move-exception vAA */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    mov     r2, rINST, lsr #8           @ r2<- AA
    ldr     r3, [r0, #offThread_exception]  @ r3<- dvmGetException bypass
    mov     r1, #0                      @ r1<- 0
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    SET_VREG(r3, r2)                    @ fp[AA]<- exception obj
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r1, [r0, #offThread_exception]  @ dvmClearException bypass
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_VOID: /* 0x0e */
/* File: armv5te/OP_RETURN_VOID.S */
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN: /* 0x0f */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval]    @ retval.i <- vAA
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_WIDE: /* 0x10 */
/* File: armv5te/OP_RETURN_WIDE.S */
    /*
     * Return a 64-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     */
    /* return-wide vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[AA]
    add     r3, rGLUE, #offGlue_retval  @ r3<- &glue->retval
    ldmia   r2, {r0-r1}                 @ r0/r1 <- vAA/vAA+1
    stmia   r3, {r0-r1}                 @ retval<- r0/r1
    b       common_returnFromMethod

/* ------------------------------ */
    .balign 64
.L_OP_RETURN_OBJECT: /* 0x11 */
/* File: armv5te/OP_RETURN_OBJECT.S */
/* File: armv5te/OP_RETURN.S */
    /*
     * Return a 32-bit value.  Copies the return value into the "glue"
     * structure, then jumps to the return handler.
     *
     * for: return, return-object
     */
    /* op vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r0, r2)                    @ r0<- vAA
    str     r0, [rGLUE, #offGlue_retval]    @ retval.i <- vAA
    b       common_returnFromMethod


/* ------------------------------ */
    .balign 64
.L_OP_CONST_4: /* 0x12 */
/* File: armv5te/OP_CONST_4.S */
    /* const/4 vA, #+B */
    mov     r1, rINST, lsl #16          @ r1<- Bxxx0000
    mov     r0, rINST, lsr #8           @ r0<- A+
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    mov     r1, r1, asr #28             @ r1<- sssssssB (sign-extended)
    and     r0, r0, #15                 @ r0<- A (mask off high nibble)
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    SET_VREG(r1, r0)                    @ fp[A]<- r1
    GOTO_OPCODE(ip)                     @ execute next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_16: /* 0x13 */
/* File: armv5te/OP_CONST_16.S */
    /* const/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST: /* 0x14 */
/* File: armv5te/OP_CONST.S */
    /* const vAA, #+BBBBbbbb */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_HIGH16: /* 0x15 */
/* File: armv5te/OP_CONST_HIGH16.S */
    /* const/high16 vAA, #+BBBB0000 */
    FETCH(r0, 1)                        @ r0<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, r0, lsl #16             @ r0<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r3)                    @ vAA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_16: /* 0x16 */
/* File: armv5te/OP_CONST_WIDE_16.S */
    /* const-wide/16 vAA, #+BBBB */
    FETCH_S(r0, 1)                      @ r0<- ssssBBBB (sign-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r1, r0, asr #31             @ r1<- ssssssss
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_32: /* 0x17 */
/* File: armv5te/OP_CONST_WIDE_32.S */
    /* const-wide/32 vAA, #+BBBBbbbb */
    FETCH(r0, 1)                        @ r0<- 0000bbbb (low)
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_S(r2, 2)                      @ r2<- ssssBBBB (high)
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    orr     r0, r0, r2, lsl #16         @ r0<- BBBBbbbb
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    mov     r1, r0, asr #31             @ r1<- ssssssss
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE: /* 0x18 */
/* File: armv5te/OP_CONST_WIDE.S */
    /* const-wide vAA, #+HHHHhhhhBBBBbbbb */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (low middle)
    FETCH(r2, 3)                        @ r2<- hhhh (high middle)
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb (low word)
    FETCH(r3, 4)                        @ r3<- HHHH (high)
    mov     r9, rINST, lsr #8           @ r9<- AA
    orr     r1, r2, r3, lsl #16         @ r1<- HHHHhhhh (high word)
    FETCH_ADVANCE_INST(5)               @ advance rPC, load rINST
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 ------------------------------ */
    .balign 64
.L_OP_CONST_WIDE_HIGH16: /* 0x19 */
/* File: armv5te/OP_CONST_WIDE_HIGH16.S */
    /* const-wide/high16 vAA, #+BBBB000000000000 */
    FETCH(r1, 1)                        @ r1<- 0000BBBB (zero-extended)
    mov     r3, rINST, lsr #8           @ r3<- AA
    mov     r0, #0                      @ r0<- 00000000
    mov     r1, r1, lsl #16             @ r1<- BBBB0000
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ vAA<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING: /* 0x1a */
/* File: armv5te/OP_CONST_STRING.S */
    /* const/string vAA, String@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_STRING_resolve
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_STRING_JUMBO: /* 0x1b */
/* File: armv5te/OP_CONST_STRING_JUMBO.S */
    /* const/string vAA, String@BBBBBBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (low)
    FETCH(r1, 2)                        @ r1<- BBBB (high)
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResStrings]    @ r2<- dvmDex->pResStrings
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResStrings[BBBBbbbb]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_STRING_JUMBO_resolve
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_CONST_CLASS: /* 0x1c */
/* File: armv5te/OP_CONST_CLASS.S */
    /* const/class vAA, Class@BBBB */
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- glue->methodClassDex
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- dvmDex->pResClasses
    ldr     r0, [r2, r1, lsl #2]        @ r0<- pResClasses[BBBB]
    cmp     r0, #0                      @ not yet resolved?
    beq     .LOP_CONST_CLASS_resolve
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_ENTER: /* 0x1d */
/* File: armv5te/OP_MONITOR_ENTER.S */
    /*
     * Synchronize on an object.
     */
    /* monitor-enter vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    cmp     r1, #0                      @ null object?
    EXPORT_PC()                         @ need for precise GC, MONITOR_TRACKING
    beq     common_errNullObject        @ null object, throw an exception
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    bl      dvmLockObject               @ call(self, obj)
#ifdef WITH_DEADLOCK_PREDICTION /* implies WITH_MONITOR_TRACKING */
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    ldr     r1, [r0, #offThread_exception]  @ check for exception
    cmp     r1, #0
    bne     common_exceptionThrown      @ exception raised, bail out
#endif
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_MONITOR_EXIT: /* 0x1e */
/* File: armv5te/OP_MONITOR_EXIT.S */
    /*
     * Unlock an object.
     *
     * Exceptions that occur when unlocking a monitor need to appear as
     * if they happened at the following instruction.  See the Dalvik
     * instruction spec.
     */
    /* monitor-exit vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    EXPORT_PC()                         @ before fetch: export the PC
    GET_VREG(r1, r2)                    @ r1<- vAA (object)
    cmp     r1, #0                      @ null object?
    beq     1f                          @ yes
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    bl      dvmUnlockObject             @ r0<- success for unlock(self, obj)
    cmp     r0, #0                      @ failed?
    FETCH_ADVANCE_INST(1)               @ before throw: advance rPC, load rINST
    beq     common_exceptionThrown      @ yes, exception is pending
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
1:
    FETCH_ADVANCE_INST(1)               @ advance before throw
    b       common_errNullObject

/* ------------------------------ */
    .balign 64
.L_OP_CHECK_CAST: /* 0x1f */
/* File: armv5te/OP_CHECK_CAST.S */
    /*
     * Check to see if a cast from one class to another is allowed.
     */
    /* check-cast vAA, class@BBBB */
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH(r2, 1)                        @ r2<- BBBB
    GET_VREG(r9, r3)                    @ r9<- object
    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- pDvmDex
    cmp     r9, #0                      @ is object null?
    ldr     r0, [r0, #offDvmDex_pResClasses]    @ r0<- pDvmDex->pResClasses
    beq     .LOP_CHECK_CAST_okay        @ null obj, cast always succeeds
    ldr     r1, [r0, r2, lsl #2]        @ r1<- resolved class
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_CHECK_CAST_resolve     @ not resolved, do it now
.LOP_CHECK_CAST_resolved:
    cmp     r0, r1                      @ same class (trivial success)?
    bne     .LOP_CHECK_CAST_fullcheck   @ no, do full check
.LOP_CHECK_CAST_okay:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_INSTANCE_OF: /* 0x20 */
/* File: armv5te/OP_INSTANCE_OF.S */
    /*
     * Check to see if an object reference is an instance of a class.
     *
     * Most common situation is a non-null object, being compared against
     * an already-resolved class.
     */
    /* instance-of vA, vB, class@CCCC */
    mov     r3, rINST, lsr #12          @ r3<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r3)                    @ r0<- vB (object)
    and     r9, r9, #15                 @ r9<- A
    cmp     r0, #0                      @ is object null?
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- pDvmDex
    beq     .LOP_INSTANCE_OF_store      @ null obj, not an instance, store r0
    FETCH(r3, 1)                        @ r3<- CCCC
    ldr     r2, [r2, #offDvmDex_pResClasses]    @ r2<- pDvmDex->pResClasses
    ldr     r1, [r2, r3, lsl #2]        @ r1<- resolved class
    ldr     r0, [r0, #offObject_clazz]  @ r0<- obj->clazz
    cmp     r1, #0                      @ have we resolved this before?
    beq     .LOP_INSTANCE_OF_resolve    @ not resolved, do it now
.LOP_INSTANCE_OF_resolved: @ r0=obj->clazz, r1=resolved class
    cmp     r0, r1                      @ same class (trivial success)?
    beq     .LOP_INSTANCE_OF_trivial    @ yes, trivial finish
    b       .LOP_INSTANCE_OF_fullcheck  @ no, do full check

    /* ------------------------------ */
    .balign 64
.L_OP_ARRAY_LENGTH: /* 0x21 */
/* File: armv5te/OP_ARRAY_LENGTH.S */
    /*
     * Return the length of an array.
     */
    mov     r1, rINST, lsr #12          @ r1<- B
    mov     r2, rINST, lsr #8           @ r2<- A+
    GET_VREG(r0, r1)                    @ r0<- vB (object ref)
    and     r2, r2, #15                 @ r2<- A
    cmp     r0, #0                      @ is object null?
    beq     common_errNullObject        @ yup, fail
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- array length
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r3, r2)                    @ vB<- length
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_NEW_INSTANCE: /* 0x22 */
/* File: armv5te/OP_NEW_INSTANCE.S */
    /*
     * Create a new instance of a class.
     */
    /* new-instance vAA, class@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    EXPORT_PC()                         @ req'd for init, resolve, alloc
    cmp     r0, #0                      @ already resolved?
    beq     .LOP_NEW_INSTANCE_resolve   @ no, resolve it now
.LOP_NEW_INSTANCE_resolved: @ r0=class
    ldrb    r1, [r0, #offClassObject_status]    @ r1<- ClassStatus enum
    cmp     r1, #CLASS_INITIALIZED      @ has class been initialized?
    bne     .LOP_NEW_INSTANCE_needinit  @ no, init class now
.LOP_NEW_INSTANCE_initialized: @ r0=class
    mov     r1, #ALLOC_DONT_TRACK       @ flags for alloc call
    bl      dvmAllocObject              @ r0<- new object
    b       .LOP_NEW_INSTANCE_finish    @ continue

    /* ------------------------------ */
    .balign 64
.L_OP_NEW_ARRAY: /* 0x23 */
/* File: armv5te/OP_NEW_ARRAY.S */
    /*
     * Allocate an array of objects, specified with the array class
     * and a count.
     *
     * The verifier guarantees that this is an array class, so we don't
     * check for it here.
     */
    /* new-array vA, vB, class@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    FETCH(r2, 1)                        @ r2<- CCCC
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    GET_VREG(r1, r0)                    @ r1<- vB (array length)
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    cmp     r1, #0                      @ check length
    ldr     r0, [r3, r2, lsl #2]        @ r0<- resolved class
    bmi     common_errNegativeArraySize @ negative length, bail
    cmp     r0, #0                      @ already resolved?
    EXPORT_PC()                         @ req'd for resolve, alloc
    bne     .LOP_NEW_ARRAY_finish       @ resolved, continue
    b       .LOP_NEW_ARRAY_resolve      @ do resolve now

    /* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY: /* 0x24 */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_continue  @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_continue

    /* ------------------------------ */
    .balign 64
.L_OP_FILLED_NEW_ARRAY_RANGE: /* 0x25 */
/* File: armv5te/OP_FILLED_NEW_ARRAY_RANGE.S */
/* File: armv5te/OP_FILLED_NEW_ARRAY.S */
    /*
     * Create a new array with elements filled from registers.
     *
     * for: filled-new-array, filled-new-array/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- pDvmDex
    FETCH(r1, 1)                        @ r1<- BBBB
    ldr     r3, [r3, #offDvmDex_pResClasses]    @ r3<- pDvmDex->pResClasses
    EXPORT_PC()                         @ need for resolve and alloc
    ldr     r0, [r3, r1, lsl #2]        @ r0<- resolved class
    mov     r10, rINST, lsr #8          @ r10<- AA or BA
    cmp     r0, #0                      @ already resolved?
    bne     .LOP_FILLED_NEW_ARRAY_RANGE_continue    @ yes, continue on
8:  ldr     r3, [rGLUE, #offGlue_method]    @ r3<- glue->method
    mov     r2, #0                      @ r2<- false
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- call(clazz, ref)
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    b       .LOP_FILLED_NEW_ARRAY_RANGE_continue


    /* ------------------------------ */
    .balign 64
.L_OP_FILL_ARRAY_DATA: /* 0x26 */
/* File: armv5te/OP_FILL_ARRAY_DATA.S */
    /* fill-array-data vAA, +BBBBBBBB */
    /* Fill the array in vAA from the inline data table at PC+BBBBBBBB*2. */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r1, r0, r1, lsl #16         @ r1<- BBBBbbbb
    GET_VREG(r0, r3)                    @ r0<- vAA (array object)
    add     r1, rPC, r1, lsl #1         @ r1<- PC + BBBBbbbb*2 (array data off.)
/* OP_FILL_ARRAY_DATA, continued: r0=array object, r1=data table address. */
    EXPORT_PC();
    bl      dvmInterpHandleFillArrayData    @ fill the array with predefined data
    cmp     r0, #0                      @ 0 means an exception is thrown
    beq     common_exceptionThrown      @ has exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_THROW: /* 0x27 */
/* File: armv5te/OP_THROW.S */
    /*
     * Throw an exception object in the current thread.
     */
    /* throw vAA */
    mov     r2, rINST, lsr #8           @ r2<- AA
    GET_VREG(r1, r2)                    @ r1<- vAA (exception object)
    ldr     r0, [rGLUE, #offGlue_self]  @ r0<- glue->self
    EXPORT_PC()                         @ exception handler can throw
    cmp     r1, #0                      @ null object?
    beq     common_errNullObject        @ yes, throw an NPE instead
    @ bypass dvmSetException, just store it
    str     r1, [r0, #offThread_exception]  @ thread->exception<- obj
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_GOTO: /* 0x28 */
/* File: armv5te/OP_GOTO.S */
    /*
     * Unconditional branch, 8-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto +AA */
    mov     r0, rINST, lsl #16          @ r0<- AAxx0000
    movs    r9, r0, asr #24             @ r9<- ssssssAA (sign-extended)
    mov     r9, r9, lsl #1              @ r9<- byte offset
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

    /* ------------------------------ */
    .balign 64
.L_OP_GOTO_16: /* 0x29 */
/* File: armv5te/OP_GOTO_16.S */
    /*
     * Unconditional branch, 16-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     */
    /* goto/16 +AAAA */
    FETCH_S(r0, 1)                      @ r0<- ssssAAAA (sign-extended)
    movs    r9, r0, asl #1              @ r9<- byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

    /* ------------------------------ */
    .balign 64
.L_OP_GOTO_32: /* 0x2a */
/* File: armv5te/OP_GOTO_32.S */
    /*
     * Unconditional branch, 32-bit offset.
     *
     * The branch distance is a signed code-unit offset, which we need to
     * double to get a byte offset.
     *
     * Unlike most opcodes, this one is allowed to branch to itself, so
     * our "backward branch" test must be "<=0" instead of "<0".  The ORRS
     * instruction doesn't affect the V flag, so we need to clear it
     * explicitly.
     */
    /* goto/32 +AAAAAAAA */
    FETCH(r0, 1)                        @ r0<- aaaa (lo)
    FETCH(r1, 2)                        @ r1<- AAAA (hi)
    cmp     ip, ip                      @ (clear V flag during stall)
    orrs    r0, r0, r1, lsl #16         @ r0<- AAAAaaaa, check sign
    mov     r9, r0, asl #1              @ r9<- byte offset
    ble     common_backwardBranch       @ backward branch, do periodic checks
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

    /* ------------------------------ */
    .balign 64
.L_OP_PACKED_SWITCH: /* 0x2b */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandlePackedSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

    /* ------------------------------ */
    .balign 64
.L_OP_SPARSE_SWITCH: /* 0x2c */
/* File: armv5te/OP_SPARSE_SWITCH.S */
/* File: armv5te/OP_PACKED_SWITCH.S */
    /*
     * Handle a packed-switch or sparse-switch instruction.  In both cases
     * we decode it and hand it off to a helper function.
     *
     * We don't really expect backward branches in a switch statement, but
     * they're perfectly legal, so we check for them here.
     *
     * for: packed-switch, sparse-switch
     */
    /* op vAA, +BBBB */
    FETCH(r0, 1)                        @ r0<- bbbb (lo)
    FETCH(r1, 2)                        @ r1<- BBBB (hi)
    mov     r3, rINST, lsr #8           @ r3<- AA
    orr     r0, r0, r1, lsl #16         @ r0<- BBBBbbbb
    GET_VREG(r1, r3)                    @ r1<- vAA
    add     r0, rPC, r0, lsl #1         @ r0<- PC + BBBBbbbb*2
    bl      dvmInterpHandleSparseSwitch @ r0<- code-unit branch offset
    movs    r9, r0, asl #1              @ r9<- branch byte offset, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
    beq     common_backwardBranch       @ (want to use BLE but V is unknown)
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_CMPL_FLOAT: /* 0x2d */
/* File: armv5te/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.
 * We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ copy to arg registers
    mov     r1, r10
    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPL_FLOAT_gt_or_nan   @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPL_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_CMPG_FLOAT: /* 0x2e */
/* File: armv5te/OP_CMPG_FLOAT.S */
/* File: armv5te/OP_CMPL_FLOAT.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * The operation we're implementing is:
     *   if (x == y)
     *     return 0;
     *   else if (x < y)
     *     return -1;
     *   else if (x > y)
     *     return 1;
     *   else
     *     return {-1,1};  // one or both operands was NaN
     *
     * The straightforward implementation requires 3 calls to functions
     * that return a result in r0.  We can do it with two calls if our
     * EABI library supports __aeabi_cfcmple (only one if we want to check
     * for NaN directly):
     *   check x <= y
     *     if <, return -1
     *     if ==, return 0
     *   check y <= x
     *     if <, return 1
     *   return {-1,1}
     *
     * for: cmpl-float, cmpg-float
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r9, r2)                    @ r9<- vBB
    GET_VREG(r10, r3)                   @ r10<- vCC
    mov     r0, r9                      @ copy to arg registers
    mov     r1, r10
    bl      __aeabi_cfcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPG_FLOAT_gt_or_nan   @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPG_FLOAT_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


    /* ------------------------------ */
    .balign 64
.L_OP_CMPL_DOUBLE: /* 0x2f */
/* File: armv5te/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r9, r0, #255                @ r9<- BB
    mov     r10, r0, lsr #8             @ r10<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPL_DOUBLE_gt_or_nan  @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPL_DOUBLE_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_CMPG_DOUBLE: /* 0x30 */
/* File: armv5te/OP_CMPG_DOUBLE.S */
/* File: armv5te/OP_CMPL_DOUBLE.S */
    /*
     * Compare two floating-point values.  Puts 0, 1, or -1 into the
     * destination register based on the results of the comparison.
     *
     * Provide a "naninst" instruction that puts 1 or -1 into r1 depending
     * on what value we'd like to return when one of the operands is NaN.
     *
     * See OP_CMPL_FLOAT for an explanation.
     *
     * For: cmpl-double, cmpg-double
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    and     r9, r0, #255                @ r9<- BB
    mov     r10, r0, lsr #8             @ r10<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[BB]
    add     r10, rFP, r10, lsl #2       @ r10<- &fp[CC]
    ldmia   r9, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r10, {r2-r3}                @ r2/r3<- vCC/vCC+1
    bl      __aeabi_cdcmple             @ cmp <=: C clear if <, Z set if eq
    bhi     .LOP_CMPG_DOUBLE_gt_or_nan  @ C set and Z clear, disambiguate
    mvncc   r1, #0                      @ (less than) r1<- -1
    moveq   r1, #0                      @ (equal) r1<- 0, trumps less than
.LOP_CMPG_DOUBLE_finish:
    mov     r3, rINST, lsr #8           @ r3<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r3)                    @ vAA<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


    /* ------------------------------ */
    .balign 64
.L_OP_CMP_LONG: /* 0x31 */
/* File: armv5te/OP_CMP_LONG.S */
    /*
     * Compare two 64-bit values.  Puts 0, 1, or -1 into the destination
     * register based on the results of the comparison.
     *
     * We load the full values with LDM, but in practice many values could
     * be resolved by only looking at the high word.  This could be made
     * faster or slower by splitting the LDM into a pair of LDRs.
     *
     * If we just wanted to set condition flags, we could do this:
     *   subs  ip, r0, r2
     *   sbcs  ip, r1, r3
     *   subeqs  ip, r0, r2
     * Leaving { <0, 0, >0 } in ip.  However, we have to set it to a specific
     * integer value, which we can do with 2 conditional mov/mvn instructions
     * (set 1, set -1; if they're equal we already have 0 in ip), giving
     * us a constant 5-cycle path plus a branch at the end to the
     * instruction epilogue code.  The multi-compare approach below needs
     * 2 or 3 cycles + branch if the high word doesn't match, 6 + branch
     * in the worst case (the 64-bit values are equal).
     */
    /* cmp-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    cmp     r1, r3                      @ compare (vBB+1, vCC+1)
    blt     .LOP_CMP_LONG_less          @ signed compare on high part
    bgt     .LOP_CMP_LONG_greater
    subs    r1, r0, r2                  @ r1<- r0 - r2
    bhi     .LOP_CMP_LONG_greater       @ unsigned compare on low part
    bne     .LOP_CMP_LONG_less
    b       .LOP_CMP_LONG_finish        @ equal; r1 already holds 0

    /* ------------------------------ */
    .balign 64
.L_OP_IF_EQ: /* 0x32 */
/* File: armv5te/OP_IF_EQ.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_IF_NE: /* 0x33 */
/* File: armv5te/OP_IF_NE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_IF_LT: /* 0x34 */
/* File: armv5te/OP_IF_LT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_IF_GE: /* 0x35 */
/* File: armv5te/OP_IF_GE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_IF_GT: /* 0x36 */
/* File: armv5te/OP_IF_GT.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    ble     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_IF_LE: /* 0x37 */
/* File: armv5te/OP_IF_LE.S */
/* File: armv5te/bincmp.S */
    /*
     * Generic two-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
     */
    /* if-cmp vA, vB, +CCCC */
    mov     r0, rINST, lsr #8           @ r0<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r0, r0, #15
    GET_VREG(r3, r1)                    @ r3<- vB
    GET_VREG(r2, r0)                    @ r2<- vA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, r3                      @ compare (vA, vB)
    bgt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ yes, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    b       common_testUpdateProfile
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_IF_EQZ: /* 0x38 */
/* File: armv5te/OP_IF_EQZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bne     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_IF_NEZ: /* 0x39 */
/* File: armv5te/OP_IF_NEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    beq     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_IF_LTZ: /* 0x3a */
/* File: armv5te/OP_IF_LTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    bge     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_IF_GEZ: /* 0x3b */
/* File: armv5te/OP_IF_GEZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
     *
     * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez
     */
    /* if-cmp vAA, +BBBB */
    mov     r0, rINST, lsr #8           @ r0<- AA
    GET_VREG(r2, r0)                    @ r2<- vAA
    mov     r9, #4                      @ r9<- BYTE branch dist for not-taken
    cmp     r2, #0                      @ compare (vAA, 0)
    blt     1f                          @ branch to 1 if comparison failed
    FETCH_S(r9, 1)                      @ r9<- branch offset, in code units
    movs    r9, r9, asl #1              @ convert to bytes, check sign
    bmi     common_backwardBranch       @ backward branch, do periodic checks
1:
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    cmp     r0, #0
    bne     common_updateProfile
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    FETCH_ADVANCE_INST_RB(r9)           @ update rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif


    /* ------------------------------ */
    .balign 64
.L_OP_IF_GTZ: /* 0x3c */
/* File: armv5te/OP_IF_GTZ.S */
/* File: armv5te/zcmp.S */
    /*
     * Generic one-operand compare-and-branch operation.  Provide a "revcmp"
     * fragment that specifies the *reverse* comparison to perform, e.g.
     * for "if-le" you would use "gt".
1888 * 1889 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1890 */ 1891 /* if-cmp vAA, +BBBB */ 1892 mov r0, rINST, lsr #8 @ r0<- AA 1893 GET_VREG(r2, r0) @ r2<- vAA 1894 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1895 cmp r2, #0 @ compare (vA, 0) 1896 ble 1f @ branch to 1 if comparison failed 1897 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1898 movs r9, r9, asl #1 @ convert to bytes, check sign 1899 bmi common_backwardBranch @ backward branch, do periodic checks 1900 1: 1901 #if defined(WITH_JIT) 1902 GET_JIT_PROF_TABLE(r0) 1903 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1904 cmp r0,#0 1905 bne common_updateProfile 1906 GET_INST_OPCODE(ip) @ extract opcode from rINST 1907 GOTO_OPCODE(ip) @ jump to next instruction 1908 #else 1909 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1910 GET_INST_OPCODE(ip) @ extract opcode from rINST 1911 GOTO_OPCODE(ip) @ jump to next instruction 1912 #endif 1913 1914 1915 /* ------------------------------ */ 1916 .balign 64 1917 .L_OP_IF_LEZ: /* 0x3d */ 1918 /* File: armv5te/OP_IF_LEZ.S */ 1919 /* File: armv5te/zcmp.S */ 1920 /* 1921 * Generic one-operand compare-and-branch operation. Provide a "revcmp" 1922 * fragment that specifies the *reverse* comparison to perform, e.g. 1923 * for "if-le" you would use "gt". 
1924 * 1925 * for: if-eqz, if-nez, if-ltz, if-gez, if-gtz, if-lez 1926 */ 1927 /* if-cmp vAA, +BBBB */ 1928 mov r0, rINST, lsr #8 @ r0<- AA 1929 GET_VREG(r2, r0) @ r2<- vAA 1930 mov r9, #4 @ r0<- BYTE branch dist for not-taken 1931 cmp r2, #0 @ compare (vA, 0) 1932 bgt 1f @ branch to 1 if comparison failed 1933 FETCH_S(r9, 1) @ r9<- branch offset, in code units 1934 movs r9, r9, asl #1 @ convert to bytes, check sign 1935 bmi common_backwardBranch @ backward branch, do periodic checks 1936 1: 1937 #if defined(WITH_JIT) 1938 GET_JIT_PROF_TABLE(r0) 1939 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1940 cmp r0,#0 1941 bne common_updateProfile 1942 GET_INST_OPCODE(ip) @ extract opcode from rINST 1943 GOTO_OPCODE(ip) @ jump to next instruction 1944 #else 1945 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 1946 GET_INST_OPCODE(ip) @ extract opcode from rINST 1947 GOTO_OPCODE(ip) @ jump to next instruction 1948 #endif 1949 1950 1951 /* ------------------------------ */ 1952 .balign 64 1953 .L_OP_UNUSED_3E: /* 0x3e */ 1954 /* File: armv5te/OP_UNUSED_3E.S */ 1955 /* File: armv5te/unused.S */ 1956 bl common_abort 1957 1958 1959 /* ------------------------------ */ 1960 .balign 64 1961 .L_OP_UNUSED_3F: /* 0x3f */ 1962 /* File: armv5te/OP_UNUSED_3F.S */ 1963 /* File: armv5te/unused.S */ 1964 bl common_abort 1965 1966 1967 /* ------------------------------ */ 1968 .balign 64 1969 .L_OP_UNUSED_40: /* 0x40 */ 1970 /* File: armv5te/OP_UNUSED_40.S */ 1971 /* File: armv5te/unused.S */ 1972 bl common_abort 1973 1974 1975 /* ------------------------------ */ 1976 .balign 64 1977 .L_OP_UNUSED_41: /* 0x41 */ 1978 /* File: armv5te/OP_UNUSED_41.S */ 1979 /* File: armv5te/unused.S */ 1980 bl common_abort 1981 1982 1983 /* ------------------------------ */ 1984 .balign 64 1985 .L_OP_UNUSED_42: /* 0x42 */ 1986 /* File: armv5te/OP_UNUSED_42.S */ 1987 /* File: armv5te/unused.S */ 1988 bl common_abort 1989 1990 1991 /* ------------------------------ */ 1992 .balign 64 1993 
.L_OP_UNUSED_43: /* 0x43 */
/* File: armv5te/OP_UNUSED_43.S */
/* File: armv5te/unused.S */
    bl      common_abort                @ unused opcode: should never execute

    /* ------------------------------ */
    .balign 64
.L_OP_AGET: /* 0x44 */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_AGET_WIDE: /* 0x45 */
/* File: armv5te/OP_AGET_WIDE.S */
    /*
     * Array get, 64 bits.  vAA <- vBB[vCC].
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
     */
    /* aget-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcc     .LOP_AGET_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

    /* ------------------------------ */
    .balign 64
.L_OP_AGET_OBJECT: /* 0x46 */
/* File: armv5te/OP_AGET_OBJECT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_AGET_BOOLEAN: /* 0x47 */
/* File: armv5te/OP_AGET_BOOLEAN.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrb    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_AGET_BYTE: /* 0x48 */
/* File: armv5te/OP_AGET_BYTE.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsb   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_AGET_CHAR: /* 0x49 */
/* File: armv5te/OP_AGET_CHAR.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrh    r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_AGET_SHORT: /* 0x4a */
/* File: armv5te/OP_AGET_SHORT.S */
/* File: armv5te/OP_AGET.S */
    /*
     * Array get, 32 bits or less.  vAA <- vBB[vCC].
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aget, aget-object, aget-boolean, aget-byte, aget-char, aget-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldrsh   r2, [r0, #offArrayObject_contents]  @ r2<- vBB[vCC]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r2, r9)                    @ vAA<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_APUT: /* 0x4b */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #2          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_APUT_WIDE: /* 0x4c */
/* File: armv5te/OP_APUT_WIDE.S */
    /*
     * Array put, 64 bits.  vBB[vCC] <- vAA.
     *
     * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
     */
    /* aput-wide vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #3          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    bcc     .LOP_APUT_WIDE_finish       @ okay, continue below
    b       common_errArrayIndex        @ index >= length, bail
    @ May want to swap the order of these two branches depending on how the
    @ branch prediction (if any) handles conditional forward branches vs.
    @ unconditional forward branches.

    /* ------------------------------ */
    .balign 64
.L_OP_APUT_OBJECT: /* 0x4d */
/* File: armv5te/OP_APUT_OBJECT.S */
    /*
     * Store an object into an array.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     */
    /* op vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    GET_VREG(rINST, r2)                 @ rINST<- vBB (array object)
    GET_VREG(r0, r3)                    @ r0<- vCC (requested index)
    cmp     rINST, #0                   @ null array object?
    GET_VREG(r9, r9)                    @ r9<- vAA
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [rINST, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r10, rINST, r0, lsl #2      @ r10<- arrayObj + index*width
    cmp     r0, r3                      @ compare unsigned index, length
    bcc     .LOP_APUT_OBJECT_finish     @ we're okay, continue on
    b       common_errArrayIndex        @ index >= length, bail

    /* ------------------------------ */
    .balign 64
.L_OP_APUT_BOOLEAN: /* 0x4e */
/* File: armv5te/OP_APUT_BOOLEAN.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_APUT_BYTE: /* 0x4f */
/* File: armv5te/OP_APUT_BYTE.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #0          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strb    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_APUT_CHAR: /* 0x50 */
/* File: armv5te/OP_APUT_CHAR.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_APUT_SHORT: /* 0x51 */
/* File: armv5te/OP_APUT_SHORT.S */
/* File: armv5te/OP_APUT.S */
    /*
     * Array put, 32 bits or less.  vBB[vCC] <- vAA.
     *
     * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
     * instructions.  We use a pair of FETCH_Bs instead.
     *
     * for: aput, aput-boolean, aput-byte, aput-char, aput-short
     */
    /* op vAA, vBB, vCC */
    FETCH_B(r2, 1, 0)                   @ r2<- BB
    mov     r9, rINST, lsr #8           @ r9<- AA
    FETCH_B(r3, 1, 1)                   @ r3<- CC
    GET_VREG(r0, r2)                    @ r0<- vBB (array object)
    GET_VREG(r1, r3)                    @ r1<- vCC (requested index)
    cmp     r0, #0                      @ null array object?
    beq     common_errNullObject        @ yes, bail
    ldr     r3, [r0, #offArrayObject_length]    @ r3<- arrayObj->length
    add     r0, r0, r1, lsl #1          @ r0<- arrayObj + index*width
    cmp     r1, r3                      @ compare unsigned index, length
    bcs     common_errArrayIndex        @ index >= length, bail
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r2, r9)                    @ r2<- vAA
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    strh    r2, [r0, #offArrayObject_contents]  @ vBB[vCC]<- r2
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /* ------------------------------ */
    .balign 64
.L_OP_IGET: /* 0x52 */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_finish
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE: /* 0x53 */
/* File: armv5te/OP_IGET_WIDE.S */
    /*
     * 64-bit instance field get.
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_WIDE_finish
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT: /* 0x54 */
/* File: armv5te/OP_IGET_OBJECT.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_OBJECT_finish
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IGET_BOOLEAN: /* 0x55 */
/* File: armv5te/OP_IGET_BOOLEAN.S */
@include "armv5te/OP_IGET.S" { "load":"ldrb", "sqnum":"1" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_BOOLEAN_finish
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IGET_BYTE: /* 0x56 */
/* File: armv5te/OP_IGET_BYTE.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsb", "sqnum":"2" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_BYTE_finish
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IGET_CHAR: /* 0x57 */
/* File: armv5te/OP_IGET_CHAR.S */
@include "armv5te/OP_IGET.S" { "load":"ldrh", "sqnum":"3" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_CHAR_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_CHAR_finish
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IGET_SHORT: /* 0x58 */
/* File: armv5te/OP_IGET_SHORT.S */
@include "armv5te/OP_IGET.S" { "load":"ldrsh", "sqnum":"4" }
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_SHORT_finish      @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolution succeeded?
    bne     .LOP_IGET_SHORT_finish
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IPUT: /* 0x59 */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_finish            @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_finish            @ yes, finish up
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE: /* 0x5a */
/* File: armv5te/OP_IPUT_WIDE.S */
    /*
     * 64-bit instance field put.
     */
    /* iput-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_WIDE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_WIDE_finish       @ yes, finish up
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IPUT_OBJECT: /* 0x5b */
/* File: armv5te/OP_IPUT_OBJECT.S */
    /*
     * 32-bit instance field put.
     *
     * for: iput-object, iput-object-volatile
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_OBJECT_finish     @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_OBJECT_finish     @ yes, finish up
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IPUT_BOOLEAN: /* 0x5c */
/* File: armv5te/OP_IPUT_BOOLEAN.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"1" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BOOLEAN_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BOOLEAN_finish    @ yes, finish up
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IPUT_BYTE: /* 0x5d */
/* File: armv5te/OP_IPUT_BYTE.S */
@include "armv5te/OP_IPUT.S" { "store":"strb", "sqnum":"2" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_BYTE_finish       @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_BYTE_finish       @ yes, finish up
    b       common_exceptionThrown

    /* ------------------------------ */
    .balign 64
.L_OP_IPUT_CHAR: /* 0x5e */
/* File: armv5te/OP_IPUT_CHAR.S */
@include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"3" }
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
2773 bne .LOP_IPUT_CHAR_finish @ no, already resolved 2774 8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2775 EXPORT_PC() @ resolve() could throw 2776 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2777 bl dvmResolveInstField @ r0<- resolved InstField ptr 2778 cmp r0, #0 @ success? 2779 bne .LOP_IPUT_CHAR_finish @ yes, finish up 2780 b common_exceptionThrown 2781 2782 2783 /* ------------------------------ */ 2784 .balign 64 2785 .L_OP_IPUT_SHORT: /* 0x5f */ 2786 /* File: armv5te/OP_IPUT_SHORT.S */ 2787 @include "armv5te/OP_IPUT.S" { "store":"strh", "sqnum":"4" } 2788 /* File: armv5te/OP_IPUT.S */ 2789 /* 2790 * General 32-bit instance field put. 2791 * 2792 * for: iput, iput-boolean, iput-byte, iput-char, iput-short 2793 */ 2794 /* op vA, vB, field@CCCC */ 2795 mov r0, rINST, lsr #12 @ r0<- B 2796 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 2797 FETCH(r1, 1) @ r1<- field ref CCCC 2798 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 2799 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 2800 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 2801 cmp r0, #0 @ is resolved entry null? 2802 bne .LOP_IPUT_SHORT_finish @ no, already resolved 2803 8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 2804 EXPORT_PC() @ resolve() could throw 2805 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 2806 bl dvmResolveInstField @ r0<- resolved InstField ptr 2807 cmp r0, #0 @ success? 2808 bne .LOP_IPUT_SHORT_finish @ yes, finish up 2809 b common_exceptionThrown 2810 2811 2812 /* ------------------------------ */ 2813 .balign 64 2814 .L_OP_SGET: /* 0x60 */ 2815 /* File: armv5te/OP_SGET.S */ 2816 /* 2817 * General 32-bit SGET handler. 
2818 * 2819 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2820 */ 2821 /* op vAA, field@BBBB */ 2822 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2823 FETCH(r1, 1) @ r1<- field ref BBBB 2824 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2825 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2826 cmp r0, #0 @ is resolved entry null? 2827 beq .LOP_SGET_resolve @ yes, do resolve 2828 .LOP_SGET_finish: @ field ptr in r0 2829 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2830 @ no-op @ acquiring load 2831 mov r2, rINST, lsr #8 @ r2<- AA 2832 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2833 SET_VREG(r1, r2) @ fp[AA]<- r1 2834 GET_INST_OPCODE(ip) @ extract opcode from rINST 2835 GOTO_OPCODE(ip) @ jump to next instruction 2836 2837 /* ------------------------------ */ 2838 .balign 64 2839 .L_OP_SGET_WIDE: /* 0x61 */ 2840 /* File: armv5te/OP_SGET_WIDE.S */ 2841 /* 2842 * 64-bit SGET handler. 2843 */ 2844 /* sget-wide vAA, field@BBBB */ 2845 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2846 FETCH(r1, 1) @ r1<- field ref BBBB 2847 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2848 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2849 cmp r0, #0 @ is resolved entry null? 
2850 beq .LOP_SGET_WIDE_resolve @ yes, do resolve 2851 .LOP_SGET_WIDE_finish: 2852 mov r9, rINST, lsr #8 @ r9<- AA 2853 .if 0 2854 add r0, r0, #offStaticField_value @ r0<- pointer to data 2855 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 2856 .else 2857 ldrd r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned) 2858 .endif 2859 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 2860 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2861 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 2862 GET_INST_OPCODE(ip) @ extract opcode from rINST 2863 GOTO_OPCODE(ip) @ jump to next instruction 2864 2865 /* ------------------------------ */ 2866 .balign 64 2867 .L_OP_SGET_OBJECT: /* 0x62 */ 2868 /* File: armv5te/OP_SGET_OBJECT.S */ 2869 /* File: armv5te/OP_SGET.S */ 2870 /* 2871 * General 32-bit SGET handler. 2872 * 2873 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2874 */ 2875 /* op vAA, field@BBBB */ 2876 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2877 FETCH(r1, 1) @ r1<- field ref BBBB 2878 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2879 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2880 cmp r0, #0 @ is resolved entry null? 2881 beq .LOP_SGET_OBJECT_resolve @ yes, do resolve 2882 .LOP_SGET_OBJECT_finish: @ field ptr in r0 2883 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2884 @ no-op @ acquiring load 2885 mov r2, rINST, lsr #8 @ r2<- AA 2886 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2887 SET_VREG(r1, r2) @ fp[AA]<- r1 2888 GET_INST_OPCODE(ip) @ extract opcode from rINST 2889 GOTO_OPCODE(ip) @ jump to next instruction 2890 2891 2892 /* ------------------------------ */ 2893 .balign 64 2894 .L_OP_SGET_BOOLEAN: /* 0x63 */ 2895 /* File: armv5te/OP_SGET_BOOLEAN.S */ 2896 /* File: armv5te/OP_SGET.S */ 2897 /* 2898 * General 32-bit SGET handler. 
2899 * 2900 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2901 */ 2902 /* op vAA, field@BBBB */ 2903 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2904 FETCH(r1, 1) @ r1<- field ref BBBB 2905 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2906 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2907 cmp r0, #0 @ is resolved entry null? 2908 beq .LOP_SGET_BOOLEAN_resolve @ yes, do resolve 2909 .LOP_SGET_BOOLEAN_finish: @ field ptr in r0 2910 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2911 @ no-op @ acquiring load 2912 mov r2, rINST, lsr #8 @ r2<- AA 2913 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2914 SET_VREG(r1, r2) @ fp[AA]<- r1 2915 GET_INST_OPCODE(ip) @ extract opcode from rINST 2916 GOTO_OPCODE(ip) @ jump to next instruction 2917 2918 2919 /* ------------------------------ */ 2920 .balign 64 2921 .L_OP_SGET_BYTE: /* 0x64 */ 2922 /* File: armv5te/OP_SGET_BYTE.S */ 2923 /* File: armv5te/OP_SGET.S */ 2924 /* 2925 * General 32-bit SGET handler. 2926 * 2927 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2928 */ 2929 /* op vAA, field@BBBB */ 2930 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2931 FETCH(r1, 1) @ r1<- field ref BBBB 2932 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2933 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2934 cmp r0, #0 @ is resolved entry null? 
2935 beq .LOP_SGET_BYTE_resolve @ yes, do resolve 2936 .LOP_SGET_BYTE_finish: @ field ptr in r0 2937 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2938 @ no-op @ acquiring load 2939 mov r2, rINST, lsr #8 @ r2<- AA 2940 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2941 SET_VREG(r1, r2) @ fp[AA]<- r1 2942 GET_INST_OPCODE(ip) @ extract opcode from rINST 2943 GOTO_OPCODE(ip) @ jump to next instruction 2944 2945 2946 /* ------------------------------ */ 2947 .balign 64 2948 .L_OP_SGET_CHAR: /* 0x65 */ 2949 /* File: armv5te/OP_SGET_CHAR.S */ 2950 /* File: armv5te/OP_SGET.S */ 2951 /* 2952 * General 32-bit SGET handler. 2953 * 2954 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2955 */ 2956 /* op vAA, field@BBBB */ 2957 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2958 FETCH(r1, 1) @ r1<- field ref BBBB 2959 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2960 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2961 cmp r0, #0 @ is resolved entry null? 2962 beq .LOP_SGET_CHAR_resolve @ yes, do resolve 2963 .LOP_SGET_CHAR_finish: @ field ptr in r0 2964 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2965 @ no-op @ acquiring load 2966 mov r2, rINST, lsr #8 @ r2<- AA 2967 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2968 SET_VREG(r1, r2) @ fp[AA]<- r1 2969 GET_INST_OPCODE(ip) @ extract opcode from rINST 2970 GOTO_OPCODE(ip) @ jump to next instruction 2971 2972 2973 /* ------------------------------ */ 2974 .balign 64 2975 .L_OP_SGET_SHORT: /* 0x66 */ 2976 /* File: armv5te/OP_SGET_SHORT.S */ 2977 /* File: armv5te/OP_SGET.S */ 2978 /* 2979 * General 32-bit SGET handler. 
2980 * 2981 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 2982 */ 2983 /* op vAA, field@BBBB */ 2984 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 2985 FETCH(r1, 1) @ r1<- field ref BBBB 2986 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 2987 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 2988 cmp r0, #0 @ is resolved entry null? 2989 beq .LOP_SGET_SHORT_resolve @ yes, do resolve 2990 .LOP_SGET_SHORT_finish: @ field ptr in r0 2991 ldr r1, [r0, #offStaticField_value] @ r1<- field value 2992 @ no-op @ acquiring load 2993 mov r2, rINST, lsr #8 @ r2<- AA 2994 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 2995 SET_VREG(r1, r2) @ fp[AA]<- r1 2996 GET_INST_OPCODE(ip) @ extract opcode from rINST 2997 GOTO_OPCODE(ip) @ jump to next instruction 2998 2999 3000 /* ------------------------------ */ 3001 .balign 64 3002 .L_OP_SPUT: /* 0x67 */ 3003 /* File: armv5te/OP_SPUT.S */ 3004 /* 3005 * General 32-bit SPUT handler. 3006 * 3007 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3008 */ 3009 /* op vAA, field@BBBB */ 3010 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3011 FETCH(r1, 1) @ r1<- field ref BBBB 3012 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3013 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3014 cmp r0, #0 @ is resolved entry null? 3015 beq .LOP_SPUT_resolve @ yes, do resolve 3016 .LOP_SPUT_finish: @ field ptr in r0 3017 mov r2, rINST, lsr #8 @ r2<- AA 3018 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3019 GET_VREG(r1, r2) @ r1<- fp[AA] 3020 GET_INST_OPCODE(ip) @ extract opcode from rINST 3021 @ no-op @ releasing store 3022 str r1, [r0, #offStaticField_value] @ field<- vAA 3023 GOTO_OPCODE(ip) @ jump to next instruction 3024 3025 /* ------------------------------ */ 3026 .balign 64 3027 .L_OP_SPUT_WIDE: /* 0x68 */ 3028 /* File: armv5te/OP_SPUT_WIDE.S */ 3029 /* 3030 * 64-bit SPUT handler. 
3031 */ 3032 /* sput-wide vAA, field@BBBB */ 3033 ldr r0, [rGLUE, #offGlue_methodClassDex] @ r0<- DvmDex 3034 FETCH(r1, 1) @ r1<- field ref BBBB 3035 ldr r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields 3036 mov r9, rINST, lsr #8 @ r9<- AA 3037 ldr r2, [r0, r1, lsl #2] @ r2<- resolved StaticField ptr 3038 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 3039 cmp r2, #0 @ is resolved entry null? 3040 beq .LOP_SPUT_WIDE_resolve @ yes, do resolve 3041 .LOP_SPUT_WIDE_finish: @ field ptr in r2, AA in r9 3042 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3043 ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1 3044 GET_INST_OPCODE(r10) @ extract opcode from rINST 3045 .if 0 3046 add r2, r2, #offStaticField_value @ r2<- pointer to data 3047 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 3048 .else 3049 strd r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1 3050 .endif 3051 GOTO_OPCODE(r10) @ jump to next instruction 3052 3053 /* ------------------------------ */ 3054 .balign 64 3055 .L_OP_SPUT_OBJECT: /* 0x69 */ 3056 /* File: armv5te/OP_SPUT_OBJECT.S */ 3057 /* 3058 * 32-bit SPUT handler for objects 3059 * 3060 * for: sput-object, sput-object-volatile 3061 */ 3062 /* op vAA, field@BBBB */ 3063 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3064 FETCH(r1, 1) @ r1<- field ref BBBB 3065 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3066 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3067 cmp r0, #0 @ is resolved entry null? 3068 bne .LOP_SPUT_OBJECT_finish @ no, continue 3069 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3070 EXPORT_PC() @ resolve() could throw, so export now 3071 ldr r0, [r9, #offMethod_clazz] @ r0<- method->clazz 3072 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 3073 cmp r0, #0 @ success? 
3074 bne .LOP_SPUT_OBJECT_finish @ yes, finish 3075 b common_exceptionThrown @ no, handle exception 3076 3077 3078 /* ------------------------------ */ 3079 .balign 64 3080 .L_OP_SPUT_BOOLEAN: /* 0x6a */ 3081 /* File: armv5te/OP_SPUT_BOOLEAN.S */ 3082 /* File: armv5te/OP_SPUT.S */ 3083 /* 3084 * General 32-bit SPUT handler. 3085 * 3086 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3087 */ 3088 /* op vAA, field@BBBB */ 3089 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3090 FETCH(r1, 1) @ r1<- field ref BBBB 3091 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3092 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3093 cmp r0, #0 @ is resolved entry null? 3094 beq .LOP_SPUT_BOOLEAN_resolve @ yes, do resolve 3095 .LOP_SPUT_BOOLEAN_finish: @ field ptr in r0 3096 mov r2, rINST, lsr #8 @ r2<- AA 3097 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3098 GET_VREG(r1, r2) @ r1<- fp[AA] 3099 GET_INST_OPCODE(ip) @ extract opcode from rINST 3100 @ no-op @ releasing store 3101 str r1, [r0, #offStaticField_value] @ field<- vAA 3102 GOTO_OPCODE(ip) @ jump to next instruction 3103 3104 3105 /* ------------------------------ */ 3106 .balign 64 3107 .L_OP_SPUT_BYTE: /* 0x6b */ 3108 /* File: armv5te/OP_SPUT_BYTE.S */ 3109 /* File: armv5te/OP_SPUT.S */ 3110 /* 3111 * General 32-bit SPUT handler. 3112 * 3113 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3114 */ 3115 /* op vAA, field@BBBB */ 3116 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3117 FETCH(r1, 1) @ r1<- field ref BBBB 3118 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3119 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3120 cmp r0, #0 @ is resolved entry null? 
3121 beq .LOP_SPUT_BYTE_resolve @ yes, do resolve 3122 .LOP_SPUT_BYTE_finish: @ field ptr in r0 3123 mov r2, rINST, lsr #8 @ r2<- AA 3124 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3125 GET_VREG(r1, r2) @ r1<- fp[AA] 3126 GET_INST_OPCODE(ip) @ extract opcode from rINST 3127 @ no-op @ releasing store 3128 str r1, [r0, #offStaticField_value] @ field<- vAA 3129 GOTO_OPCODE(ip) @ jump to next instruction 3130 3131 3132 /* ------------------------------ */ 3133 .balign 64 3134 .L_OP_SPUT_CHAR: /* 0x6c */ 3135 /* File: armv5te/OP_SPUT_CHAR.S */ 3136 /* File: armv5te/OP_SPUT.S */ 3137 /* 3138 * General 32-bit SPUT handler. 3139 * 3140 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3141 */ 3142 /* op vAA, field@BBBB */ 3143 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3144 FETCH(r1, 1) @ r1<- field ref BBBB 3145 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3146 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3147 cmp r0, #0 @ is resolved entry null? 3148 beq .LOP_SPUT_CHAR_resolve @ yes, do resolve 3149 .LOP_SPUT_CHAR_finish: @ field ptr in r0 3150 mov r2, rINST, lsr #8 @ r2<- AA 3151 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3152 GET_VREG(r1, r2) @ r1<- fp[AA] 3153 GET_INST_OPCODE(ip) @ extract opcode from rINST 3154 @ no-op @ releasing store 3155 str r1, [r0, #offStaticField_value] @ field<- vAA 3156 GOTO_OPCODE(ip) @ jump to next instruction 3157 3158 3159 /* ------------------------------ */ 3160 .balign 64 3161 .L_OP_SPUT_SHORT: /* 0x6d */ 3162 /* File: armv5te/OP_SPUT_SHORT.S */ 3163 /* File: armv5te/OP_SPUT.S */ 3164 /* 3165 * General 32-bit SPUT handler. 
3166 * 3167 * for: sput, sput-boolean, sput-byte, sput-char, sput-short 3168 */ 3169 /* op vAA, field@BBBB */ 3170 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 3171 FETCH(r1, 1) @ r1<- field ref BBBB 3172 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 3173 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 3174 cmp r0, #0 @ is resolved entry null? 3175 beq .LOP_SPUT_SHORT_resolve @ yes, do resolve 3176 .LOP_SPUT_SHORT_finish: @ field ptr in r0 3177 mov r2, rINST, lsr #8 @ r2<- AA 3178 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 3179 GET_VREG(r1, r2) @ r1<- fp[AA] 3180 GET_INST_OPCODE(ip) @ extract opcode from rINST 3181 @ no-op @ releasing store 3182 str r1, [r0, #offStaticField_value] @ field<- vAA 3183 GOTO_OPCODE(ip) @ jump to next instruction 3184 3185 3186 /* ------------------------------ */ 3187 .balign 64 3188 .L_OP_INVOKE_VIRTUAL: /* 0x6e */ 3189 /* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3190 /* 3191 * Handle a virtual method call. 3192 * 3193 * for: invoke-virtual, invoke-virtual/range 3194 */ 3195 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3196 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3197 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3198 FETCH(r1, 1) @ r1<- BBBB 3199 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3200 FETCH(r10, 2) @ r10<- GFED or CCCC 3201 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3202 .if (!0) 3203 and r10, r10, #15 @ r10<- D (or stays CCCC) 3204 .endif 3205 cmp r0, #0 @ already resolved? 3206 EXPORT_PC() @ must export for invoke 3207 bne .LOP_INVOKE_VIRTUAL_continue @ yes, continue on 3208 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3209 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3210 mov r2, #METHOD_VIRTUAL @ resolver method type 3211 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3212 cmp r0, #0 @ got null? 
3213 bne .LOP_INVOKE_VIRTUAL_continue @ no, continue 3214 b common_exceptionThrown @ yes, handle exception 3215 3216 /* ------------------------------ */ 3217 .balign 64 3218 .L_OP_INVOKE_SUPER: /* 0x6f */ 3219 /* File: armv5te/OP_INVOKE_SUPER.S */ 3220 /* 3221 * Handle a "super" method call. 3222 * 3223 * for: invoke-super, invoke-super/range 3224 */ 3225 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3226 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3227 FETCH(r10, 2) @ r10<- GFED or CCCC 3228 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3229 .if (!0) 3230 and r10, r10, #15 @ r10<- D (or stays CCCC) 3231 .endif 3232 FETCH(r1, 1) @ r1<- BBBB 3233 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3234 GET_VREG(r2, r10) @ r2<- "this" ptr 3235 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3236 cmp r2, #0 @ null "this"? 3237 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3238 beq common_errNullObject @ null "this", throw exception 3239 cmp r0, #0 @ already resolved? 3240 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3241 EXPORT_PC() @ must export for invoke 3242 bne .LOP_INVOKE_SUPER_continue @ resolved, continue on 3243 b .LOP_INVOKE_SUPER_resolve @ do resolve now 3244 3245 /* ------------------------------ */ 3246 .balign 64 3247 .L_OP_INVOKE_DIRECT: /* 0x70 */ 3248 /* File: armv5te/OP_INVOKE_DIRECT.S */ 3249 /* 3250 * Handle a direct method call. 3251 * 3252 * (We could defer the "is 'this' pointer null" test to the common 3253 * method invocation code, and use a flag to indicate that static 3254 * calls don't count. If we do this as part of copying the arguments 3255 * out we could avoiding loading the first arg twice.) 
3256 * 3257 * for: invoke-direct, invoke-direct/range 3258 */ 3259 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3260 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3261 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3262 FETCH(r1, 1) @ r1<- BBBB 3263 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3264 FETCH(r10, 2) @ r10<- GFED or CCCC 3265 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3266 .if (!0) 3267 and r10, r10, #15 @ r10<- D (or stays CCCC) 3268 .endif 3269 cmp r0, #0 @ already resolved? 3270 EXPORT_PC() @ must export for invoke 3271 GET_VREG(r2, r10) @ r2<- "this" ptr 3272 beq .LOP_INVOKE_DIRECT_resolve @ not resolved, do it now 3273 .LOP_INVOKE_DIRECT_finish: 3274 cmp r2, #0 @ null "this" ref? 3275 bne common_invokeMethodNoRange @ no, continue on 3276 b common_errNullObject @ yes, throw exception 3277 3278 /* ------------------------------ */ 3279 .balign 64 3280 .L_OP_INVOKE_STATIC: /* 0x71 */ 3281 /* File: armv5te/OP_INVOKE_STATIC.S */ 3282 /* 3283 * Handle a static method call. 3284 * 3285 * for: invoke-static, invoke-static/range 3286 */ 3287 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3288 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3289 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3290 FETCH(r1, 1) @ r1<- BBBB 3291 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3292 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3293 cmp r0, #0 @ already resolved? 3294 EXPORT_PC() @ must export for invoke 3295 bne common_invokeMethodNoRange @ yes, continue on 3296 0: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3297 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3298 mov r2, #METHOD_STATIC @ resolver method type 3299 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3300 cmp r0, #0 @ got null? 
3301 bne common_invokeMethodNoRange @ no, continue 3302 b common_exceptionThrown @ yes, handle exception 3303 3304 /* ------------------------------ */ 3305 .balign 64 3306 .L_OP_INVOKE_INTERFACE: /* 0x72 */ 3307 /* File: armv5te/OP_INVOKE_INTERFACE.S */ 3308 /* 3309 * Handle an interface method call. 3310 * 3311 * for: invoke-interface, invoke-interface/range 3312 */ 3313 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3314 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3315 FETCH(r2, 2) @ r2<- FEDC or CCCC 3316 FETCH(r1, 1) @ r1<- BBBB 3317 .if (!0) 3318 and r2, r2, #15 @ r2<- C (or stays CCCC) 3319 .endif 3320 EXPORT_PC() @ must export for invoke 3321 GET_VREG(r0, r2) @ r0<- first arg ("this") 3322 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3323 cmp r0, #0 @ null obj? 3324 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3325 beq common_errNullObject @ yes, fail 3326 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3327 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3328 cmp r0, #0 @ failed? 3329 beq common_exceptionThrown @ yes, handle exception 3330 b common_invokeMethodNoRange @ jump to common handler 3331 3332 /* ------------------------------ */ 3333 .balign 64 3334 .L_OP_UNUSED_73: /* 0x73 */ 3335 /* File: armv5te/OP_UNUSED_73.S */ 3336 /* File: armv5te/unused.S */ 3337 bl common_abort 3338 3339 3340 /* ------------------------------ */ 3341 .balign 64 3342 .L_OP_INVOKE_VIRTUAL_RANGE: /* 0x74 */ 3343 /* File: armv5te/OP_INVOKE_VIRTUAL_RANGE.S */ 3344 /* File: armv5te/OP_INVOKE_VIRTUAL.S */ 3345 /* 3346 * Handle a virtual method call. 
3347 * 3348 * for: invoke-virtual, invoke-virtual/range 3349 */ 3350 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3351 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3352 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3353 FETCH(r1, 1) @ r1<- BBBB 3354 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3355 FETCH(r10, 2) @ r10<- GFED or CCCC 3356 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3357 .if (!1) 3358 and r10, r10, #15 @ r10<- D (or stays CCCC) 3359 .endif 3360 cmp r0, #0 @ already resolved? 3361 EXPORT_PC() @ must export for invoke 3362 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ yes, continue on 3363 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3364 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3365 mov r2, #METHOD_VIRTUAL @ resolver method type 3366 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3367 cmp r0, #0 @ got null? 3368 bne .LOP_INVOKE_VIRTUAL_RANGE_continue @ no, continue 3369 b common_exceptionThrown @ yes, handle exception 3370 3371 3372 /* ------------------------------ */ 3373 .balign 64 3374 .L_OP_INVOKE_SUPER_RANGE: /* 0x75 */ 3375 /* File: armv5te/OP_INVOKE_SUPER_RANGE.S */ 3376 /* File: armv5te/OP_INVOKE_SUPER.S */ 3377 /* 3378 * Handle a "super" method call. 3379 * 3380 * for: invoke-super, invoke-super/range 3381 */ 3382 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3383 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3384 FETCH(r10, 2) @ r10<- GFED or CCCC 3385 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3386 .if (!1) 3387 and r10, r10, #15 @ r10<- D (or stays CCCC) 3388 .endif 3389 FETCH(r1, 1) @ r1<- BBBB 3390 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3391 GET_VREG(r2, r10) @ r2<- "this" ptr 3392 ldr r0, [r3, r1, lsl #2] @ r0<- resolved baseMethod 3393 cmp r2, #0 @ null "this"? 3394 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 3395 beq common_errNullObject @ null "this", throw exception 3396 cmp r0, #0 @ already resolved? 
3397 ldr r9, [r9, #offMethod_clazz] @ r9<- method->clazz 3398 EXPORT_PC() @ must export for invoke 3399 bne .LOP_INVOKE_SUPER_RANGE_continue @ resolved, continue on 3400 b .LOP_INVOKE_SUPER_RANGE_resolve @ do resolve now 3401 3402 3403 /* ------------------------------ */ 3404 .balign 64 3405 .L_OP_INVOKE_DIRECT_RANGE: /* 0x76 */ 3406 /* File: armv5te/OP_INVOKE_DIRECT_RANGE.S */ 3407 /* File: armv5te/OP_INVOKE_DIRECT.S */ 3408 /* 3409 * Handle a direct method call. 3410 * 3411 * (We could defer the "is 'this' pointer null" test to the common 3412 * method invocation code, and use a flag to indicate that static 3413 * calls don't count. If we do this as part of copying the arguments 3414 * out we could avoiding loading the first arg twice.) 3415 * 3416 * for: invoke-direct, invoke-direct/range 3417 */ 3418 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3419 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3420 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3421 FETCH(r1, 1) @ r1<- BBBB 3422 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3423 FETCH(r10, 2) @ r10<- GFED or CCCC 3424 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3425 .if (!1) 3426 and r10, r10, #15 @ r10<- D (or stays CCCC) 3427 .endif 3428 cmp r0, #0 @ already resolved? 3429 EXPORT_PC() @ must export for invoke 3430 GET_VREG(r2, r10) @ r2<- "this" ptr 3431 beq .LOP_INVOKE_DIRECT_RANGE_resolve @ not resolved, do it now 3432 .LOP_INVOKE_DIRECT_RANGE_finish: 3433 cmp r2, #0 @ null "this" ref? 3434 bne common_invokeMethodRange @ no, continue on 3435 b common_errNullObject @ yes, throw exception 3436 3437 3438 /* ------------------------------ */ 3439 .balign 64 3440 .L_OP_INVOKE_STATIC_RANGE: /* 0x77 */ 3441 /* File: armv5te/OP_INVOKE_STATIC_RANGE.S */ 3442 /* File: armv5te/OP_INVOKE_STATIC.S */ 3443 /* 3444 * Handle a static method call. 
3445 * 3446 * for: invoke-static, invoke-static/range 3447 */ 3448 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3449 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3450 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- pDvmDex 3451 FETCH(r1, 1) @ r1<- BBBB 3452 ldr r3, [r3, #offDvmDex_pResMethods] @ r3<- pDvmDex->pResMethods 3453 ldr r0, [r3, r1, lsl #2] @ r0<- resolved methodToCall 3454 cmp r0, #0 @ already resolved? 3455 EXPORT_PC() @ must export for invoke 3456 bne common_invokeMethodRange @ yes, continue on 3457 0: ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 3458 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 3459 mov r2, #METHOD_STATIC @ resolver method type 3460 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 3461 cmp r0, #0 @ got null? 3462 bne common_invokeMethodRange @ no, continue 3463 b common_exceptionThrown @ yes, handle exception 3464 3465 3466 /* ------------------------------ */ 3467 .balign 64 3468 .L_OP_INVOKE_INTERFACE_RANGE: /* 0x78 */ 3469 /* File: armv5te/OP_INVOKE_INTERFACE_RANGE.S */ 3470 /* File: armv5te/OP_INVOKE_INTERFACE.S */ 3471 /* 3472 * Handle an interface method call. 3473 * 3474 * for: invoke-interface, invoke-interface/range 3475 */ 3476 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 3477 /* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 3478 FETCH(r2, 2) @ r2<- FEDC or CCCC 3479 FETCH(r1, 1) @ r1<- BBBB 3480 .if (!1) 3481 and r2, r2, #15 @ r2<- C (or stays CCCC) 3482 .endif 3483 EXPORT_PC() @ must export for invoke 3484 GET_VREG(r0, r2) @ r0<- first arg ("this") 3485 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- methodClassDex 3486 cmp r0, #0 @ null obj? 3487 ldr r2, [rGLUE, #offGlue_method] @ r2<- method 3488 beq common_errNullObject @ yes, fail 3489 ldr r0, [r0, #offObject_clazz] @ r0<- thisPtr->clazz 3490 bl dvmFindInterfaceMethodInCache @ r0<- call(class, ref, method, dex) 3491 cmp r0, #0 @ failed? 
3492 beq common_exceptionThrown @ yes, handle exception 3493 b common_invokeMethodRange @ jump to common handler 3494 3495 3496 /* ------------------------------ */ 3497 .balign 64 3498 .L_OP_UNUSED_79: /* 0x79 */ 3499 /* File: armv5te/OP_UNUSED_79.S */ 3500 /* File: armv5te/unused.S */ 3501 bl common_abort 3502 3503 3504 /* ------------------------------ */ 3505 .balign 64 3506 .L_OP_UNUSED_7A: /* 0x7a */ 3507 /* File: armv5te/OP_UNUSED_7A.S */ 3508 /* File: armv5te/unused.S */ 3509 bl common_abort 3510 3511 3512 /* ------------------------------ */ 3513 .balign 64 3514 .L_OP_NEG_INT: /* 0x7b */ 3515 /* File: armv5te/OP_NEG_INT.S */ 3516 /* File: armv5te/unop.S */ 3517 /* 3518 * Generic 32-bit unary operation. Provide an "instr" line that 3519 * specifies an instruction that performs "result = op r0". 3520 * This could be an ARM instruction or a function call. 3521 * 3522 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3523 * int-to-byte, int-to-char, int-to-short 3524 */ 3525 /* unop vA, vB */ 3526 mov r3, rINST, lsr #12 @ r3<- B 3527 mov r9, rINST, lsr #8 @ r9<- A+ 3528 GET_VREG(r0, r3) @ r0<- vB 3529 and r9, r9, #15 3530 @ optional op; may set condition codes 3531 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3532 rsb r0, r0, #0 @ r0<- op, r0-r3 changed 3533 GET_INST_OPCODE(ip) @ extract opcode from rINST 3534 SET_VREG(r0, r9) @ vAA<- r0 3535 GOTO_OPCODE(ip) @ jump to next instruction 3536 /* 9-10 instructions */ 3537 3538 3539 /* ------------------------------ */ 3540 .balign 64 3541 .L_OP_NOT_INT: /* 0x7c */ 3542 /* File: armv5te/OP_NOT_INT.S */ 3543 /* File: armv5te/unop.S */ 3544 /* 3545 * Generic 32-bit unary operation. Provide an "instr" line that 3546 * specifies an instruction that performs "result = op r0". 3547 * This could be an ARM instruction or a function call. 
3548 * 3549 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3550 * int-to-byte, int-to-char, int-to-short 3551 */ 3552 /* unop vA, vB */ 3553 mov r3, rINST, lsr #12 @ r3<- B 3554 mov r9, rINST, lsr #8 @ r9<- A+ 3555 GET_VREG(r0, r3) @ r0<- vB 3556 and r9, r9, #15 3557 @ optional op; may set condition codes 3558 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3559 mvn r0, r0 @ r0<- op, r0-r3 changed 3560 GET_INST_OPCODE(ip) @ extract opcode from rINST 3561 SET_VREG(r0, r9) @ vAA<- r0 3562 GOTO_OPCODE(ip) @ jump to next instruction 3563 /* 9-10 instructions */ 3564 3565 3566 /* ------------------------------ */ 3567 .balign 64 3568 .L_OP_NEG_LONG: /* 0x7d */ 3569 /* File: armv5te/OP_NEG_LONG.S */ 3570 /* File: armv5te/unopWide.S */ 3571 /* 3572 * Generic 64-bit unary operation. Provide an "instr" line that 3573 * specifies an instruction that performs "result = op r0/r1". 3574 * This could be an ARM instruction or a function call. 3575 * 3576 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3577 */ 3578 /* unop vA, vB */ 3579 mov r9, rINST, lsr #8 @ r9<- A+ 3580 mov r3, rINST, lsr #12 @ r3<- B 3581 and r9, r9, #15 3582 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3583 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3584 ldmia r3, {r0-r1} @ r0/r1<- vAA 3585 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3586 rsbs r0, r0, #0 @ optional op; may set condition codes 3587 rsc r1, r1, #0 @ r0/r1<- op, r2-r3 changed 3588 GET_INST_OPCODE(ip) @ extract opcode from rINST 3589 stmia r9, {r0-r1} @ vAA<- r0/r1 3590 GOTO_OPCODE(ip) @ jump to next instruction 3591 /* 12-13 instructions */ 3592 3593 3594 /* ------------------------------ */ 3595 .balign 64 3596 .L_OP_NOT_LONG: /* 0x7e */ 3597 /* File: armv5te/OP_NOT_LONG.S */ 3598 /* File: armv5te/unopWide.S */ 3599 /* 3600 * Generic 64-bit unary operation. Provide an "instr" line that 3601 * specifies an instruction that performs "result = op r0/r1". 3602 * This could be an ARM instruction or a function call. 
3603 * 3604 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3605 */ 3606 /* unop vA, vB */ 3607 mov r9, rINST, lsr #8 @ r9<- A+ 3608 mov r3, rINST, lsr #12 @ r3<- B 3609 and r9, r9, #15 3610 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3611 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3612 ldmia r3, {r0-r1} @ r0/r1<- vAA 3613 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3614 mvn r0, r0 @ optional op; may set condition codes 3615 mvn r1, r1 @ r0/r1<- op, r2-r3 changed 3616 GET_INST_OPCODE(ip) @ extract opcode from rINST 3617 stmia r9, {r0-r1} @ vAA<- r0/r1 3618 GOTO_OPCODE(ip) @ jump to next instruction 3619 /* 12-13 instructions */ 3620 3621 3622 /* ------------------------------ */ 3623 .balign 64 3624 .L_OP_NEG_FLOAT: /* 0x7f */ 3625 /* File: armv5te/OP_NEG_FLOAT.S */ 3626 /* File: armv5te/unop.S */ 3627 /* 3628 * Generic 32-bit unary operation. Provide an "instr" line that 3629 * specifies an instruction that performs "result = op r0". 3630 * This could be an ARM instruction or a function call. 3631 * 3632 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3633 * int-to-byte, int-to-char, int-to-short 3634 */ 3635 /* unop vA, vB */ 3636 mov r3, rINST, lsr #12 @ r3<- B 3637 mov r9, rINST, lsr #8 @ r9<- A+ 3638 GET_VREG(r0, r3) @ r0<- vB 3639 and r9, r9, #15 3640 @ optional op; may set condition codes 3641 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3642 add r0, r0, #0x80000000 @ r0<- op, r0-r3 changed 3643 GET_INST_OPCODE(ip) @ extract opcode from rINST 3644 SET_VREG(r0, r9) @ vAA<- r0 3645 GOTO_OPCODE(ip) @ jump to next instruction 3646 /* 9-10 instructions */ 3647 3648 3649 /* ------------------------------ */ 3650 .balign 64 3651 .L_OP_NEG_DOUBLE: /* 0x80 */ 3652 /* File: armv5te/OP_NEG_DOUBLE.S */ 3653 /* File: armv5te/unopWide.S */ 3654 /* 3655 * Generic 64-bit unary operation. Provide an "instr" line that 3656 * specifies an instruction that performs "result = op r0/r1". 3657 * This could be an ARM instruction or a function call. 
3658 * 3659 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3660 */ 3661 /* unop vA, vB */ 3662 mov r9, rINST, lsr #8 @ r9<- A+ 3663 mov r3, rINST, lsr #12 @ r3<- B 3664 and r9, r9, #15 3665 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3666 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3667 ldmia r3, {r0-r1} @ r0/r1<- vAA 3668 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3669 @ optional op; may set condition codes 3670 add r1, r1, #0x80000000 @ r0/r1<- op, r2-r3 changed 3671 GET_INST_OPCODE(ip) @ extract opcode from rINST 3672 stmia r9, {r0-r1} @ vAA<- r0/r1 3673 GOTO_OPCODE(ip) @ jump to next instruction 3674 /* 12-13 instructions */ 3675 3676 3677 /* ------------------------------ */ 3678 .balign 64 3679 .L_OP_INT_TO_LONG: /* 0x81 */ 3680 /* File: armv5te/OP_INT_TO_LONG.S */ 3681 /* File: armv5te/unopWider.S */ 3682 /* 3683 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3684 * that specifies an instruction that performs "result = op r0", where 3685 * "result" is a 64-bit quantity in r0/r1. 3686 * 3687 * For: int-to-long, int-to-double, float-to-long, float-to-double 3688 */ 3689 /* unop vA, vB */ 3690 mov r9, rINST, lsr #8 @ r9<- A+ 3691 mov r3, rINST, lsr #12 @ r3<- B 3692 and r9, r9, #15 3693 GET_VREG(r0, r3) @ r0<- vB 3694 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3695 @ optional op; may set condition codes 3696 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3697 mov r1, r0, asr #31 @ r0<- op, r0-r3 changed 3698 GET_INST_OPCODE(ip) @ extract opcode from rINST 3699 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3700 GOTO_OPCODE(ip) @ jump to next instruction 3701 /* 10-11 instructions */ 3702 3703 3704 /* ------------------------------ */ 3705 .balign 64 3706 .L_OP_INT_TO_FLOAT: /* 0x82 */ 3707 /* File: armv5te/OP_INT_TO_FLOAT.S */ 3708 /* File: armv5te/unop.S */ 3709 /* 3710 * Generic 32-bit unary operation. Provide an "instr" line that 3711 * specifies an instruction that performs "result = op r0". 
3712 * This could be an ARM instruction or a function call. 3713 * 3714 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3715 * int-to-byte, int-to-char, int-to-short 3716 */ 3717 /* unop vA, vB */ 3718 mov r3, rINST, lsr #12 @ r3<- B 3719 mov r9, rINST, lsr #8 @ r9<- A+ 3720 GET_VREG(r0, r3) @ r0<- vB 3721 and r9, r9, #15 3722 @ optional op; may set condition codes 3723 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3724 bl __aeabi_i2f @ r0<- op, r0-r3 changed 3725 GET_INST_OPCODE(ip) @ extract opcode from rINST 3726 SET_VREG(r0, r9) @ vAA<- r0 3727 GOTO_OPCODE(ip) @ jump to next instruction 3728 /* 9-10 instructions */ 3729 3730 3731 /* ------------------------------ */ 3732 .balign 64 3733 .L_OP_INT_TO_DOUBLE: /* 0x83 */ 3734 /* File: armv5te/OP_INT_TO_DOUBLE.S */ 3735 /* File: armv5te/unopWider.S */ 3736 /* 3737 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3738 * that specifies an instruction that performs "result = op r0", where 3739 * "result" is a 64-bit quantity in r0/r1. 
3740 * 3741 * For: int-to-long, int-to-double, float-to-long, float-to-double 3742 */ 3743 /* unop vA, vB */ 3744 mov r9, rINST, lsr #8 @ r9<- A+ 3745 mov r3, rINST, lsr #12 @ r3<- B 3746 and r9, r9, #15 3747 GET_VREG(r0, r3) @ r0<- vB 3748 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3749 @ optional op; may set condition codes 3750 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3751 bl __aeabi_i2d @ r0<- op, r0-r3 changed 3752 GET_INST_OPCODE(ip) @ extract opcode from rINST 3753 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3754 GOTO_OPCODE(ip) @ jump to next instruction 3755 /* 10-11 instructions */ 3756 3757 3758 /* ------------------------------ */ 3759 .balign 64 3760 .L_OP_LONG_TO_INT: /* 0x84 */ 3761 /* File: armv5te/OP_LONG_TO_INT.S */ 3762 /* we ignore the high word, making this equivalent to a 32-bit reg move */ 3763 /* File: armv5te/OP_MOVE.S */ 3764 /* for move, move-object, long-to-int */ 3765 /* op vA, vB */ 3766 mov r1, rINST, lsr #12 @ r1<- B from 15:12 3767 mov r0, rINST, lsr #8 @ r0<- A from 11:8 3768 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3769 GET_VREG(r2, r1) @ r2<- fp[B] 3770 and r0, r0, #15 3771 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 3772 SET_VREG(r2, r0) @ fp[A]<- r2 3773 GOTO_OPCODE(ip) @ execute next instruction 3774 3775 3776 /* ------------------------------ */ 3777 .balign 64 3778 .L_OP_LONG_TO_FLOAT: /* 0x85 */ 3779 /* File: armv5te/OP_LONG_TO_FLOAT.S */ 3780 /* File: armv5te/unopNarrower.S */ 3781 /* 3782 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3783 * that specifies an instruction that performs "result = op r0/r1", where 3784 * "result" is a 32-bit quantity in r0. 3785 * 3786 * For: long-to-float, double-to-int, double-to-float 3787 * 3788 * (This would work for long-to-int, but that instruction is actually 3789 * an exact match for OP_MOVE.) 
3790 */ 3791 /* unop vA, vB */ 3792 mov r3, rINST, lsr #12 @ r3<- B 3793 mov r9, rINST, lsr #8 @ r9<- A+ 3794 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3795 and r9, r9, #15 3796 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3797 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3798 @ optional op; may set condition codes 3799 bl __aeabi_l2f @ r0<- op, r0-r3 changed 3800 GET_INST_OPCODE(ip) @ extract opcode from rINST 3801 SET_VREG(r0, r9) @ vA<- r0 3802 GOTO_OPCODE(ip) @ jump to next instruction 3803 /* 10-11 instructions */ 3804 3805 3806 /* ------------------------------ */ 3807 .balign 64 3808 .L_OP_LONG_TO_DOUBLE: /* 0x86 */ 3809 /* File: armv5te/OP_LONG_TO_DOUBLE.S */ 3810 /* File: armv5te/unopWide.S */ 3811 /* 3812 * Generic 64-bit unary operation. Provide an "instr" line that 3813 * specifies an instruction that performs "result = op r0/r1". 3814 * This could be an ARM instruction or a function call. 3815 * 3816 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 3817 */ 3818 /* unop vA, vB */ 3819 mov r9, rINST, lsr #8 @ r9<- A+ 3820 mov r3, rINST, lsr #12 @ r3<- B 3821 and r9, r9, #15 3822 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3823 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3824 ldmia r3, {r0-r1} @ r0/r1<- vAA 3825 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3826 @ optional op; may set condition codes 3827 bl __aeabi_l2d @ r0/r1<- op, r2-r3 changed 3828 GET_INST_OPCODE(ip) @ extract opcode from rINST 3829 stmia r9, {r0-r1} @ vAA<- r0/r1 3830 GOTO_OPCODE(ip) @ jump to next instruction 3831 /* 12-13 instructions */ 3832 3833 3834 /* ------------------------------ */ 3835 .balign 64 3836 .L_OP_FLOAT_TO_INT: /* 0x87 */ 3837 /* File: armv5te/OP_FLOAT_TO_INT.S */ 3838 /* EABI appears to have Java-style conversions of +inf/-inf/NaN */ 3839 /* File: armv5te/unop.S */ 3840 /* 3841 * Generic 32-bit unary operation. Provide an "instr" line that 3842 * specifies an instruction that performs "result = op r0". 3843 * This could be an ARM instruction or a function call. 
3844 * 3845 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 3846 * int-to-byte, int-to-char, int-to-short 3847 */ 3848 /* unop vA, vB */ 3849 mov r3, rINST, lsr #12 @ r3<- B 3850 mov r9, rINST, lsr #8 @ r9<- A+ 3851 GET_VREG(r0, r3) @ r0<- vB 3852 and r9, r9, #15 3853 @ optional op; may set condition codes 3854 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3855 bl __aeabi_f2iz @ r0<- op, r0-r3 changed 3856 GET_INST_OPCODE(ip) @ extract opcode from rINST 3857 SET_VREG(r0, r9) @ vAA<- r0 3858 GOTO_OPCODE(ip) @ jump to next instruction 3859 /* 9-10 instructions */ 3860 3861 3862 #if 0 3863 @include "armv5te/unop.S" {"instr":"bl f2i_doconv"} 3864 @break 3865 /* 3866 * Convert the float in r0 to an int in r0. 3867 * 3868 * We have to clip values to int min/max per the specification. The 3869 * expected common case is a "reasonable" value that converts directly 3870 * to modest integer. The EABI convert function isn't doing this for us. 3871 */ 3872 f2i_doconv: 3873 stmfd sp!, {r4, lr} 3874 mov r1, #0x4f000000 @ (float)maxint 3875 mov r4, r0 3876 bl __aeabi_fcmpge @ is arg >= maxint? 3877 cmp r0, #0 @ nonzero == yes 3878 mvnne r0, #0x80000000 @ return maxint (7fffffff) 3879 ldmnefd sp!, {r4, pc} 3880 3881 mov r0, r4 @ recover arg 3882 mov r1, #0xcf000000 @ (float)minint 3883 bl __aeabi_fcmple @ is arg <= minint? 3884 cmp r0, #0 @ nonzero == yes 3885 movne r0, #0x80000000 @ return minint (80000000) 3886 ldmnefd sp!, {r4, pc} 3887 3888 mov r0, r4 @ recover arg 3889 mov r1, r4 3890 bl __aeabi_fcmpeq @ is arg == self? 
3891 cmp r0, #0 @ zero == no 3892 ldmeqfd sp!, {r4, pc} @ return zero for NaN 3893 3894 mov r0, r4 @ recover arg 3895 bl __aeabi_f2iz @ convert float to int 3896 ldmfd sp!, {r4, pc} 3897 #endif 3898 3899 /* ------------------------------ */ 3900 .balign 64 3901 .L_OP_FLOAT_TO_LONG: /* 0x88 */ 3902 /* File: armv5te/OP_FLOAT_TO_LONG.S */ 3903 @include "armv5te/unopWider.S" {"instr":"bl __aeabi_f2lz"} 3904 /* File: armv5te/unopWider.S */ 3905 /* 3906 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3907 * that specifies an instruction that performs "result = op r0", where 3908 * "result" is a 64-bit quantity in r0/r1. 3909 * 3910 * For: int-to-long, int-to-double, float-to-long, float-to-double 3911 */ 3912 /* unop vA, vB */ 3913 mov r9, rINST, lsr #8 @ r9<- A+ 3914 mov r3, rINST, lsr #12 @ r3<- B 3915 and r9, r9, #15 3916 GET_VREG(r0, r3) @ r0<- vB 3917 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3918 @ optional op; may set condition codes 3919 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3920 bl f2l_doconv @ r0<- op, r0-r3 changed 3921 GET_INST_OPCODE(ip) @ extract opcode from rINST 3922 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3923 GOTO_OPCODE(ip) @ jump to next instruction 3924 /* 10-11 instructions */ 3925 3926 3927 3928 /* ------------------------------ */ 3929 .balign 64 3930 .L_OP_FLOAT_TO_DOUBLE: /* 0x89 */ 3931 /* File: armv5te/OP_FLOAT_TO_DOUBLE.S */ 3932 /* File: armv5te/unopWider.S */ 3933 /* 3934 * Generic 32bit-to-64bit unary operation. Provide an "instr" line 3935 * that specifies an instruction that performs "result = op r0", where 3936 * "result" is a 64-bit quantity in r0/r1. 
3937 * 3938 * For: int-to-long, int-to-double, float-to-long, float-to-double 3939 */ 3940 /* unop vA, vB */ 3941 mov r9, rINST, lsr #8 @ r9<- A+ 3942 mov r3, rINST, lsr #12 @ r3<- B 3943 and r9, r9, #15 3944 GET_VREG(r0, r3) @ r0<- vB 3945 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 3946 @ optional op; may set condition codes 3947 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3948 bl __aeabi_f2d @ r0<- op, r0-r3 changed 3949 GET_INST_OPCODE(ip) @ extract opcode from rINST 3950 stmia r9, {r0-r1} @ vA/vA+1<- r0/r1 3951 GOTO_OPCODE(ip) @ jump to next instruction 3952 /* 10-11 instructions */ 3953 3954 3955 /* ------------------------------ */ 3956 .balign 64 3957 .L_OP_DOUBLE_TO_INT: /* 0x8a */ 3958 /* File: armv5te/OP_DOUBLE_TO_INT.S */ 3959 /* EABI appears to have Java-style conversions of +inf/-inf/NaN */ 3960 /* File: armv5te/unopNarrower.S */ 3961 /* 3962 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 3963 * that specifies an instruction that performs "result = op r0/r1", where 3964 * "result" is a 32-bit quantity in r0. 3965 * 3966 * For: long-to-float, double-to-int, double-to-float 3967 * 3968 * (This would work for long-to-int, but that instruction is actually 3969 * an exact match for OP_MOVE.) 3970 */ 3971 /* unop vA, vB */ 3972 mov r3, rINST, lsr #12 @ r3<- B 3973 mov r9, rINST, lsr #8 @ r9<- A+ 3974 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 3975 and r9, r9, #15 3976 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 3977 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 3978 @ optional op; may set condition codes 3979 bl __aeabi_d2iz @ r0<- op, r0-r3 changed 3980 GET_INST_OPCODE(ip) @ extract opcode from rINST 3981 SET_VREG(r0, r9) @ vA<- r0 3982 GOTO_OPCODE(ip) @ jump to next instruction 3983 /* 10-11 instructions */ 3984 3985 3986 #if 0 3987 @include "armv5te/unopNarrower.S" {"instr":"bl d2i_doconv"} 3988 @break 3989 /* 3990 * Convert the double in r0/r1 to an int in r0. 3991 * 3992 * We have to clip values to int min/max per the specification. 
The 3993 * expected common case is a "reasonable" value that converts directly 3994 * to modest integer. The EABI convert function isn't doing this for us. 3995 */ 3996 d2i_doconv: 3997 stmfd sp!, {r4, r5, lr} @ save regs 3998 mov r2, #0x80000000 @ maxint, as a double (low word) 3999 mov r2, r2, asr #9 @ 0xffc00000 4000 sub sp, sp, #4 @ align for EABI 4001 mvn r3, #0xbe000000 @ maxint, as a double (high word) 4002 sub r3, r3, #0x00200000 @ 0x41dfffff 4003 mov r4, r0 @ save a copy of r0 4004 mov r5, r1 @ and r1 4005 bl __aeabi_dcmpge @ is arg >= maxint? 4006 cmp r0, #0 @ nonzero == yes 4007 mvnne r0, #0x80000000 @ return maxint (0x7fffffff) 4008 bne 1f 4009 4010 mov r0, r4 @ recover arg 4011 mov r1, r5 4012 mov r3, #0xc1000000 @ minint, as a double (high word) 4013 add r3, r3, #0x00e00000 @ 0xc1e00000 4014 mov r2, #0 @ minint, as a double (low word) 4015 bl __aeabi_dcmple @ is arg <= minint? 4016 cmp r0, #0 @ nonzero == yes 4017 movne r0, #0x80000000 @ return minint (80000000) 4018 bne 1f 4019 4020 mov r0, r4 @ recover arg 4021 mov r1, r5 4022 mov r2, r4 @ compare against self 4023 mov r3, r5 4024 bl __aeabi_dcmpeq @ is arg == self? 4025 cmp r0, #0 @ zero == no 4026 beq 1f @ return zero for NaN 4027 4028 mov r0, r4 @ recover arg 4029 mov r1, r5 4030 bl __aeabi_d2iz @ convert double to int 4031 4032 1: 4033 add sp, sp, #4 4034 ldmfd sp!, {r4, r5, pc} 4035 #endif 4036 4037 /* ------------------------------ */ 4038 .balign 64 4039 .L_OP_DOUBLE_TO_LONG: /* 0x8b */ 4040 /* File: armv5te/OP_DOUBLE_TO_LONG.S */ 4041 @include "armv5te/unopWide.S" {"instr":"bl __aeabi_d2lz"} 4042 /* File: armv5te/unopWide.S */ 4043 /* 4044 * Generic 64-bit unary operation. Provide an "instr" line that 4045 * specifies an instruction that performs "result = op r0/r1". 4046 * This could be an ARM instruction or a function call. 
4047 * 4048 * For: neg-long, not-long, neg-double, long-to-double, double-to-long 4049 */ 4050 /* unop vA, vB */ 4051 mov r9, rINST, lsr #8 @ r9<- A+ 4052 mov r3, rINST, lsr #12 @ r3<- B 4053 and r9, r9, #15 4054 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4055 add r9, rFP, r9, lsl #2 @ r9<- &fp[A] 4056 ldmia r3, {r0-r1} @ r0/r1<- vAA 4057 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4058 @ optional op; may set condition codes 4059 bl d2l_doconv @ r0/r1<- op, r2-r3 changed 4060 GET_INST_OPCODE(ip) @ extract opcode from rINST 4061 stmia r9, {r0-r1} @ vAA<- r0/r1 4062 GOTO_OPCODE(ip) @ jump to next instruction 4063 /* 12-13 instructions */ 4064 4065 4066 4067 /* ------------------------------ */ 4068 .balign 64 4069 .L_OP_DOUBLE_TO_FLOAT: /* 0x8c */ 4070 /* File: armv5te/OP_DOUBLE_TO_FLOAT.S */ 4071 /* File: armv5te/unopNarrower.S */ 4072 /* 4073 * Generic 64bit-to-32bit unary operation. Provide an "instr" line 4074 * that specifies an instruction that performs "result = op r0/r1", where 4075 * "result" is a 32-bit quantity in r0. 4076 * 4077 * For: long-to-float, double-to-int, double-to-float 4078 * 4079 * (This would work for long-to-int, but that instruction is actually 4080 * an exact match for OP_MOVE.) 4081 */ 4082 /* unop vA, vB */ 4083 mov r3, rINST, lsr #12 @ r3<- B 4084 mov r9, rINST, lsr #8 @ r9<- A+ 4085 add r3, rFP, r3, lsl #2 @ r3<- &fp[B] 4086 and r9, r9, #15 4087 ldmia r3, {r0-r1} @ r0/r1<- vB/vB+1 4088 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4089 @ optional op; may set condition codes 4090 bl __aeabi_d2f @ r0<- op, r0-r3 changed 4091 GET_INST_OPCODE(ip) @ extract opcode from rINST 4092 SET_VREG(r0, r9) @ vA<- r0 4093 GOTO_OPCODE(ip) @ jump to next instruction 4094 /* 10-11 instructions */ 4095 4096 4097 /* ------------------------------ */ 4098 .balign 64 4099 .L_OP_INT_TO_BYTE: /* 0x8d */ 4100 /* File: armv5te/OP_INT_TO_BYTE.S */ 4101 /* File: armv5te/unop.S */ 4102 /* 4103 * Generic 32-bit unary operation. 
Provide an "instr" line that 4104 * specifies an instruction that performs "result = op r0". 4105 * This could be an ARM instruction or a function call. 4106 * 4107 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4108 * int-to-byte, int-to-char, int-to-short 4109 */ 4110 /* unop vA, vB */ 4111 mov r3, rINST, lsr #12 @ r3<- B 4112 mov r9, rINST, lsr #8 @ r9<- A+ 4113 GET_VREG(r0, r3) @ r0<- vB 4114 and r9, r9, #15 4115 mov r0, r0, asl #24 @ optional op; may set condition codes 4116 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4117 mov r0, r0, asr #24 @ r0<- op, r0-r3 changed 4118 GET_INST_OPCODE(ip) @ extract opcode from rINST 4119 SET_VREG(r0, r9) @ vAA<- r0 4120 GOTO_OPCODE(ip) @ jump to next instruction 4121 /* 9-10 instructions */ 4122 4123 4124 /* ------------------------------ */ 4125 .balign 64 4126 .L_OP_INT_TO_CHAR: /* 0x8e */ 4127 /* File: armv5te/OP_INT_TO_CHAR.S */ 4128 /* File: armv5te/unop.S */ 4129 /* 4130 * Generic 32-bit unary operation. Provide an "instr" line that 4131 * specifies an instruction that performs "result = op r0". 4132 * This could be an ARM instruction or a function call. 4133 * 4134 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4135 * int-to-byte, int-to-char, int-to-short 4136 */ 4137 /* unop vA, vB */ 4138 mov r3, rINST, lsr #12 @ r3<- B 4139 mov r9, rINST, lsr #8 @ r9<- A+ 4140 GET_VREG(r0, r3) @ r0<- vB 4141 and r9, r9, #15 4142 mov r0, r0, asl #16 @ optional op; may set condition codes 4143 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4144 mov r0, r0, lsr #16 @ r0<- op, r0-r3 changed 4145 GET_INST_OPCODE(ip) @ extract opcode from rINST 4146 SET_VREG(r0, r9) @ vAA<- r0 4147 GOTO_OPCODE(ip) @ jump to next instruction 4148 /* 9-10 instructions */ 4149 4150 4151 /* ------------------------------ */ 4152 .balign 64 4153 .L_OP_INT_TO_SHORT: /* 0x8f */ 4154 /* File: armv5te/OP_INT_TO_SHORT.S */ 4155 /* File: armv5te/unop.S */ 4156 /* 4157 * Generic 32-bit unary operation. 
Provide an "instr" line that 4158 * specifies an instruction that performs "result = op r0". 4159 * This could be an ARM instruction or a function call. 4160 * 4161 * for: neg-int, not-int, neg-float, int-to-float, float-to-int, 4162 * int-to-byte, int-to-char, int-to-short 4163 */ 4164 /* unop vA, vB */ 4165 mov r3, rINST, lsr #12 @ r3<- B 4166 mov r9, rINST, lsr #8 @ r9<- A+ 4167 GET_VREG(r0, r3) @ r0<- vB 4168 and r9, r9, #15 4169 mov r0, r0, asl #16 @ optional op; may set condition codes 4170 FETCH_ADVANCE_INST(1) @ advance rPC, load rINST 4171 mov r0, r0, asr #16 @ r0<- op, r0-r3 changed 4172 GET_INST_OPCODE(ip) @ extract opcode from rINST 4173 SET_VREG(r0, r9) @ vAA<- r0 4174 GOTO_OPCODE(ip) @ jump to next instruction 4175 /* 9-10 instructions */ 4176 4177 4178 /* ------------------------------ */ 4179 .balign 64 4180 .L_OP_ADD_INT: /* 0x90 */ 4181 /* File: armv5te/OP_ADD_INT.S */ 4182 /* File: armv5te/binop.S */ 4183 /* 4184 * Generic 32-bit binary operation. Provide an "instr" line that 4185 * specifies an instruction that performs "result = r0 op r1". 4186 * This could be an ARM instruction or a function call. (If the result 4187 * comes back in a register other than r0, you can override "result".) 4188 * 4189 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4190 * vCC (r1). Useful for integer division and modulus. Note that we 4191 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4192 * handles it correctly. 4193 * 4194 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4195 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4196 * mul-float, div-float, rem-float 4197 */ 4198 /* binop vAA, vBB, vCC */ 4199 FETCH(r0, 1) @ r0<- CCBB 4200 mov r9, rINST, lsr #8 @ r9<- AA 4201 mov r3, r0, lsr #8 @ r3<- CC 4202 and r2, r0, #255 @ r2<- BB 4203 GET_VREG(r1, r3) @ r1<- vCC 4204 GET_VREG(r0, r2) @ r0<- vBB 4205 .if 0 4206 cmp r1, #0 @ is second operand zero? 
4207 beq common_errDivideByZero 4208 .endif 4209 4210 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4211 @ optional op; may set condition codes 4212 add r0, r0, r1 @ r0<- op, r0-r3 changed 4213 GET_INST_OPCODE(ip) @ extract opcode from rINST 4214 SET_VREG(r0, r9) @ vAA<- r0 4215 GOTO_OPCODE(ip) @ jump to next instruction 4216 /* 11-14 instructions */ 4217 4218 4219 /* ------------------------------ */ 4220 .balign 64 4221 .L_OP_SUB_INT: /* 0x91 */ 4222 /* File: armv5te/OP_SUB_INT.S */ 4223 /* File: armv5te/binop.S */ 4224 /* 4225 * Generic 32-bit binary operation. Provide an "instr" line that 4226 * specifies an instruction that performs "result = r0 op r1". 4227 * This could be an ARM instruction or a function call. (If the result 4228 * comes back in a register other than r0, you can override "result".) 4229 * 4230 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4231 * vCC (r1). Useful for integer division and modulus. Note that we 4232 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4233 * handles it correctly. 4234 * 4235 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4236 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4237 * mul-float, div-float, rem-float 4238 */ 4239 /* binop vAA, vBB, vCC */ 4240 FETCH(r0, 1) @ r0<- CCBB 4241 mov r9, rINST, lsr #8 @ r9<- AA 4242 mov r3, r0, lsr #8 @ r3<- CC 4243 and r2, r0, #255 @ r2<- BB 4244 GET_VREG(r1, r3) @ r1<- vCC 4245 GET_VREG(r0, r2) @ r0<- vBB 4246 .if 0 4247 cmp r1, #0 @ is second operand zero? 
4248 beq common_errDivideByZero 4249 .endif 4250 4251 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4252 @ optional op; may set condition codes 4253 sub r0, r0, r1 @ r0<- op, r0-r3 changed 4254 GET_INST_OPCODE(ip) @ extract opcode from rINST 4255 SET_VREG(r0, r9) @ vAA<- r0 4256 GOTO_OPCODE(ip) @ jump to next instruction 4257 /* 11-14 instructions */ 4258 4259 4260 /* ------------------------------ */ 4261 .balign 64 4262 .L_OP_MUL_INT: /* 0x92 */ 4263 /* File: armv5te/OP_MUL_INT.S */ 4264 /* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 4265 /* File: armv5te/binop.S */ 4266 /* 4267 * Generic 32-bit binary operation. Provide an "instr" line that 4268 * specifies an instruction that performs "result = r0 op r1". 4269 * This could be an ARM instruction or a function call. (If the result 4270 * comes back in a register other than r0, you can override "result".) 4271 * 4272 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4273 * vCC (r1). Useful for integer division and modulus. Note that we 4274 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4275 * handles it correctly. 4276 * 4277 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4278 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4279 * mul-float, div-float, rem-float 4280 */ 4281 /* binop vAA, vBB, vCC */ 4282 FETCH(r0, 1) @ r0<- CCBB 4283 mov r9, rINST, lsr #8 @ r9<- AA 4284 mov r3, r0, lsr #8 @ r3<- CC 4285 and r2, r0, #255 @ r2<- BB 4286 GET_VREG(r1, r3) @ r1<- vCC 4287 GET_VREG(r0, r2) @ r0<- vBB 4288 .if 0 4289 cmp r1, #0 @ is second operand zero? 
4290 beq common_errDivideByZero 4291 .endif 4292 4293 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4294 @ optional op; may set condition codes 4295 mul r0, r1, r0 @ r0<- op, r0-r3 changed 4296 GET_INST_OPCODE(ip) @ extract opcode from rINST 4297 SET_VREG(r0, r9) @ vAA<- r0 4298 GOTO_OPCODE(ip) @ jump to next instruction 4299 /* 11-14 instructions */ 4300 4301 4302 /* ------------------------------ */ 4303 .balign 64 4304 .L_OP_DIV_INT: /* 0x93 */ 4305 /* File: armv5te/OP_DIV_INT.S */ 4306 /* File: armv5te/binop.S */ 4307 /* 4308 * Generic 32-bit binary operation. Provide an "instr" line that 4309 * specifies an instruction that performs "result = r0 op r1". 4310 * This could be an ARM instruction or a function call. (If the result 4311 * comes back in a register other than r0, you can override "result".) 4312 * 4313 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4314 * vCC (r1). Useful for integer division and modulus. Note that we 4315 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4316 * handles it correctly. 4317 * 4318 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4319 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4320 * mul-float, div-float, rem-float 4321 */ 4322 /* binop vAA, vBB, vCC */ 4323 FETCH(r0, 1) @ r0<- CCBB 4324 mov r9, rINST, lsr #8 @ r9<- AA 4325 mov r3, r0, lsr #8 @ r3<- CC 4326 and r2, r0, #255 @ r2<- BB 4327 GET_VREG(r1, r3) @ r1<- vCC 4328 GET_VREG(r0, r2) @ r0<- vBB 4329 .if 1 4330 cmp r1, #0 @ is second operand zero? 
4331 beq common_errDivideByZero 4332 .endif 4333 4334 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4335 @ optional op; may set condition codes 4336 bl __aeabi_idiv @ r0<- op, r0-r3 changed 4337 GET_INST_OPCODE(ip) @ extract opcode from rINST 4338 SET_VREG(r0, r9) @ vAA<- r0 4339 GOTO_OPCODE(ip) @ jump to next instruction 4340 /* 11-14 instructions */ 4341 4342 4343 /* ------------------------------ */ 4344 .balign 64 4345 .L_OP_REM_INT: /* 0x94 */ 4346 /* File: armv5te/OP_REM_INT.S */ 4347 /* idivmod returns quotient in r0 and remainder in r1 */ 4348 /* File: armv5te/binop.S */ 4349 /* 4350 * Generic 32-bit binary operation. Provide an "instr" line that 4351 * specifies an instruction that performs "result = r0 op r1". 4352 * This could be an ARM instruction or a function call. (If the result 4353 * comes back in a register other than r0, you can override "result".) 4354 * 4355 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4356 * vCC (r1). Useful for integer division and modulus. Note that we 4357 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4358 * handles it correctly. 4359 * 4360 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4361 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4362 * mul-float, div-float, rem-float 4363 */ 4364 /* binop vAA, vBB, vCC */ 4365 FETCH(r0, 1) @ r0<- CCBB 4366 mov r9, rINST, lsr #8 @ r9<- AA 4367 mov r3, r0, lsr #8 @ r3<- CC 4368 and r2, r0, #255 @ r2<- BB 4369 GET_VREG(r1, r3) @ r1<- vCC 4370 GET_VREG(r0, r2) @ r0<- vBB 4371 .if 1 4372 cmp r1, #0 @ is second operand zero? 
4373 beq common_errDivideByZero 4374 .endif 4375 4376 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4377 @ optional op; may set condition codes 4378 bl __aeabi_idivmod @ r1<- op, r0-r3 changed 4379 GET_INST_OPCODE(ip) @ extract opcode from rINST 4380 SET_VREG(r1, r9) @ vAA<- r1 4381 GOTO_OPCODE(ip) @ jump to next instruction 4382 /* 11-14 instructions */ 4383 4384 4385 /* ------------------------------ */ 4386 .balign 64 4387 .L_OP_AND_INT: /* 0x95 */ 4388 /* File: armv5te/OP_AND_INT.S */ 4389 /* File: armv5te/binop.S */ 4390 /* 4391 * Generic 32-bit binary operation. Provide an "instr" line that 4392 * specifies an instruction that performs "result = r0 op r1". 4393 * This could be an ARM instruction or a function call. (If the result 4394 * comes back in a register other than r0, you can override "result".) 4395 * 4396 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4397 * vCC (r1). Useful for integer division and modulus. Note that we 4398 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4399 * handles it correctly. 4400 * 4401 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4402 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4403 * mul-float, div-float, rem-float 4404 */ 4405 /* binop vAA, vBB, vCC */ 4406 FETCH(r0, 1) @ r0<- CCBB 4407 mov r9, rINST, lsr #8 @ r9<- AA 4408 mov r3, r0, lsr #8 @ r3<- CC 4409 and r2, r0, #255 @ r2<- BB 4410 GET_VREG(r1, r3) @ r1<- vCC 4411 GET_VREG(r0, r2) @ r0<- vBB 4412 .if 0 4413 cmp r1, #0 @ is second operand zero? 
4414 beq common_errDivideByZero 4415 .endif 4416 4417 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4418 @ optional op; may set condition codes 4419 and r0, r0, r1 @ r0<- op, r0-r3 changed 4420 GET_INST_OPCODE(ip) @ extract opcode from rINST 4421 SET_VREG(r0, r9) @ vAA<- r0 4422 GOTO_OPCODE(ip) @ jump to next instruction 4423 /* 11-14 instructions */ 4424 4425 4426 /* ------------------------------ */ 4427 .balign 64 4428 .L_OP_OR_INT: /* 0x96 */ 4429 /* File: armv5te/OP_OR_INT.S */ 4430 /* File: armv5te/binop.S */ 4431 /* 4432 * Generic 32-bit binary operation. Provide an "instr" line that 4433 * specifies an instruction that performs "result = r0 op r1". 4434 * This could be an ARM instruction or a function call. (If the result 4435 * comes back in a register other than r0, you can override "result".) 4436 * 4437 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4438 * vCC (r1). Useful for integer division and modulus. Note that we 4439 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4440 * handles it correctly. 4441 * 4442 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4443 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4444 * mul-float, div-float, rem-float 4445 */ 4446 /* binop vAA, vBB, vCC */ 4447 FETCH(r0, 1) @ r0<- CCBB 4448 mov r9, rINST, lsr #8 @ r9<- AA 4449 mov r3, r0, lsr #8 @ r3<- CC 4450 and r2, r0, #255 @ r2<- BB 4451 GET_VREG(r1, r3) @ r1<- vCC 4452 GET_VREG(r0, r2) @ r0<- vBB 4453 .if 0 4454 cmp r1, #0 @ is second operand zero? 
4455 beq common_errDivideByZero 4456 .endif 4457 4458 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4459 @ optional op; may set condition codes 4460 orr r0, r0, r1 @ r0<- op, r0-r3 changed 4461 GET_INST_OPCODE(ip) @ extract opcode from rINST 4462 SET_VREG(r0, r9) @ vAA<- r0 4463 GOTO_OPCODE(ip) @ jump to next instruction 4464 /* 11-14 instructions */ 4465 4466 4467 /* ------------------------------ */ 4468 .balign 64 4469 .L_OP_XOR_INT: /* 0x97 */ 4470 /* File: armv5te/OP_XOR_INT.S */ 4471 /* File: armv5te/binop.S */ 4472 /* 4473 * Generic 32-bit binary operation. Provide an "instr" line that 4474 * specifies an instruction that performs "result = r0 op r1". 4475 * This could be an ARM instruction or a function call. (If the result 4476 * comes back in a register other than r0, you can override "result".) 4477 * 4478 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4479 * vCC (r1). Useful for integer division and modulus. Note that we 4480 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4481 * handles it correctly. 4482 * 4483 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4484 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4485 * mul-float, div-float, rem-float 4486 */ 4487 /* binop vAA, vBB, vCC */ 4488 FETCH(r0, 1) @ r0<- CCBB 4489 mov r9, rINST, lsr #8 @ r9<- AA 4490 mov r3, r0, lsr #8 @ r3<- CC 4491 and r2, r0, #255 @ r2<- BB 4492 GET_VREG(r1, r3) @ r1<- vCC 4493 GET_VREG(r0, r2) @ r0<- vBB 4494 .if 0 4495 cmp r1, #0 @ is second operand zero? 
4496 beq common_errDivideByZero 4497 .endif 4498 4499 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4500 @ optional op; may set condition codes 4501 eor r0, r0, r1 @ r0<- op, r0-r3 changed 4502 GET_INST_OPCODE(ip) @ extract opcode from rINST 4503 SET_VREG(r0, r9) @ vAA<- r0 4504 GOTO_OPCODE(ip) @ jump to next instruction 4505 /* 11-14 instructions */ 4506 4507 4508 /* ------------------------------ */ 4509 .balign 64 4510 .L_OP_SHL_INT: /* 0x98 */ 4511 /* File: armv5te/OP_SHL_INT.S */ 4512 /* File: armv5te/binop.S */ 4513 /* 4514 * Generic 32-bit binary operation. Provide an "instr" line that 4515 * specifies an instruction that performs "result = r0 op r1". 4516 * This could be an ARM instruction or a function call. (If the result 4517 * comes back in a register other than r0, you can override "result".) 4518 * 4519 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4520 * vCC (r1). Useful for integer division and modulus. Note that we 4521 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4522 * handles it correctly. 4523 * 4524 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4525 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4526 * mul-float, div-float, rem-float 4527 */ 4528 /* binop vAA, vBB, vCC */ 4529 FETCH(r0, 1) @ r0<- CCBB 4530 mov r9, rINST, lsr #8 @ r9<- AA 4531 mov r3, r0, lsr #8 @ r3<- CC 4532 and r2, r0, #255 @ r2<- BB 4533 GET_VREG(r1, r3) @ r1<- vCC 4534 GET_VREG(r0, r2) @ r0<- vBB 4535 .if 0 4536 cmp r1, #0 @ is second operand zero? 
4537 beq common_errDivideByZero 4538 .endif 4539 4540 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4541 and r1, r1, #31 @ optional op; may set condition codes 4542 mov r0, r0, asl r1 @ r0<- op, r0-r3 changed 4543 GET_INST_OPCODE(ip) @ extract opcode from rINST 4544 SET_VREG(r0, r9) @ vAA<- r0 4545 GOTO_OPCODE(ip) @ jump to next instruction 4546 /* 11-14 instructions */ 4547 4548 4549 /* ------------------------------ */ 4550 .balign 64 4551 .L_OP_SHR_INT: /* 0x99 */ 4552 /* File: armv5te/OP_SHR_INT.S */ 4553 /* File: armv5te/binop.S */ 4554 /* 4555 * Generic 32-bit binary operation. Provide an "instr" line that 4556 * specifies an instruction that performs "result = r0 op r1". 4557 * This could be an ARM instruction or a function call. (If the result 4558 * comes back in a register other than r0, you can override "result".) 4559 * 4560 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4561 * vCC (r1). Useful for integer division and modulus. Note that we 4562 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4563 * handles it correctly. 4564 * 4565 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4566 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4567 * mul-float, div-float, rem-float 4568 */ 4569 /* binop vAA, vBB, vCC */ 4570 FETCH(r0, 1) @ r0<- CCBB 4571 mov r9, rINST, lsr #8 @ r9<- AA 4572 mov r3, r0, lsr #8 @ r3<- CC 4573 and r2, r0, #255 @ r2<- BB 4574 GET_VREG(r1, r3) @ r1<- vCC 4575 GET_VREG(r0, r2) @ r0<- vBB 4576 .if 0 4577 cmp r1, #0 @ is second operand zero? 
4578 beq common_errDivideByZero 4579 .endif 4580 4581 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4582 and r1, r1, #31 @ optional op; may set condition codes 4583 mov r0, r0, asr r1 @ r0<- op, r0-r3 changed 4584 GET_INST_OPCODE(ip) @ extract opcode from rINST 4585 SET_VREG(r0, r9) @ vAA<- r0 4586 GOTO_OPCODE(ip) @ jump to next instruction 4587 /* 11-14 instructions */ 4588 4589 4590 /* ------------------------------ */ 4591 .balign 64 4592 .L_OP_USHR_INT: /* 0x9a */ 4593 /* File: armv5te/OP_USHR_INT.S */ 4594 /* File: armv5te/binop.S */ 4595 /* 4596 * Generic 32-bit binary operation. Provide an "instr" line that 4597 * specifies an instruction that performs "result = r0 op r1". 4598 * This could be an ARM instruction or a function call. (If the result 4599 * comes back in a register other than r0, you can override "result".) 4600 * 4601 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4602 * vCC (r1). Useful for integer division and modulus. Note that we 4603 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 4604 * handles it correctly. 4605 * 4606 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 4607 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 4608 * mul-float, div-float, rem-float 4609 */ 4610 /* binop vAA, vBB, vCC */ 4611 FETCH(r0, 1) @ r0<- CCBB 4612 mov r9, rINST, lsr #8 @ r9<- AA 4613 mov r3, r0, lsr #8 @ r3<- CC 4614 and r2, r0, #255 @ r2<- BB 4615 GET_VREG(r1, r3) @ r1<- vCC 4616 GET_VREG(r0, r2) @ r0<- vBB 4617 .if 0 4618 cmp r1, #0 @ is second operand zero? 
4619 beq common_errDivideByZero 4620 .endif 4621 4622 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4623 and r1, r1, #31 @ optional op; may set condition codes 4624 mov r0, r0, lsr r1 @ r0<- op, r0-r3 changed 4625 GET_INST_OPCODE(ip) @ extract opcode from rINST 4626 SET_VREG(r0, r9) @ vAA<- r0 4627 GOTO_OPCODE(ip) @ jump to next instruction 4628 /* 11-14 instructions */ 4629 4630 4631 /* ------------------------------ */ 4632 .balign 64 4633 .L_OP_ADD_LONG: /* 0x9b */ 4634 /* File: armv5te/OP_ADD_LONG.S */ 4635 /* File: armv5te/binopWide.S */ 4636 /* 4637 * Generic 64-bit binary operation. Provide an "instr" line that 4638 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4639 * This could be an ARM instruction or a function call. (If the result 4640 * comes back in a register other than r0, you can override "result".) 4641 * 4642 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4643 * vCC (r1). Useful for integer division and modulus. 4644 * 4645 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4646 * xor-long, add-double, sub-double, mul-double, div-double, 4647 * rem-double 4648 * 4649 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4650 */ 4651 /* binop vAA, vBB, vCC */ 4652 FETCH(r0, 1) @ r0<- CCBB 4653 mov r9, rINST, lsr #8 @ r9<- AA 4654 and r2, r0, #255 @ r2<- BB 4655 mov r3, r0, lsr #8 @ r3<- CC 4656 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4657 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4658 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4659 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4660 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4661 .if 0 4662 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
4663 beq common_errDivideByZero 4664 .endif 4665 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4666 4667 adds r0, r0, r2 @ optional op; may set condition codes 4668 adc r1, r1, r3 @ result<- op, r0-r3 changed 4669 GET_INST_OPCODE(ip) @ extract opcode from rINST 4670 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4671 GOTO_OPCODE(ip) @ jump to next instruction 4672 /* 14-17 instructions */ 4673 4674 4675 /* ------------------------------ */ 4676 .balign 64 4677 .L_OP_SUB_LONG: /* 0x9c */ 4678 /* File: armv5te/OP_SUB_LONG.S */ 4679 /* File: armv5te/binopWide.S */ 4680 /* 4681 * Generic 64-bit binary operation. Provide an "instr" line that 4682 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4683 * This could be an ARM instruction or a function call. (If the result 4684 * comes back in a register other than r0, you can override "result".) 4685 * 4686 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4687 * vCC (r1). Useful for integer division and modulus. 4688 * 4689 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4690 * xor-long, add-double, sub-double, mul-double, div-double, 4691 * rem-double 4692 * 4693 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4694 */ 4695 /* binop vAA, vBB, vCC */ 4696 FETCH(r0, 1) @ r0<- CCBB 4697 mov r9, rINST, lsr #8 @ r9<- AA 4698 and r2, r0, #255 @ r2<- BB 4699 mov r3, r0, lsr #8 @ r3<- CC 4700 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4701 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4702 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4703 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4704 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4705 .if 0 4706 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 
4707 beq common_errDivideByZero 4708 .endif 4709 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4710 4711 subs r0, r0, r2 @ optional op; may set condition codes 4712 sbc r1, r1, r3 @ result<- op, r0-r3 changed 4713 GET_INST_OPCODE(ip) @ extract opcode from rINST 4714 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4715 GOTO_OPCODE(ip) @ jump to next instruction 4716 /* 14-17 instructions */ 4717 4718 4719 /* ------------------------------ */ 4720 .balign 64 4721 .L_OP_MUL_LONG: /* 0x9d */ 4722 /* File: armv5te/OP_MUL_LONG.S */ 4723 /* 4724 * Signed 64-bit integer multiply. 4725 * 4726 * Consider WXxYZ (r1r0 x r3r2) with a long multiply: 4727 * WX 4728 * x YZ 4729 * -------- 4730 * ZW ZX 4731 * YW YX 4732 * 4733 * The low word of the result holds ZX, the high word holds 4734 * (ZW+YX) + (the high overflow from ZX). YW doesn't matter because 4735 * it doesn't fit in the low 64 bits. 4736 * 4737 * Unlike most ARM math operations, multiply instructions have 4738 * restrictions on using the same register more than once (Rd and Rm 4739 * cannot be the same). 4740 */ 4741 /* mul-long vAA, vBB, vCC */ 4742 FETCH(r0, 1) @ r0<- CCBB 4743 and r2, r0, #255 @ r2<- BB 4744 mov r3, r0, lsr #8 @ r3<- CC 4745 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4746 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4747 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4748 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4749 mul ip, r2, r1 @ ip<- ZxW 4750 umull r9, r10, r2, r0 @ r9/r10 <- ZxX 4751 mla r2, r0, r3, ip @ r2<- YxX + (ZxW) 4752 mov r0, rINST, lsr #8 @ r0<- AA 4753 add r10, r2, r10 @ r10<- r10 + low(ZxW + (YxX)) 4754 add r0, rFP, r0, lsl #2 @ r0<- &fp[AA] 4755 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4756 b .LOP_MUL_LONG_finish 4757 4758 /* ------------------------------ */ 4759 .balign 64 4760 .L_OP_DIV_LONG: /* 0x9e */ 4761 /* File: armv5te/OP_DIV_LONG.S */ 4762 /* File: armv5te/binopWide.S */ 4763 /* 4764 * Generic 64-bit binary operation. 
Provide an "instr" line that 4765 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4766 * This could be an ARM instruction or a function call. (If the result 4767 * comes back in a register other than r0, you can override "result".) 4768 * 4769 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4770 * vCC (r1). Useful for integer division and modulus. 4771 * 4772 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4773 * xor-long, add-double, sub-double, mul-double, div-double, 4774 * rem-double 4775 * 4776 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4777 */ 4778 /* binop vAA, vBB, vCC */ 4779 FETCH(r0, 1) @ r0<- CCBB 4780 mov r9, rINST, lsr #8 @ r9<- AA 4781 and r2, r0, #255 @ r2<- BB 4782 mov r3, r0, lsr #8 @ r3<- CC 4783 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4784 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4785 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4786 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4787 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4788 .if 1 4789 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4790 beq common_errDivideByZero 4791 .endif 4792 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4793 4794 @ optional op; may set condition codes 4795 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4796 GET_INST_OPCODE(ip) @ extract opcode from rINST 4797 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4798 GOTO_OPCODE(ip) @ jump to next instruction 4799 /* 14-17 instructions */ 4800 4801 4802 /* ------------------------------ */ 4803 .balign 64 4804 .L_OP_REM_LONG: /* 0x9f */ 4805 /* File: armv5te/OP_REM_LONG.S */ 4806 /* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */ 4807 /* File: armv5te/binopWide.S */ 4808 /* 4809 * Generic 64-bit binary operation. Provide an "instr" line that 4810 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4811 * This could be an ARM instruction or a function call. (If the result 4812 * comes back in a register other than r0, you can override "result".) 
4813 * 4814 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4815 * vCC (r1). Useful for integer division and modulus. 4816 * 4817 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4818 * xor-long, add-double, sub-double, mul-double, div-double, 4819 * rem-double 4820 * 4821 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4822 */ 4823 /* binop vAA, vBB, vCC */ 4824 FETCH(r0, 1) @ r0<- CCBB 4825 mov r9, rINST, lsr #8 @ r9<- AA 4826 and r2, r0, #255 @ r2<- BB 4827 mov r3, r0, lsr #8 @ r3<- CC 4828 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4829 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4830 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4831 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4832 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4833 .if 1 4834 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4835 beq common_errDivideByZero 4836 .endif 4837 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4838 4839 @ optional op; may set condition codes 4840 bl __aeabi_ldivmod @ result<- op, r0-r3 changed 4841 GET_INST_OPCODE(ip) @ extract opcode from rINST 4842 stmia r9, {r2,r3} @ vAA/vAA+1<- r2/r3 4843 GOTO_OPCODE(ip) @ jump to next instruction 4844 /* 14-17 instructions */ 4845 4846 4847 /* ------------------------------ */ 4848 .balign 64 4849 .L_OP_AND_LONG: /* 0xa0 */ 4850 /* File: armv5te/OP_AND_LONG.S */ 4851 /* File: armv5te/binopWide.S */ 4852 /* 4853 * Generic 64-bit binary operation. Provide an "instr" line that 4854 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4855 * This could be an ARM instruction or a function call. (If the result 4856 * comes back in a register other than r0, you can override "result".) 4857 * 4858 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4859 * vCC (r1). Useful for integer division and modulus. 
4860 * 4861 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4862 * xor-long, add-double, sub-double, mul-double, div-double, 4863 * rem-double 4864 * 4865 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 4866 */ 4867 /* binop vAA, vBB, vCC */ 4868 FETCH(r0, 1) @ r0<- CCBB 4869 mov r9, rINST, lsr #8 @ r9<- AA 4870 and r2, r0, #255 @ r2<- BB 4871 mov r3, r0, lsr #8 @ r3<- CC 4872 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4873 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4874 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4875 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4876 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4877 .if 0 4878 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4879 beq common_errDivideByZero 4880 .endif 4881 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4882 4883 and r0, r0, r2 @ optional op; may set condition codes 4884 and r1, r1, r3 @ result<- op, r0-r3 changed 4885 GET_INST_OPCODE(ip) @ extract opcode from rINST 4886 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4887 GOTO_OPCODE(ip) @ jump to next instruction 4888 /* 14-17 instructions */ 4889 4890 4891 /* ------------------------------ */ 4892 .balign 64 4893 .L_OP_OR_LONG: /* 0xa1 */ 4894 /* File: armv5te/OP_OR_LONG.S */ 4895 /* File: armv5te/binopWide.S */ 4896 /* 4897 * Generic 64-bit binary operation. Provide an "instr" line that 4898 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4899 * This could be an ARM instruction or a function call. (If the result 4900 * comes back in a register other than r0, you can override "result".) 4901 * 4902 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4903 * vCC (r1). Useful for integer division and modulus. 4904 * 4905 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4906 * xor-long, add-double, sub-double, mul-double, div-double, 4907 * rem-double 4908 * 4909 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
4910 */ 4911 /* binop vAA, vBB, vCC */ 4912 FETCH(r0, 1) @ r0<- CCBB 4913 mov r9, rINST, lsr #8 @ r9<- AA 4914 and r2, r0, #255 @ r2<- BB 4915 mov r3, r0, lsr #8 @ r3<- CC 4916 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4917 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4918 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4919 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4920 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4921 .if 0 4922 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4923 beq common_errDivideByZero 4924 .endif 4925 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4926 4927 orr r0, r0, r2 @ optional op; may set condition codes 4928 orr r1, r1, r3 @ result<- op, r0-r3 changed 4929 GET_INST_OPCODE(ip) @ extract opcode from rINST 4930 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4931 GOTO_OPCODE(ip) @ jump to next instruction 4932 /* 14-17 instructions */ 4933 4934 4935 /* ------------------------------ */ 4936 .balign 64 4937 .L_OP_XOR_LONG: /* 0xa2 */ 4938 /* File: armv5te/OP_XOR_LONG.S */ 4939 /* File: armv5te/binopWide.S */ 4940 /* 4941 * Generic 64-bit binary operation. Provide an "instr" line that 4942 * specifies an instruction that performs "result = r0-r1 op r2-r3". 4943 * This could be an ARM instruction or a function call. (If the result 4944 * comes back in a register other than r0, you can override "result".) 4945 * 4946 * If "chkzero" is set to 1, we perform a divide-by-zero check on 4947 * vCC (r1). Useful for integer division and modulus. 4948 * 4949 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 4950 * xor-long, add-double, sub-double, mul-double, div-double, 4951 * rem-double 4952 * 4953 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
4954 */ 4955 /* binop vAA, vBB, vCC */ 4956 FETCH(r0, 1) @ r0<- CCBB 4957 mov r9, rINST, lsr #8 @ r9<- AA 4958 and r2, r0, #255 @ r2<- BB 4959 mov r3, r0, lsr #8 @ r3<- CC 4960 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 4961 add r2, rFP, r2, lsl #2 @ r2<- &fp[BB] 4962 add r3, rFP, r3, lsl #2 @ r3<- &fp[CC] 4963 ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1 4964 ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1 4965 .if 0 4966 orrs ip, r2, r3 @ second arg (r2-r3) is zero? 4967 beq common_errDivideByZero 4968 .endif 4969 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 4970 4971 eor r0, r0, r2 @ optional op; may set condition codes 4972 eor r1, r1, r3 @ result<- op, r0-r3 changed 4973 GET_INST_OPCODE(ip) @ extract opcode from rINST 4974 stmia r9, {r0,r1} @ vAA/vAA+1<- r0/r1 4975 GOTO_OPCODE(ip) @ jump to next instruction 4976 /* 14-17 instructions */ 4977 4978 4979 /* ------------------------------ */ 4980 .balign 64 4981 .L_OP_SHL_LONG: /* 0xa3 */ 4982 /* File: armv5te/OP_SHL_LONG.S */ 4983 /* 4984 * Long integer shift. This is different from the generic 32/64-bit 4985 * binary operations because vAA/vBB are 64-bit but vCC (the shift 4986 * distance) is 32-bit. Also, Dalvik requires us to mask off the low 4987 * 6 bits of the shift distance. 
 */
    /* shl-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    @ 64-bit left shift: high word takes the bits carried out of the low
    @ word; the movpl (taken when r2-32 >= 0) covers distances >= 32.
    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHL_LONG_finish

    /* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG: /* 0xa4 */
/* File: armv5te/OP_SHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     *
     * NOTE(review): this file is generated (see header); the comment
     * corrections below should also be applied to armv5te/OP_SHR_LONG.S
     * and armv5te/OP_USHR_LONG.S so they survive regeneration.
     */
    /* shr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    @ 64-bit arithmetic right shift: low word is shifted logically and
    @ picks up the bits carried out of the high word.
    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<-r1 >> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_SHR_LONG_finish

    /* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG: /* 0xa5 */
/* File: armv5te/OP_USHR_LONG.S */
    /*
     * Long integer shift.  This is different from the generic 32/64-bit
     * binary operations because vAA/vBB are 64-bit but vCC (the shift
     * distance) is 32-bit.  Also, Dalvik requires us to mask off the low
     * 6 bits of the shift distance.
     */
    /* ushr-long vAA, vBB, vCC */
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r3, r0, #255                @ r3<- BB
    mov     r0, r0, lsr #8              @ r0<- CC
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[BB]
    GET_VREG(r2, r0)                    @ r2<- vCC
    ldmia   r3, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    and     r2, r2, #63                 @ r2<- r2 & 0x3f
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]

    @ 64-bit logical right shift: identical to shr-long except the
    @ high-word fill uses lsr (zero fill) instead of asr (sign fill).
    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<-r1 >>> (r2-32)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    b       .LOP_USHR_LONG_finish

    /* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT: /* 0xa6 */
/* File: armv5te/OP_ADD_FLOAT.S */
/* File: armv5te/binop.S */
    /*
     * Generic 32-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.  Note that we
     * *don't* check for (INT_MIN / -1) here, because the ARM math lib
     * handles it correctly.
5081 * 5082 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5083 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5084 * mul-float, div-float, rem-float 5085 */ 5086 /* binop vAA, vBB, vCC */ 5087 FETCH(r0, 1) @ r0<- CCBB 5088 mov r9, rINST, lsr #8 @ r9<- AA 5089 mov r3, r0, lsr #8 @ r3<- CC 5090 and r2, r0, #255 @ r2<- BB 5091 GET_VREG(r1, r3) @ r1<- vCC 5092 GET_VREG(r0, r2) @ r0<- vBB 5093 .if 0 5094 cmp r1, #0 @ is second operand zero? 5095 beq common_errDivideByZero 5096 .endif 5097 5098 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5099 @ optional op; may set condition codes 5100 bl __aeabi_fadd @ r0<- op, r0-r3 changed 5101 GET_INST_OPCODE(ip) @ extract opcode from rINST 5102 SET_VREG(r0, r9) @ vAA<- r0 5103 GOTO_OPCODE(ip) @ jump to next instruction 5104 /* 11-14 instructions */ 5105 5106 5107 /* ------------------------------ */ 5108 .balign 64 5109 .L_OP_SUB_FLOAT: /* 0xa7 */ 5110 /* File: armv5te/OP_SUB_FLOAT.S */ 5111 /* File: armv5te/binop.S */ 5112 /* 5113 * Generic 32-bit binary operation. Provide an "instr" line that 5114 * specifies an instruction that performs "result = r0 op r1". 5115 * This could be an ARM instruction or a function call. (If the result 5116 * comes back in a register other than r0, you can override "result".) 5117 * 5118 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5119 * vCC (r1). Useful for integer division and modulus. Note that we 5120 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5121 * handles it correctly. 
5122 * 5123 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5124 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5125 * mul-float, div-float, rem-float 5126 */ 5127 /* binop vAA, vBB, vCC */ 5128 FETCH(r0, 1) @ r0<- CCBB 5129 mov r9, rINST, lsr #8 @ r9<- AA 5130 mov r3, r0, lsr #8 @ r3<- CC 5131 and r2, r0, #255 @ r2<- BB 5132 GET_VREG(r1, r3) @ r1<- vCC 5133 GET_VREG(r0, r2) @ r0<- vBB 5134 .if 0 5135 cmp r1, #0 @ is second operand zero? 5136 beq common_errDivideByZero 5137 .endif 5138 5139 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5140 @ optional op; may set condition codes 5141 bl __aeabi_fsub @ r0<- op, r0-r3 changed 5142 GET_INST_OPCODE(ip) @ extract opcode from rINST 5143 SET_VREG(r0, r9) @ vAA<- r0 5144 GOTO_OPCODE(ip) @ jump to next instruction 5145 /* 11-14 instructions */ 5146 5147 5148 /* ------------------------------ */ 5149 .balign 64 5150 .L_OP_MUL_FLOAT: /* 0xa8 */ 5151 /* File: armv5te/OP_MUL_FLOAT.S */ 5152 /* File: armv5te/binop.S */ 5153 /* 5154 * Generic 32-bit binary operation. Provide an "instr" line that 5155 * specifies an instruction that performs "result = r0 op r1". 5156 * This could be an ARM instruction or a function call. (If the result 5157 * comes back in a register other than r0, you can override "result".) 5158 * 5159 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5160 * vCC (r1). Useful for integer division and modulus. Note that we 5161 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5162 * handles it correctly. 
5163 * 5164 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5165 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5166 * mul-float, div-float, rem-float 5167 */ 5168 /* binop vAA, vBB, vCC */ 5169 FETCH(r0, 1) @ r0<- CCBB 5170 mov r9, rINST, lsr #8 @ r9<- AA 5171 mov r3, r0, lsr #8 @ r3<- CC 5172 and r2, r0, #255 @ r2<- BB 5173 GET_VREG(r1, r3) @ r1<- vCC 5174 GET_VREG(r0, r2) @ r0<- vBB 5175 .if 0 5176 cmp r1, #0 @ is second operand zero? 5177 beq common_errDivideByZero 5178 .endif 5179 5180 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5181 @ optional op; may set condition codes 5182 bl __aeabi_fmul @ r0<- op, r0-r3 changed 5183 GET_INST_OPCODE(ip) @ extract opcode from rINST 5184 SET_VREG(r0, r9) @ vAA<- r0 5185 GOTO_OPCODE(ip) @ jump to next instruction 5186 /* 11-14 instructions */ 5187 5188 5189 /* ------------------------------ */ 5190 .balign 64 5191 .L_OP_DIV_FLOAT: /* 0xa9 */ 5192 /* File: armv5te/OP_DIV_FLOAT.S */ 5193 /* File: armv5te/binop.S */ 5194 /* 5195 * Generic 32-bit binary operation. Provide an "instr" line that 5196 * specifies an instruction that performs "result = r0 op r1". 5197 * This could be an ARM instruction or a function call. (If the result 5198 * comes back in a register other than r0, you can override "result".) 5199 * 5200 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5201 * vCC (r1). Useful for integer division and modulus. Note that we 5202 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5203 * handles it correctly. 
5204 * 5205 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5206 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5207 * mul-float, div-float, rem-float 5208 */ 5209 /* binop vAA, vBB, vCC */ 5210 FETCH(r0, 1) @ r0<- CCBB 5211 mov r9, rINST, lsr #8 @ r9<- AA 5212 mov r3, r0, lsr #8 @ r3<- CC 5213 and r2, r0, #255 @ r2<- BB 5214 GET_VREG(r1, r3) @ r1<- vCC 5215 GET_VREG(r0, r2) @ r0<- vBB 5216 .if 0 5217 cmp r1, #0 @ is second operand zero? 5218 beq common_errDivideByZero 5219 .endif 5220 5221 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5222 @ optional op; may set condition codes 5223 bl __aeabi_fdiv @ r0<- op, r0-r3 changed 5224 GET_INST_OPCODE(ip) @ extract opcode from rINST 5225 SET_VREG(r0, r9) @ vAA<- r0 5226 GOTO_OPCODE(ip) @ jump to next instruction 5227 /* 11-14 instructions */ 5228 5229 5230 /* ------------------------------ */ 5231 .balign 64 5232 .L_OP_REM_FLOAT: /* 0xaa */ 5233 /* File: armv5te/OP_REM_FLOAT.S */ 5234 /* EABI doesn't define a float remainder function, but libm does */ 5235 /* File: armv5te/binop.S */ 5236 /* 5237 * Generic 32-bit binary operation. Provide an "instr" line that 5238 * specifies an instruction that performs "result = r0 op r1". 5239 * This could be an ARM instruction or a function call. (If the result 5240 * comes back in a register other than r0, you can override "result".) 5241 * 5242 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5243 * vCC (r1). Useful for integer division and modulus. Note that we 5244 * *don't* check for (INT_MIN / -1) here, because the ARM math lib 5245 * handles it correctly. 
5246 * 5247 * For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int, 5248 * xor-int, shl-int, shr-int, ushr-int, add-float, sub-float, 5249 * mul-float, div-float, rem-float 5250 */ 5251 /* binop vAA, vBB, vCC */ 5252 FETCH(r0, 1) @ r0<- CCBB 5253 mov r9, rINST, lsr #8 @ r9<- AA 5254 mov r3, r0, lsr #8 @ r3<- CC 5255 and r2, r0, #255 @ r2<- BB 5256 GET_VREG(r1, r3) @ r1<- vCC 5257 GET_VREG(r0, r2) @ r0<- vBB 5258 .if 0 5259 cmp r1, #0 @ is second operand zero? 5260 beq common_errDivideByZero 5261 .endif 5262 5263 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 5264 @ optional op; may set condition codes 5265 bl fmodf @ r0<- op, r0-r3 changed 5266 GET_INST_OPCODE(ip) @ extract opcode from rINST 5267 SET_VREG(r0, r9) @ vAA<- r0 5268 GOTO_OPCODE(ip) @ jump to next instruction 5269 /* 11-14 instructions */ 5270 5271 5272 /* ------------------------------ */ 5273 .balign 64 5274 .L_OP_ADD_DOUBLE: /* 0xab */ 5275 /* File: armv5te/OP_ADD_DOUBLE.S */ 5276 /* File: armv5te/binopWide.S */ 5277 /* 5278 * Generic 64-bit binary operation. Provide an "instr" line that 5279 * specifies an instruction that performs "result = r0-r1 op r2-r3". 5280 * This could be an ARM instruction or a function call. (If the result 5281 * comes back in a register other than r0, you can override "result".) 5282 * 5283 * If "chkzero" is set to 1, we perform a divide-by-zero check on 5284 * vCC (r1). Useful for integer division and modulus. 5285 * 5286 * for: add-long, sub-long, div-long, rem-long, and-long, or-long, 5287 * xor-long, add-double, sub-double, mul-double, div-double, 5288 * rem-double 5289 * 5290 * IMPORTANT: you may specify "chkzero" or "preinstr" but not both. 
     */
    /* binop vAA, vBB, vCC */
@ add-double: vAA/vAA+1 <- vBB op vCC via EABI soft-float helper (__aeabi_dadd).
@ NOTE(review): the chkzero test (when enabled) is on the 64-bit second operand
@ in r2-r3, despite the template comment saying "vCC (r1)".
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dadd                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE: /* 0xac */
/* File: armv5te/OP_SUB_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
@ sub-double: same binopWide expansion with instr = __aeabi_dsub, chkzero = 0.
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dsub                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE: /* 0xad */
/* File: armv5te/OP_MUL_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
@ mul-double: same binopWide expansion with instr = __aeabi_dmul, chkzero = 0.
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dmul                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE: /* 0xae */
/* File: armv5te/OP_DIV_DOUBLE.S */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
@ div-double: instr = __aeabi_ddiv, chkzero = 0 (no divide-by-zero trap for
@ floating point; the helper defines the result).
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ddiv                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE: /* 0xaf */
/* File: armv5te/OP_REM_DOUBLE.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide.S */
    /*
     * Generic 64-bit binary operation.  Provide an "instr" line that
     * specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * for: add-long, sub-long, div-long, rem-long, and-long, or-long,
     *      xor-long, add-double, sub-double, mul-double, div-double,
     *      rem-double
     *
     * IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
     */
    /* binop vAA, vBB, vCC */
@ rem-double: instr = libm's fmod (see note above), chkzero = 0.
    FETCH(r0, 1)                        @ r0<- CCBB
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r0, #255                @ r2<- BB
    mov     r3, r0, lsr #8              @ r3<- CC
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    add     r2, rFP, r2, lsl #2         @ r2<- &fp[BB]
    add     r3, rFP, r3, lsl #2         @ r3<- &fp[CC]
    ldmia   r2, {r0-r1}                 @ r0/r1<- vBB/vBB+1
    ldmia   r3, {r2-r3}                 @ r2/r3<- vCC/vCC+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 14-17 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_2ADDR: /* 0xb0 */
/* File: armv5te/OP_ADD_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ add-int/2addr: vA <- vA + vB (instr = "add r0, r0, r1", chkzero = 0).
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_INT_2ADDR: /* 0xb1 */
/* File: armv5te/OP_SUB_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ sub-int/2addr: vA <- vA - vB (instr = "sub r0, r0, r1", chkzero = 0).
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    sub     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_2ADDR: /* 0xb2 */
/* File: armv5te/OP_MUL_INT_2ADDR.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ mul-int/2addr: vA <- vA * vB.  Operand order "mul r0, r1, r0" is required;
@ see the note above (Rd==Rm restriction on pre-ARMv6 MUL encodings).
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_2ADDR: /* 0xb3 */
/* File: armv5te/OP_DIV_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ div-int/2addr: vA <- vA / vB via EABI helper __aeabi_idiv.
@ chkzero = 1 here (.if 1), so a zero divisor branches to the common
@ divide-by-zero thrower before the PC is advanced.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_2ADDR: /* 0xb4 */
/* File: armv5te/OP_REM_INT_2ADDR.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ rem-int/2addr: vA <- vA % vB via __aeabi_idivmod; the "result" override is
@ r1 here (remainder), which is why SET_VREG below stores r1, not r0.
@ chkzero = 1 (.if 1): zero divisor throws.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 1
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_2ADDR: /* 0xb5 */
/* File: armv5te/OP_AND_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ and-int/2addr: vA <- vA & vB (instr = "and r0, r0, r1", chkzero = 0).
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_2ADDR: /* 0xb6 */
/* File: armv5te/OP_OR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ or-int/2addr: vA <- vA | vB (instr = "orr r0, r0, r1", chkzero = 0).
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_2ADDR: /* 0xb7 */
/* File: armv5te/OP_XOR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ xor-int/2addr: vA <- vA ^ vB (instr = "eor r0, r0, r1", chkzero = 0).
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT_2ADDR: /* 0xb8 */
/* File: armv5te/OP_SHL_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ shl-int/2addr: vA <- vA << (vB & 31); the "preinstr" slot masks the shift
@ count to 0-31 as the Dalvik spec requires.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT_2ADDR: /* 0xb9 */
/* File: armv5te/OP_SHR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ shr-int/2addr: vA <- vA >> (vB & 31), arithmetic (sign-propagating) shift.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT_2ADDR: /* 0xba */
/* File: armv5te/OP_USHR_INT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
@ ushr-int/2addr: vA <- vA >>> (vB & 31), logical (zero-filling) shift.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_LONG_2ADDR: /* 0xbb */
/* File: armv5te/OP_ADD_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
@ add-long/2addr: 64-bit add with carry propagation (adds low, adc high).
@ NOTE(review): when enabled, the zero check is on the second operand's
@ 64-bit value in r2-r3 (vB), despite the template comment saying "vCC (r1)".
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    adds    r0, r0, r2                  @ optional op; may set condition codes
    adc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_LONG_2ADDR: /* 0xbc */
/* File: armv5te/OP_SUB_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
@ sub-long/2addr: 64-bit subtract with borrow (subs low, sbc high).
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    subs    r0, r0, r2                  @ optional op; may set condition codes
    sbc     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_LONG_2ADDR: /* 0xbd */
/* File: armv5te/OP_MUL_LONG_2ADDR.S */
    /*
     * Signed 64-bit integer multiply, "/2addr" version.
     *
     * See OP_MUL_LONG for an explanation.
     *
     * We get a little tight on registers, so to avoid looking up &fp[A]
     * again we stuff it into rINST.
     */
    /* mul-long/2addr vA, vB */
@ Hand-written (not a template expansion): 64x64->64 multiply built from
@ umull (XxZ low product) plus two 32-bit cross products folded into the
@ high word.  rINST is temporarily repurposed to hold &fp[A] and is
@ reloaded by FETCH_ADVANCE_INST below.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     rINST, rFP, r9, lsl #2      @ rINST<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   rINST, {r0-r1}              @ r0/r1<- vAA/vAA+1
    mul     ip, r2, r1                  @ ip<- ZxW
    umull   r9, r10, r2, r0             @ r9/r10 <- ZxX
    mla     r2, r0, r3, ip              @ r2<- YxX + (ZxW)
    mov     r0, rINST                   @ r0<- &fp[A] (free up rINST)
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    add     r10, r2, r10                @ r10<- r10 + low(ZxW + (YxX))
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r0, {r9-r10}                @ vAA/vAA+1<- r9/r10
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_DIV_LONG_2ADDR: /* 0xbe */
/* File: armv5te/OP_DIV_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
@ div-long/2addr: vA <- vA / vB via __aeabi_ldivmod (quotient in r0/r1).
@ chkzero = 1 (.if 1): the orrs below tests the full 64-bit divisor (r2-r3)
@ and throws before the PC is advanced.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_LONG_2ADDR: /* 0xbf */
/* File: armv5te/OP_REM_LONG_2ADDR.S */
/* ldivmod returns quotient in r0/r1 and remainder in r2/r3 */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
@ rem-long/2addr: vA <- vA % vB via __aeabi_ldivmod; the "result" override
@ is r2/r3 (remainder, per the note above), hence the stmia of r2/r3 below.
@ chkzero = 1 (.if 1): zero divisor throws.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 1
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ldivmod             @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r2,r3}                 @ vAA/vAA+1<- r2/r3
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_LONG_2ADDR: /* 0xc0 */
/* File: armv5te/OP_AND_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
@ and-long/2addr: 64-bit AND done as two independent 32-bit ANDs.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    and     r0, r0, r2                  @ optional op; may set condition codes
    and     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_LONG_2ADDR: /* 0xc1 */
/* File: armv5te/OP_OR_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
@ or-long/2addr: 64-bit OR done as two independent 32-bit ORs.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    orr     r0, r0, r2                  @ optional op; may set condition codes
    orr     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_LONG_2ADDR: /* 0xc2 */
/* File: armv5te/OP_XOR_LONG_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
@ xor-long/2addr: 64-bit XOR done as two independent 32-bit EORs.
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

    eor     r0, r0, r2                  @ optional op; may set condition codes
    eor     r1, r1, r3                  @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_LONG_2ADDR: /* 0xc3 */
/* File: armv5te/OP_SHL_LONG_2ADDR.S */
    /*
     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
     * 32-bit shift distance.
     */
    /* shl-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f (Dalvik masks shift)
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1

    mov     r1, r1, asl r2              @ r1<- r1 << r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r1, r1, r0, lsr r3          @ r1<- r1 | (r0 >> (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r1, r0, asl ip              @ if r2 >= 32, r1<- r0 << (r2-32)
    mov     r0, r0, asl r2              @ r0<- r0 << r2
    b       .LOP_SHL_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_SHR_LONG_2ADDR: /* 0xc4 */
/* File: armv5te/OP_SHR_LONG_2ADDR.S */
    /*
     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
     * 32-bit shift distance.
     */
    /* shr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f (Dalvik masks shift)
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, asr ip              @ if r2 >= 32, r0<- r1 >> (r2-32)
    mov     r1, r1, asr r2              @ r1<- r1 >> r2 (arithmetic: sign fill)
    b       .LOP_SHR_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_USHR_LONG_2ADDR: /* 0xc5 */
/* File: armv5te/OP_USHR_LONG_2ADDR.S */
    /*
     * Long integer shift, 2addr version.  vA is 64-bit value/result, vB is
     * 32-bit shift distance.
     */
    /* ushr-long/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    GET_VREG(r2, r3)                    @ r2<- vB
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    and     r2, r2, #63                 @ r2<- r2 & 0x3f (Dalvik masks shift)
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1

    mov     r0, r0, lsr r2              @ r0<- r0 >>> r2
    rsb     r3, r2, #32                 @ r3<- 32 - r2
    orr     r0, r0, r1, asl r3          @ r0<- r0 | (r1 << (32-r2))
    subs    ip, r2, #32                 @ ip<- r2 - 32
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST
    movpl   r0, r1, lsr ip              @ if r2 >= 32, r0<- r1 >>> (r2-32)
    mov     r1, r1, lsr r2              @ r1<- r1 >>> r2 (logical: zero fill)
    b       .LOP_USHR_LONG_2ADDR_finish

/* ------------------------------ */
    .balign 64
.L_OP_ADD_FLOAT_2ADDR: /* 0xc6 */
/* File: armv5te/OP_ADD_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero=0: no divide-by-zero check
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fadd                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_FLOAT_2ADDR: /* 0xc7 */
/* File: armv5te/OP_SUB_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero=0: no divide-by-zero check
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fsub                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_FLOAT_2ADDR: /* 0xc8 */
/* File: armv5te/OP_MUL_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero=0: no divide-by-zero check
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fmul                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_FLOAT_2ADDR: /* 0xc9 */
/* File: armv5te/OP_DIV_FLOAT_2ADDR.S */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero=0: float div-by-zero is NaN/Inf, not a trap
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_fdiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_FLOAT_2ADDR: /* 0xca */
/* File: armv5te/OP_REM_FLOAT_2ADDR.S */
/* EABI doesn't define a float remainder function, but libm does */
/* File: armv5te/binop2addr.S */
    /*
     * Generic 32-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
     *      rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
     *      shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
     *      sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r3, rINST, lsr #12          @ r3<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    GET_VREG(r1, r3)                    @ r1<- vB
    GET_VREG(r0, r9)                    @ r0<- vA
    .if 0                               @ chkzero=0: fmodf(x, 0) yields NaN, no trap
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      fmodf                       @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_DOUBLE_2ADDR: /* 0xcb */
/* File: armv5te/OP_ADD_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ chkzero=0: no divide-by-zero check
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dadd                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SUB_DOUBLE_2ADDR: /* 0xcc */
/* File: armv5te/OP_SUB_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ chkzero=0: no divide-by-zero check
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dsub                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_DOUBLE_2ADDR: /* 0xcd */
/* File: armv5te/OP_MUL_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ chkzero=0: no divide-by-zero check
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_dmul                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_DOUBLE_2ADDR: /* 0xce */
/* File: armv5te/OP_DIV_DOUBLE_2ADDR.S */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ chkzero=0: double div-by-zero is NaN/Inf, not a trap
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_ddiv                @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_DOUBLE_2ADDR: /* 0xcf */
/* File: armv5te/OP_REM_DOUBLE_2ADDR.S */
/* EABI doesn't define a double remainder function, but libm does */
/* File: armv5te/binopWide2addr.S */
    /*
     * Generic 64-bit "/2addr" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0-r1 op r2-r3".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
     *      and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
     *      sub-double/2addr, mul-double/2addr, div-double/2addr,
     *      rem-double/2addr
     */
    /* binop/2addr vA, vB */
    mov     r9, rINST, lsr #8           @ r9<- A+
    mov     r1, rINST, lsr #12          @ r1<- B
    and     r9, r9, #15                 @ strip B nibble, leaving A
    add     r1, rFP, r1, lsl #2         @ r1<- &fp[B]
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[A]
    ldmia   r1, {r2-r3}                 @ r2/r3<- vBB/vBB+1
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    .if 0                               @ chkzero=0: fmod(x, 0) yields NaN, no trap
    orrs    ip, r2, r3                  @ second arg (r2-r3) is zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(1)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      fmod                        @ result<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r9, {r0,r1}                 @ vAA/vAA+1<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 12-15 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT16: /* 0xd0 */
/* File: armv5te/OP_ADD_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ strip B nibble, leaving A
    .if 0                               @ chkzero=0: no divide-by-zero check
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT: /* 0xd1 */
/* File: armv5te/OP_RSUB_INT.S */
/* this op is "rsub-int", but can be thought of as "rsub-int/lit16" */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ strip B nibble, leaving A
    .if 0                               @ chkzero=0: no divide-by-zero check
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    rsb     r0, r0, r1                  @ r0<- op, r0-r3 changed (lit - vB)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_MUL_INT_LIT16: /* 0xd2 */
/* File: armv5te/OP_MUL_INT_LIT16.S */
/* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ strip B nibble, leaving A
    .if 0                               @ chkzero=0: no divide-by-zero check
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT16: /* 0xd3 */
/* File: armv5te/OP_DIV_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ strip B nibble, leaving A
    .if 1                               @ chkzero=1: integer division must trap /0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT16: /* 0xd4 */
/* File: armv5te/OP_REM_INT_LIT16.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ strip B nibble, leaving A
    .if 1                               @ chkzero=1: modulus must trap /0
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1 (remainder, per idivmod)
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT16: /* 0xd5 */
/* File: armv5te/OP_AND_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ strip B nibble, leaving A
    .if 0                               @ chkzero=0: no divide-by-zero check
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT16: /* 0xd6 */
/* File: armv5te/OP_OR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ strip B nibble, leaving A
    .if 0                               @ chkzero=0: no divide-by-zero check
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT16: /* 0xd7 */
/* File: armv5te/OP_XOR_INT_LIT16.S */
/* File: armv5te/binopLit16.S */
    /*
     * Generic 32-bit "lit16" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
     *      rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
     */
    /* binop/lit16 vA, vB, #+CCCC */
    FETCH_S(r1, 1)                      @ r1<- ssssCCCC (sign-extended)
    mov     r2, rINST, lsr #12          @ r2<- B
    mov     r9, rINST, lsr #8           @ r9<- A+
    GET_VREG(r0, r2)                    @ r0<- vB
    and     r9, r9, #15                 @ strip B nibble, leaving A
    .if 0                               @ chkzero=0: no divide-by-zero check
    cmp     r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-13 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_ADD_INT_LIT8: /* 0xd8 */
/* File: armv5te/OP_ADD_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0                               @ chkzero=0: no divide-by-zero check
    @cmp    r1, #0                      @ is second operand zero?
                                        @ (cmp unneeded: movs above set Z)
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    add     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_RSUB_INT_LIT8: /* 0xd9 */
/* File: armv5te/OP_RSUB_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0                               @ chkzero=0: no divide-by-zero check
    @cmp    r1, #0                      @ is second operand zero?
                                        @ (cmp unneeded: movs above set Z)
7077 beq common_errDivideByZero 7078 .endif 7079 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7080 7081 @ optional op; may set condition codes 7082 rsb r0, r0, r1 @ r0<- op, r0-r3 changed 7083 GET_INST_OPCODE(ip) @ extract opcode from rINST 7084 SET_VREG(r0, r9) @ vAA<- r0 7085 GOTO_OPCODE(ip) @ jump to next instruction 7086 /* 10-12 instructions */ 7087 7088 7089 /* ------------------------------ */ 7090 .balign 64 7091 .L_OP_MUL_INT_LIT8: /* 0xda */ 7092 /* File: armv5te/OP_MUL_INT_LIT8.S */ 7093 /* must be "mul r0, r1, r0" -- "r0, r0, r1" is illegal */ 7094 /* File: armv5te/binopLit8.S */ 7095 /* 7096 * Generic 32-bit "lit8" binary operation. Provide an "instr" line 7097 * that specifies an instruction that performs "result = r0 op r1". 7098 * This could be an ARM instruction or a function call. (If the result 7099 * comes back in a register other than r0, you can override "result".) 7100 * 7101 * If "chkzero" is set to 1, we perform a divide-by-zero check on 7102 * vCC (r1). Useful for integer division and modulus. 7103 * 7104 * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8, 7105 * rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8, 7106 * shl-int/lit8, shr-int/lit8, ushr-int/lit8 7107 */ 7108 /* binop/lit8 vAA, vBB, #+CC */ 7109 FETCH_S(r3, 1) @ r3<- ssssCCBB (sign-extended for CC) 7110 mov r9, rINST, lsr #8 @ r9<- AA 7111 and r2, r3, #255 @ r2<- BB 7112 GET_VREG(r0, r2) @ r0<- vBB 7113 movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended) 7114 .if 0 7115 @cmp r1, #0 @ is second operand zero? 
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    mul     r0, r1, r0                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_DIV_INT_LIT8: /* 0xdb */
/* File: armv5te/OP_DIV_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 1
    @cmp    r1, #0                      @ is second operand zero? ("movs" above already set Z)
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_idiv                @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_REM_INT_LIT8: /* 0xdc */
/* File: armv5te/OP_REM_INT_LIT8.S */
/* idivmod returns quotient in r0 and remainder in r1 */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 1
    @cmp    r1, #0                      @ is second operand zero? ("movs" above already set Z)
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    bl      __aeabi_idivmod             @ r1<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r1, r9)                    @ vAA<- r1
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_AND_INT_LIT8: /* 0xdd */
/* File: armv5te/OP_AND_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    and     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_OR_INT_LIT8: /* 0xde */
/* File: armv5te/OP_OR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    orr     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_XOR_INT_LIT8: /* 0xdf */
/* File: armv5te/OP_XOR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

                                        @ optional op; may set condition codes
    eor     r0, r0, r1                  @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHL_INT_LIT8: /* 0xe0 */
/* File: armv5te/OP_SHL_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asl r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_SHR_INT_LIT8: /* 0xe1 */
/* File: armv5te/OP_SHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, asr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_USHR_INT_LIT8: /* 0xe2 */
/* File: armv5te/OP_USHR_INT_LIT8.S */
/* File: armv5te/binopLit8.S */
    /*
     * Generic 32-bit "lit8" binary operation.  Provide an "instr" line
     * that specifies an instruction that performs "result = r0 op r1".
     * This could be an ARM instruction or a function call.  (If the result
     * comes back in a register other than r0, you can override "result".)
     *
     * If "chkzero" is set to 1, we perform a divide-by-zero check on
     * vCC (r1).  Useful for integer division and modulus.
     *
     * For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
     *      rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
     *      shl-int/lit8, shr-int/lit8, ushr-int/lit8
     */
    /* binop/lit8 vAA, vBB, #+CC */
    FETCH_S(r3, 1)                      @ r3<- ssssCCBB (sign-extended for CC)
    mov     r9, rINST, lsr #8           @ r9<- AA
    and     r2, r3, #255                @ r2<- BB
    GET_VREG(r0, r2)                    @ r0<- vBB
    movs    r1, r3, asr #8              @ r1<- ssssssCC (sign extended)
    .if 0
    @cmp    r1, #0                      @ is second operand zero?
    beq     common_errDivideByZero
    .endif
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST

    and     r1, r1, #31                 @ optional op; may set condition codes
    mov     r0, r0, lsr r1              @ r0<- op, r0-r3 changed
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r9)                    @ vAA<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction
    /* 10-12 instructions */


/* ------------------------------ */
    .balign 64
.L_OP_IGET_VOLATILE: /* 0xe3 */
/* File: armv5te/OP_IGET_VOLATILE.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_VOLATILE_finish   @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolved ok?
    bne     .LOP_IGET_VOLATILE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_VOLATILE: /* 0xe4 */
/* File: armv5te/OP_IPUT_VOLATILE.S */
/* File: armv5te/OP_IPUT.S */
    /*
     * General 32-bit instance field put.
     *
     * for: iput, iput-boolean, iput-byte, iput-char, iput-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_VOLATILE_finish   @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_VOLATILE_finish   @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET_VOLATILE: /* 0xe5 */
/* File: armv5te/OP_SGET_VOLATILE.S */
/* File: armv5te/OP_SGET.S */
    /*
     * General 32-bit SGET handler.
     *
     * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_VOLATILE_resolve  @ yes, do resolve
.LOP_SGET_VOLATILE_finish: @ field ptr in r0
    ldr     r1, [r0, #offStaticField_value] @ r1<- field value
    SMP_DMB                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r1, r2)                    @ fp[AA]<- r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_VOLATILE: /* 0xe6 */
/* File: armv5te/OP_SPUT_VOLATILE.S */
/* File: armv5te/OP_SPUT.S */
    /*
     * General 32-bit SPUT handler.
     *
     * for: sput, sput-boolean, sput-byte, sput-char, sput-short
     */
    /* op vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_VOLATILE_resolve  @ yes, do resolve
.LOP_SPUT_VOLATILE_finish: @ field ptr in r0
    mov     r2, rINST, lsr #8           @ r2<- AA
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_VREG(r1, r2)                    @ r1<- fp[AA]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SMP_DMB                             @ releasing store
    str     r1, [r0, #offStaticField_value] @ field<- vAA
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT_VOLATILE: /* 0xe7 */
/* File: armv5te/OP_IGET_OBJECT_VOLATILE.S */
/* File: armv5te/OP_IGET.S */
    /*
     * General 32-bit instance field get.
     *
     * for: iget, iget-object, iget-boolean, iget-byte, iget-char, iget-short
     */
    /* op vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_OBJECT_VOLATILE_finish    @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolved ok?
    bne     .LOP_IGET_OBJECT_VOLATILE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE_VOLATILE: /* 0xe8 */
/* File: armv5te/OP_IGET_WIDE_VOLATILE.S */
/* File: armv5te/OP_IGET_WIDE.S */
    /*
     * Wide 32-bit instance field get.
     */
    /* iget-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IGET_WIDE_VOLATILE_finish  @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ resolved ok?
    bne     .LOP_IGET_WIDE_VOLATILE_finish
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_WIDE_VOLATILE: /* 0xe9 */
/* File: armv5te/OP_IPUT_WIDE_VOLATILE.S */
/* File: armv5te/OP_IPUT_WIDE.S */
    /* iput-wide vA, vB, field@CCCC */
    mov     r0, rINST, lsr #12          @ r0<- B
    ldr     r3, [rGLUE, #offGlue_methodClassDex]    @ r3<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref CCCC
    ldr     r2, [r3, #offDvmDex_pResFields] @ r2<- pResFields
    GET_VREG(r9, r0)                    @ r9<- fp[B], the object pointer
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ is resolved entry null?
    bne     .LOP_IPUT_WIDE_VOLATILE_finish  @ no, already resolved
8:  ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    EXPORT_PC()                         @ resolve() could throw
    ldr     r0, [r2, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveInstField         @ r0<- resolved InstField ptr
    cmp     r0, #0                      @ success?
    bne     .LOP_IPUT_WIDE_VOLATILE_finish  @ yes, finish up
    b       common_exceptionThrown


/* ------------------------------ */
    .balign 64
.L_OP_SGET_WIDE_VOLATILE: /* 0xea */
/* File: armv5te/OP_SGET_WIDE_VOLATILE.S */
/* File: armv5te/OP_SGET_WIDE.S */
    /*
     * 64-bit SGET handler.
     */
    /* sget-wide vAA, field@BBBB */
    ldr     r2, [rGLUE, #offGlue_methodClassDex]    @ r2<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields
    ldr     r0, [r2, r1, lsl #2]        @ r0<- resolved StaticField ptr
    cmp     r0, #0                      @ is resolved entry null?
    beq     .LOP_SGET_WIDE_VOLATILE_resolve @ yes, do resolve
.LOP_SGET_WIDE_VOLATILE_finish:
    mov     r9, rINST, lsr #8           @ r9<- AA
    .if 1
    add     r0, r0, #offStaticField_value @ r0<- pointer to data
    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
    .else
    ldrd    r0, [r0, #offStaticField_value] @ r0/r1<- field value (aligned)
    .endif
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    stmia   r9, {r0-r1}                 @ vAA/vAA+1<- r0/r1
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_SPUT_WIDE_VOLATILE: /* 0xeb */
/* File: armv5te/OP_SPUT_WIDE_VOLATILE.S */
/* File: armv5te/OP_SPUT_WIDE.S */
    /*
     * 64-bit SPUT handler.
     */
    /* sput-wide vAA, field@BBBB */
    ldr     r0, [rGLUE, #offGlue_methodClassDex]    @ r0<- DvmDex
    FETCH(r1, 1)                        @ r1<- field ref BBBB
    ldr     r0, [r0, #offDvmDex_pResFields] @ r0<- dvmDex->pResFields
    mov     r9, rINST, lsr #8           @ r9<- AA
    ldr     r2, [r0, r1, lsl #2]        @ r2<- resolved StaticField ptr
    add     r9, rFP, r9, lsl #2         @ r9<- &fp[AA]
    cmp     r2, #0                      @ is resolved entry null?
    beq     .LOP_SPUT_WIDE_VOLATILE_resolve @ yes, do resolve
.LOP_SPUT_WIDE_VOLATILE_finish: @ field ptr in r2, AA in r9
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldmia   r9, {r0-r1}                 @ r0/r1<- vAA/vAA+1
    GET_INST_OPCODE(r10)                @ extract opcode from rINST (r10, not ip: must survive the bl below)
    .if 1
    add     r2, r2, #offStaticField_value @ r2<- pointer to data
    bl      dvmQuasiAtomicSwap64        @ stores r0/r1 into addr r2
    .else
    strd    r0, [r2, #offStaticField_value] @ field<- vAA/vAA+1
    .endif
    GOTO_OPCODE(r10)                    @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_BREAKPOINT: /* 0xec */
/* File: armv5te/OP_BREAKPOINT.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_THROW_VERIFICATION_ERROR: /* 0xed */
/* File: armv5te/OP_THROW_VERIFICATION_ERROR.S */
    /*
     * Handle a throw-verification-error instruction.  This throws an
     * exception for an error discovered during verification.  The
     * exception is indicated by AA, with some detail provided by BBBB.
     */
    /* op AA, ref@BBBB */
    ldr     r0, [rGLUE, #offGlue_method]    @ r0<- glue->method
    FETCH(r2, 1)                        @ r2<- BBBB
    EXPORT_PC()                         @ export the PC
    mov     r1, rINST, lsr #8           @ r1<- AA
    bl      dvmThrowVerificationError   @ always throws
    b       common_exceptionThrown      @ handle exception

/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE: /* 0xee */
/* File: armv5te/OP_EXECUTE_INLINE.S */
    /*
     * Execute a "native inline" instruction.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
     */
    /* [opt] execute-inline vAA, {vC, vD, vE, vF}, inline@BBBB */
    FETCH(r10, 1)                       @ r10<- BBBB
    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
    EXPORT_PC()                         @ can throw
    sub     sp, sp, #8                  @ make room for arg, +64 bit align
    mov     r0, rINST, lsr #12          @ r0<- B
    str     r1, [sp]                    @ push &glue->retval
    bl      .LOP_EXECUTE_INLINE_continue    @ make call; will return after
    add     sp, sp, #8                  @ pop stack
    cmp     r0, #0                      @ test boolean result of inline
    beq     common_exceptionThrown      @ returned false, handle exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_EXECUTE_INLINE_RANGE: /* 0xef */
/* File: armv5te/OP_EXECUTE_INLINE_RANGE.S */
    /*
     * Execute a "native inline" instruction, using "/range" semantics.
     * Same idea as execute-inline, but we get the args differently.
     *
     * We need to call an InlineOp4Func:
     *  bool (func)(u4 arg0, u4 arg1, u4 arg2, u4 arg3, JValue* pResult)
     *
     * The first four args are in r0-r3, pointer to return value storage
     * is on the stack.  The function's return value is a flag that tells
     * us if an exception was thrown.
     */
    /* [opt] execute-inline/range {vCCCC..v(CCCC+AA-1)}, inline@BBBB */
    FETCH(r10, 1)                       @ r10<- BBBB
    add     r1, rGLUE, #offGlue_retval  @ r1<- &glue->retval
    EXPORT_PC()                         @ can throw
    sub     sp, sp, #8                  @ make room for arg, +64 bit align
    mov     r0, rINST, lsr #8           @ r0<- AA
    str     r1, [sp]                    @ push &glue->retval
    bl      .LOP_EXECUTE_INLINE_RANGE_continue  @ make call; will return after
    add     sp, sp, #8                  @ pop stack
    cmp     r0, #0                      @ test boolean result of inline
    beq     common_exceptionThrown      @ returned false, handle exception
    FETCH_ADVANCE_INST(3)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_DIRECT_EMPTY: /* 0xf0 */
/* File: armv5te/OP_INVOKE_DIRECT_EMPTY.S */
    /*
     * invoke-direct-empty is a no-op in a "standard" interpreter.
     */
    FETCH_ADVANCE_INST(3)               @ advance to next instr, load rINST
    GET_INST_OPCODE(ip)                 @ ip<- opcode from rINST
    GOTO_OPCODE(ip)                     @ execute it

/* ------------------------------ */
    .balign 64
.L_OP_UNUSED_F1: /* 0xf1 */
/* File: armv5te/OP_UNUSED_F1.S */
/* File: armv5te/unused.S */
    bl      common_abort


/* ------------------------------ */
    .balign 64
.L_OP_IGET_QUICK: /* 0xf2 */
/* File: armv5te/OP_IGET_QUICK.S */
    /* For: iget-quick, iget-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A (mask off high bits)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_IGET_WIDE_QUICK: /* 0xf3 */
/* File: armv5te/OP_IGET_WIDE_QUICK.S */
    /* iget-wide-quick vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(ip, 1)                        @ ip<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldrd    r0, [r3, ip]                @ r0<- obj.field (64 bits, aligned)
    and     r2, r2, #15                 @ r2<- A (mask off high bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    add     r3, rFP, r2, lsl #2         @ r3<- &fp[A]
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    stmia   r3, {r0-r1}                 @ fp[A]<- r0/r1
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_IGET_OBJECT_QUICK: /* 0xf4 */
/* File: armv5te/OP_IGET_OBJECT_QUICK.S */
/* File: armv5te/OP_IGET_QUICK.S */
    /* For: iget-quick, iget-object-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- object we're operating on
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
    mov     r2, rINST, lsr #8           @ r2<- A(+)
    beq     common_errNullObject        @ object was null
    ldr     r0, [r3, r1]                @ r0<- obj.field (always 32 bits)
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A (mask off high bits)
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction


/* ------------------------------ */
    .balign 64
.L_OP_IPUT_QUICK: /* 0xf5 */
/* File: armv5te/OP_IPUT_QUICK.S */
    /* For: iput-quick */
    /* op vA, vB, offset@CCCC */
    mov     r2, rINST, lsr #12          @ r2<- B
    GET_VREG(r3, r2)                    @ r3<- fp[B], the object pointer
    FETCH(r1, 1)                        @ r1<- field byte offset
    cmp     r3, #0                      @ check object for null
7852 mov r2, rINST, lsr #8 @ r2<- A(+) 7853 beq common_errNullObject @ object was null 7854 and r2, r2, #15 7855 GET_VREG(r0, r2) @ r0<- fp[A] 7856 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7857 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7858 GET_INST_OPCODE(ip) @ extract opcode from rINST 7859 GOTO_OPCODE(ip) @ jump to next instruction 7860 7861 /* ------------------------------ */ 7862 .balign 64 7863 .L_OP_IPUT_WIDE_QUICK: /* 0xf6 */ 7864 /* File: armv5te/OP_IPUT_WIDE_QUICK.S */ 7865 /* iput-wide-quick vA, vB, offset@CCCC */ 7866 mov r0, rINST, lsr #8 @ r0<- A(+) 7867 mov r1, rINST, lsr #12 @ r1<- B 7868 and r0, r0, #15 7869 GET_VREG(r2, r1) @ r2<- fp[B], the object pointer 7870 add r3, rFP, r0, lsl #2 @ r3<- &fp[A] 7871 cmp r2, #0 @ check object for null 7872 ldmia r3, {r0-r1} @ r0/r1<- fp[A] 7873 beq common_errNullObject @ object was null 7874 FETCH(r3, 1) @ r3<- field byte offset 7875 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7876 strd r0, [r2, r3] @ obj.field (64 bits, aligned)<- r0/r1 7877 GET_INST_OPCODE(ip) @ extract opcode from rINST 7878 GOTO_OPCODE(ip) @ jump to next instruction 7879 7880 /* ------------------------------ */ 7881 .balign 64 7882 .L_OP_IPUT_OBJECT_QUICK: /* 0xf7 */ 7883 /* File: armv5te/OP_IPUT_OBJECT_QUICK.S */ 7884 /* For: iput-object-quick */ 7885 /* op vA, vB, offset@CCCC */ 7886 mov r2, rINST, lsr #12 @ r2<- B 7887 GET_VREG(r3, r2) @ r3<- fp[B], the object pointer 7888 FETCH(r1, 1) @ r1<- field byte offset 7889 cmp r3, #0 @ check object for null 7890 mov r2, rINST, lsr #8 @ r2<- A(+) 7891 beq common_errNullObject @ object was null 7892 and r2, r2, #15 7893 GET_VREG(r0, r2) @ r0<- fp[A] 7894 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 7895 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 7896 str r0, [r3, r1] @ obj.field (always 32 bits)<- r0 7897 cmp r0, #0 7898 strneb r2, [r2, r3, lsr #GC_CARD_SHIFT] @ mark card based on obj head 7899 GET_INST_OPCODE(ip) @ extract opcode from rINST 7900 
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_QUICK: /* 0xf8 */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
    /*
     * Handle an optimized virtual method call.
     *
     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB (vtable index)
    .if     (!0)
    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
    .endif
    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
    cmp     r2, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
    EXPORT_PC()                         @ invoke must export
    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB], the Method* to call
    bl      common_invokeMethodNoRange  @ continue on

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_VIRTUAL_QUICK_RANGE: /* 0xf9 */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK_RANGE.S */
/* File: armv5te/OP_INVOKE_VIRTUAL_QUICK.S */
    /*
     * Handle an optimized virtual method call.
     *
     * for: [opt] invoke-virtual-quick, invoke-virtual-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r3, 2)                        @ r3<- FEDC or CCCC
    FETCH(r1, 1)                        @ r1<- BBBB (vtable index)
    .if     (!1)
    and     r3, r3, #15                 @ r3<- C (or stays CCCC)
    .endif
    GET_VREG(r2, r3)                    @ r2<- vC ("this" ptr)
    cmp     r2, #0                      @ is "this" null?
    beq     common_errNullObject        @ null "this", throw exception
    ldr     r2, [r2, #offObject_clazz]  @ r2<- thisPtr->clazz
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- thisPtr->clazz->vtable
    EXPORT_PC()                         @ invoke must export
    ldr     r0, [r2, r1, lsl #2]        @ r0<- vtable[BBBB], the Method* to call
    bl      common_invokeMethodRange    @ continue on

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_QUICK: /* 0xfa */
/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
    /*
     * Handle an optimized "super" method call.
     *
     * for: [opt] invoke-super-quick, invoke-super-quick/range
     */
    /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
    /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
    FETCH(r10, 2)                       @ r10<- GFED or CCCC
    ldr     r2, [rGLUE, #offGlue_method]    @ r2<- current method
    .if     (!0)
    and     r10, r10, #15               @ r10<- D (or stays CCCC)
    .endif
    FETCH(r1, 1)                        @ r1<- BBBB (vtable index)
    ldr     r2, [r2, #offMethod_clazz]  @ r2<- method->clazz
    EXPORT_PC()                         @ must export for invoke
    ldr     r2, [r2, #offClassObject_super]     @ r2<- method->clazz->super
    GET_VREG(r3, r10)                   @ r3<- "this"
    ldr     r2, [r2, #offClassObject_vtable]    @ r2<- ...clazz->super->vtable
    cmp     r3, #0                      @ null "this" ref?
    ldr     r0, [r2, r1, lsl #2]        @ r0<- super->vtable[BBBB]
    beq     common_errNullObject        @ "this" is null, throw exception
    bl      common_invokeMethodNoRange  @ continue on

/* ------------------------------ */
    .balign 64
.L_OP_INVOKE_SUPER_QUICK_RANGE: /* 0xfb */
/* File: armv5te/OP_INVOKE_SUPER_QUICK_RANGE.S */
/* File: armv5te/OP_INVOKE_SUPER_QUICK.S */
    /*
     * Handle an optimized "super" method call.
7988 * 7989 * for: [opt] invoke-super-quick, invoke-super-quick/range 7990 */ 7991 /* op vB, {vD, vE, vF, vG, vA}, class@CCCC */ 7992 /* op vAA, {vCCCC..v(CCCC+AA-1)}, meth@BBBB */ 7993 FETCH(r10, 2) @ r10<- GFED or CCCC 7994 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 7995 .if (!1) 7996 and r10, r10, #15 @ r10<- D (or stays CCCC) 7997 .endif 7998 FETCH(r1, 1) @ r1<- BBBB 7999 ldr r2, [r2, #offMethod_clazz] @ r2<- method->clazz 8000 EXPORT_PC() @ must export for invoke 8001 ldr r2, [r2, #offClassObject_super] @ r2<- method->clazz->super 8002 GET_VREG(r3, r10) @ r3<- "this" 8003 ldr r2, [r2, #offClassObject_vtable] @ r2<- ...clazz->super->vtable 8004 cmp r3, #0 @ null "this" ref? 8005 ldr r0, [r2, r1, lsl #2] @ r0<- super->vtable[BBBB] 8006 beq common_errNullObject @ "this" is null, throw exception 8007 bl common_invokeMethodRange @ continue on 8008 8009 8010 /* ------------------------------ */ 8011 .balign 64 8012 .L_OP_IPUT_OBJECT_VOLATILE: /* 0xfc */ 8013 /* File: armv5te/OP_IPUT_OBJECT_VOLATILE.S */ 8014 /* File: armv5te/OP_IPUT_OBJECT.S */ 8015 /* 8016 * 32-bit instance field put. 8017 * 8018 * for: iput-object, iput-object-volatile 8019 */ 8020 /* op vA, vB, field@CCCC */ 8021 mov r0, rINST, lsr #12 @ r0<- B 8022 ldr r3, [rGLUE, #offGlue_methodClassDex] @ r3<- DvmDex 8023 FETCH(r1, 1) @ r1<- field ref CCCC 8024 ldr r2, [r3, #offDvmDex_pResFields] @ r2<- pDvmDex->pResFields 8025 GET_VREG(r9, r0) @ r9<- fp[B], the object pointer 8026 ldr r0, [r2, r1, lsl #2] @ r0<- resolved InstField ptr 8027 cmp r0, #0 @ is resolved entry null? 8028 bne .LOP_IPUT_OBJECT_VOLATILE_finish @ no, already resolved 8029 8: ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 8030 EXPORT_PC() @ resolve() could throw 8031 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 8032 bl dvmResolveInstField @ r0<- resolved InstField ptr 8033 cmp r0, #0 @ success? 
8034 bne .LOP_IPUT_OBJECT_VOLATILE_finish @ yes, finish up 8035 b common_exceptionThrown 8036 8037 8038 /* ------------------------------ */ 8039 .balign 64 8040 .L_OP_SGET_OBJECT_VOLATILE: /* 0xfd */ 8041 /* File: armv5te/OP_SGET_OBJECT_VOLATILE.S */ 8042 /* File: armv5te/OP_SGET.S */ 8043 /* 8044 * General 32-bit SGET handler. 8045 * 8046 * for: sget, sget-object, sget-boolean, sget-byte, sget-char, sget-short 8047 */ 8048 /* op vAA, field@BBBB */ 8049 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 8050 FETCH(r1, 1) @ r1<- field ref BBBB 8051 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 8052 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 8053 cmp r0, #0 @ is resolved entry null? 8054 beq .LOP_SGET_OBJECT_VOLATILE_resolve @ yes, do resolve 8055 .LOP_SGET_OBJECT_VOLATILE_finish: @ field ptr in r0 8056 ldr r1, [r0, #offStaticField_value] @ r1<- field value 8057 SMP_DMB @ acquiring load 8058 mov r2, rINST, lsr #8 @ r2<- AA 8059 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8060 SET_VREG(r1, r2) @ fp[AA]<- r1 8061 GET_INST_OPCODE(ip) @ extract opcode from rINST 8062 GOTO_OPCODE(ip) @ jump to next instruction 8063 8064 8065 /* ------------------------------ */ 8066 .balign 64 8067 .L_OP_SPUT_OBJECT_VOLATILE: /* 0xfe */ 8068 /* File: armv5te/OP_SPUT_OBJECT_VOLATILE.S */ 8069 /* File: armv5te/OP_SPUT_OBJECT.S */ 8070 /* 8071 * 32-bit SPUT handler for objects 8072 * 8073 * for: sput-object, sput-object-volatile 8074 */ 8075 /* op vAA, field@BBBB */ 8076 ldr r2, [rGLUE, #offGlue_methodClassDex] @ r2<- DvmDex 8077 FETCH(r1, 1) @ r1<- field ref BBBB 8078 ldr r2, [r2, #offDvmDex_pResFields] @ r2<- dvmDex->pResFields 8079 ldr r0, [r2, r1, lsl #2] @ r0<- resolved StaticField ptr 8080 cmp r0, #0 @ is resolved entry null? 
8081 bne .LOP_SPUT_OBJECT_VOLATILE_finish @ no, continue 8082 ldr r9, [rGLUE, #offGlue_method] @ r9<- current method 8083 EXPORT_PC() @ resolve() could throw, so export now 8084 ldr r0, [r9, #offMethod_clazz] @ r0<- method->clazz 8085 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 8086 cmp r0, #0 @ success? 8087 bne .LOP_SPUT_OBJECT_VOLATILE_finish @ yes, finish 8088 b common_exceptionThrown @ no, handle exception 8089 8090 8091 8092 /* ------------------------------ */ 8093 .balign 64 8094 .L_OP_UNUSED_FF: /* 0xff */ 8095 /* File: armv5te/OP_UNUSED_FF.S */ 8096 /* File: armv5te/unused.S */ 8097 bl common_abort 8098 8099 8100 8101 .balign 64 8102 .size dvmAsmInstructionStart, .-dvmAsmInstructionStart 8103 .global dvmAsmInstructionEnd 8104 dvmAsmInstructionEnd: 8105 8106 /* 8107 * =========================================================================== 8108 * Sister implementations 8109 * =========================================================================== 8110 */ 8111 .global dvmAsmSisterStart 8112 .type dvmAsmSisterStart, %function 8113 .text 8114 .balign 4 8115 dvmAsmSisterStart: 8116 8117 /* continuation for OP_CONST_STRING */ 8118 8119 /* 8120 * Continuation if the String has not yet been resolved. 8121 * r1: BBBB (String ref) 8122 * r9: target register 8123 */ 8124 .LOP_CONST_STRING_resolve: 8125 EXPORT_PC() 8126 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8127 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8128 bl dvmResolveString @ r0<- String reference 8129 cmp r0, #0 @ failed? 8130 beq common_exceptionThrown @ yup, handle the exception 8131 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8132 GET_INST_OPCODE(ip) @ extract opcode from rINST 8133 SET_VREG(r0, r9) @ vAA<- r0 8134 GOTO_OPCODE(ip) @ jump to next instruction 8135 8136 /* continuation for OP_CONST_STRING_JUMBO */ 8137 8138 /* 8139 * Continuation if the String has not yet been resolved. 
8140 * r1: BBBBBBBB (String ref) 8141 * r9: target register 8142 */ 8143 .LOP_CONST_STRING_JUMBO_resolve: 8144 EXPORT_PC() 8145 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8146 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8147 bl dvmResolveString @ r0<- String reference 8148 cmp r0, #0 @ failed? 8149 beq common_exceptionThrown @ yup, handle the exception 8150 FETCH_ADVANCE_INST(3) @ advance rPC, load rINST 8151 GET_INST_OPCODE(ip) @ extract opcode from rINST 8152 SET_VREG(r0, r9) @ vAA<- r0 8153 GOTO_OPCODE(ip) @ jump to next instruction 8154 8155 /* continuation for OP_CONST_CLASS */ 8156 8157 /* 8158 * Continuation if the Class has not yet been resolved. 8159 * r1: BBBB (Class ref) 8160 * r9: target register 8161 */ 8162 .LOP_CONST_CLASS_resolve: 8163 EXPORT_PC() 8164 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8165 mov r2, #1 @ r2<- true 8166 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8167 bl dvmResolveClass @ r0<- Class reference 8168 cmp r0, #0 @ failed? 8169 beq common_exceptionThrown @ yup, handle the exception 8170 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8171 GET_INST_OPCODE(ip) @ extract opcode from rINST 8172 SET_VREG(r0, r9) @ vAA<- r0 8173 GOTO_OPCODE(ip) @ jump to next instruction 8174 8175 /* continuation for OP_CHECK_CAST */ 8176 8177 /* 8178 * Trivial test failed, need to perform full check. This is common. 8179 * r0 holds obj->clazz 8180 * r1 holds class resolved from BBBB 8181 * r9 holds object 8182 */ 8183 .LOP_CHECK_CAST_fullcheck: 8184 bl dvmInstanceofNonTrivial @ r0<- boolean result 8185 cmp r0, #0 @ failed? 8186 bne .LOP_CHECK_CAST_okay @ no, success 8187 8188 @ A cast has failed. We need to throw a ClassCastException with the 8189 @ class of the object that failed to be cast. 
    EXPORT_PC()                         @ about to throw
    ldr     r3, [r9, #offObject_clazz]  @ r3<- obj->clazz
    ldr     r0, .LstrClassCastExceptionPtr  @ r0<- exception class descriptor
    ldr     r1, [r3, #offClassObject_descriptor] @ r1<- obj->clazz->descriptor
    bl      dvmThrowExceptionWithClassMessage
    b       common_exceptionThrown

    /*
     * Resolution required.  This is the least-likely path.
     *
     * r2 holds BBBB
     * r9 holds object
     */
.LOP_CHECK_CAST_resolve:
    EXPORT_PC()                         @ resolve() could throw
    ldr     r3, [rGLUE, #offGlue_method] @ r3<- glue->method
    mov     r1, r2                      @ r1<- BBBB
    mov     r2, #0                      @ r2<- false (don't init class)
    ldr     r0, [r3, #offMethod_clazz]  @ r0<- method->clazz
    bl      dvmResolveClass             @ r0<- resolved ClassObject ptr
    cmp     r0, #0                      @ got null?
    beq     common_exceptionThrown      @ yes, handle exception
    mov     r1, r0                      @ r1<- class resolved from BBBB
    ldr     r0, [r9, #offObject_clazz]  @ r0<- obj->clazz
    b       .LOP_CHECK_CAST_resolved    @ pick up where we left off

.LstrClassCastExceptionPtr:
    .word   .LstrClassCastException

/* continuation for OP_INSTANCE_OF */

    /*
     * Trivial test failed, need to perform full check.  This is common.
     * r0 holds obj->clazz
     * r1 holds class resolved from BBBB
     * r9 holds A
     */
.LOP_INSTANCE_OF_fullcheck:
    bl      dvmInstanceofNonTrivial     @ r0<- boolean result
    @ fall through to OP_INSTANCE_OF_store

    /*
     * r0 holds boolean result
     * r9 holds A
     */
.LOP_INSTANCE_OF_store:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    SET_VREG(r0, r9)                    @ vA<- r0
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

    /*
     * Trivial test succeeded, save and bail.
8243 * r9 holds A 8244 */ 8245 .LOP_INSTANCE_OF_trivial: 8246 mov r0, #1 @ indicate success 8247 @ could b OP_INSTANCE_OF_store, but copying is faster and cheaper 8248 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8249 SET_VREG(r0, r9) @ vA<- r0 8250 GET_INST_OPCODE(ip) @ extract opcode from rINST 8251 GOTO_OPCODE(ip) @ jump to next instruction 8252 8253 /* 8254 * Resolution required. This is the least-likely path. 8255 * 8256 * r3 holds BBBB 8257 * r9 holds A 8258 */ 8259 .LOP_INSTANCE_OF_resolve: 8260 EXPORT_PC() @ resolve() could throw 8261 ldr r0, [rGLUE, #offGlue_method] @ r0<- glue->method 8262 mov r1, r3 @ r1<- BBBB 8263 mov r2, #1 @ r2<- true 8264 ldr r0, [r0, #offMethod_clazz] @ r0<- method->clazz 8265 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8266 cmp r0, #0 @ got null? 8267 beq common_exceptionThrown @ yes, handle exception 8268 mov r1, r0 @ r1<- class resolved from BBB 8269 mov r3, rINST, lsr #12 @ r3<- B 8270 GET_VREG(r0, r3) @ r0<- vB (object) 8271 ldr r0, [r0, #offObject_clazz] @ r0<- obj->clazz 8272 b .LOP_INSTANCE_OF_resolved @ pick up where we left off 8273 8274 /* continuation for OP_NEW_INSTANCE */ 8275 8276 .balign 32 @ minimize cache lines 8277 .LOP_NEW_INSTANCE_finish: @ r0=new object 8278 mov r3, rINST, lsr #8 @ r3<- AA 8279 cmp r0, #0 @ failed? 8280 beq common_exceptionThrown @ yes, handle the exception 8281 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8282 GET_INST_OPCODE(ip) @ extract opcode from rINST 8283 SET_VREG(r0, r3) @ vAA<- r0 8284 GOTO_OPCODE(ip) @ jump to next instruction 8285 8286 /* 8287 * Class initialization required. 8288 * 8289 * r0 holds class object 8290 */ 8291 .LOP_NEW_INSTANCE_needinit: 8292 mov r9, r0 @ save r0 8293 bl dvmInitClass @ initialize class 8294 cmp r0, #0 @ check boolean result 8295 mov r0, r9 @ restore r0 8296 bne .LOP_NEW_INSTANCE_initialized @ success, continue 8297 b common_exceptionThrown @ failed, deal with init exception 8298 8299 /* 8300 * Resolution required. 
This is the least-likely path. 8301 * 8302 * r1 holds BBBB 8303 */ 8304 .LOP_NEW_INSTANCE_resolve: 8305 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8306 mov r2, #0 @ r2<- false 8307 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8308 bl dvmResolveClass @ r0<- resolved ClassObject ptr 8309 cmp r0, #0 @ got null? 8310 bne .LOP_NEW_INSTANCE_resolved @ no, continue 8311 b common_exceptionThrown @ yes, handle exception 8312 8313 .LstrInstantiationErrorPtr: 8314 .word .LstrInstantiationError 8315 8316 /* continuation for OP_NEW_ARRAY */ 8317 8318 8319 /* 8320 * Resolve class. (This is an uncommon case.) 8321 * 8322 * r1 holds array length 8323 * r2 holds class ref CCCC 8324 */ 8325 .LOP_NEW_ARRAY_resolve: 8326 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 8327 mov r9, r1 @ r9<- length (save) 8328 mov r1, r2 @ r1<- CCCC 8329 mov r2, #0 @ r2<- false 8330 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 8331 bl dvmResolveClass @ r0<- call(clazz, ref) 8332 cmp r0, #0 @ got null? 8333 mov r1, r9 @ r1<- length (restore) 8334 beq common_exceptionThrown @ yes, handle exception 8335 @ fall through to OP_NEW_ARRAY_finish 8336 8337 /* 8338 * Finish allocation. 8339 * 8340 * r0 holds class 8341 * r1 holds array length 8342 */ 8343 .LOP_NEW_ARRAY_finish: 8344 mov r2, #ALLOC_DONT_TRACK @ don't track in local refs table 8345 bl dvmAllocArrayByClass @ r0<- call(clazz, length, flags) 8346 cmp r0, #0 @ failed? 
8347 mov r2, rINST, lsr #8 @ r2<- A+ 8348 beq common_exceptionThrown @ yes, handle the exception 8349 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8350 and r2, r2, #15 @ r2<- A 8351 GET_INST_OPCODE(ip) @ extract opcode from rINST 8352 SET_VREG(r0, r2) @ vA<- r0 8353 GOTO_OPCODE(ip) @ jump to next instruction 8354 8355 /* continuation for OP_FILLED_NEW_ARRAY */ 8356 8357 /* 8358 * On entry: 8359 * r0 holds array class 8360 * r10 holds AA or BA 8361 */ 8362 .LOP_FILLED_NEW_ARRAY_continue: 8363 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8364 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8365 ldrb rINST, [r3, #1] @ rINST<- descriptor[1] 8366 .if 0 8367 mov r1, r10 @ r1<- AA (length) 8368 .else 8369 mov r1, r10, lsr #4 @ r1<- B (length) 8370 .endif 8371 cmp rINST, #'I' @ array of ints? 8372 cmpne rINST, #'L' @ array of objects? 8373 cmpne rINST, #'[' @ array of arrays? 8374 mov r9, r1 @ save length in r9 8375 bne .LOP_FILLED_NEW_ARRAY_notimpl @ no, not handled yet 8376 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8377 cmp r0, #0 @ null return? 8378 beq common_exceptionThrown @ alloc failed, handle exception 8379 8380 FETCH(r1, 2) @ r1<- FEDC or CCCC 8381 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8382 str rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type 8383 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8384 subs r9, r9, #1 @ length--, check for neg 8385 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8386 bmi 2f @ was zero, bail 8387 8388 @ copy values from registers into the array 8389 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8390 .if 0 8391 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 8392 1: ldr r3, [r2], #4 @ r3<- *r2++ 8393 subs r9, r9, #1 @ count-- 8394 str r3, [r0], #4 @ *contents++ = vX 8395 bpl 1b 8396 @ continue at 2 8397 .else 8398 cmp r9, #4 @ length was initially 5? 
8399 and r2, r10, #15 @ r2<- A 8400 bne 1f @ <= 4 args, branch 8401 GET_VREG(r3, r2) @ r3<- vA 8402 sub r9, r9, #1 @ count-- 8403 str r3, [r0, #16] @ contents[4] = vA 8404 1: and r2, r1, #15 @ r2<- F/E/D/C 8405 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8406 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8407 subs r9, r9, #1 @ count-- 8408 str r3, [r0], #4 @ *contents++ = vX 8409 bpl 1b 8410 @ continue at 2 8411 .endif 8412 8413 2: 8414 ldr r0, [rGLUE, #offGlue_retval] @ r0<- object 8415 ldr r1, [rGLUE, #offGlue_retval+4] @ r1<- type 8416 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8417 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8418 cmp r1, #'I' @ Is int array? 8419 strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head 8420 GOTO_OPCODE(ip) @ execute it 8421 8422 /* 8423 * Throw an exception indicating that we have not implemented this 8424 * mode of filled-new-array. 8425 */ 8426 .LOP_FILLED_NEW_ARRAY_notimpl: 8427 ldr r0, .L_strInternalError 8428 ldr r1, .L_strFilledNewArrayNotImpl 8429 bl dvmThrowException 8430 b common_exceptionThrown 8431 8432 .if (!0) @ define in one or the other, not both 8433 .L_strFilledNewArrayNotImpl: 8434 .word .LstrFilledNewArrayNotImpl 8435 .L_strInternalError: 8436 .word .LstrInternalError 8437 .endif 8438 8439 /* continuation for OP_FILLED_NEW_ARRAY_RANGE */ 8440 8441 /* 8442 * On entry: 8443 * r0 holds array class 8444 * r10 holds AA or BA 8445 */ 8446 .LOP_FILLED_NEW_ARRAY_RANGE_continue: 8447 ldr r3, [r0, #offClassObject_descriptor] @ r3<- arrayClass->descriptor 8448 mov r2, #ALLOC_DONT_TRACK @ r2<- alloc flags 8449 ldrb rINST, [r3, #1] @ rINST<- descriptor[1] 8450 .if 1 8451 mov r1, r10 @ r1<- AA (length) 8452 .else 8453 mov r1, r10, lsr #4 @ r1<- B (length) 8454 .endif 8455 cmp rINST, #'I' @ array of ints? 8456 cmpne rINST, #'L' @ array of objects? 8457 cmpne rINST, #'[' @ array of arrays? 
8458 mov r9, r1 @ save length in r9 8459 bne .LOP_FILLED_NEW_ARRAY_RANGE_notimpl @ no, not handled yet 8460 bl dvmAllocArrayByClass @ r0<- call(arClass, length, flags) 8461 cmp r0, #0 @ null return? 8462 beq common_exceptionThrown @ alloc failed, handle exception 8463 8464 FETCH(r1, 2) @ r1<- FEDC or CCCC 8465 str r0, [rGLUE, #offGlue_retval] @ retval.l <- new array 8466 str rINST, [rGLUE, #offGlue_retval+4] @ retval.h <- type 8467 add r0, r0, #offArrayObject_contents @ r0<- newArray->contents 8468 subs r9, r9, #1 @ length--, check for neg 8469 FETCH_ADVANCE_INST(3) @ advance to next instr, load rINST 8470 bmi 2f @ was zero, bail 8471 8472 @ copy values from registers into the array 8473 @ r0=array, r1=CCCC/FEDC, r9=length (from AA or B), r10=AA/BA 8474 .if 1 8475 add r2, rFP, r1, lsl #2 @ r2<- &fp[CCCC] 8476 1: ldr r3, [r2], #4 @ r3<- *r2++ 8477 subs r9, r9, #1 @ count-- 8478 str r3, [r0], #4 @ *contents++ = vX 8479 bpl 1b 8480 @ continue at 2 8481 .else 8482 cmp r9, #4 @ length was initially 5? 8483 and r2, r10, #15 @ r2<- A 8484 bne 1f @ <= 4 args, branch 8485 GET_VREG(r3, r2) @ r3<- vA 8486 sub r9, r9, #1 @ count-- 8487 str r3, [r0, #16] @ contents[4] = vA 8488 1: and r2, r1, #15 @ r2<- F/E/D/C 8489 GET_VREG(r3, r2) @ r3<- vF/vE/vD/vC 8490 mov r1, r1, lsr #4 @ r1<- next reg in low 4 8491 subs r9, r9, #1 @ count-- 8492 str r3, [r0], #4 @ *contents++ = vX 8493 bpl 1b 8494 @ continue at 2 8495 .endif 8496 8497 2: 8498 ldr r0, [rGLUE, #offGlue_retval] @ r0<- object 8499 ldr r1, [rGLUE, #offGlue_retval+4] @ r1<- type 8500 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8501 GET_INST_OPCODE(ip) @ ip<- opcode from rINST 8502 cmp r1, #'I' @ Is int array? 8503 strneb r2, [r2, r0, lsr #GC_CARD_SHIFT] @ Mark card based on object head 8504 GOTO_OPCODE(ip) @ execute it 8505 8506 /* 8507 * Throw an exception indicating that we have not implemented this 8508 * mode of filled-new-array. 
8509 */ 8510 .LOP_FILLED_NEW_ARRAY_RANGE_notimpl: 8511 ldr r0, .L_strInternalError 8512 ldr r1, .L_strFilledNewArrayNotImpl 8513 bl dvmThrowException 8514 b common_exceptionThrown 8515 8516 .if (!1) @ define in one or the other, not both 8517 .L_strFilledNewArrayNotImpl: 8518 .word .LstrFilledNewArrayNotImpl 8519 .L_strInternalError: 8520 .word .LstrInternalError 8521 .endif 8522 8523 /* continuation for OP_CMPL_FLOAT */ 8524 8525 @ Test for NaN with a second comparison. EABI forbids testing bit 8526 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8527 @ make the library call. 8528 .LOP_CMPL_FLOAT_gt_or_nan: 8529 mov r1, r9 @ reverse order 8530 mov r0, r10 8531 bl __aeabi_cfcmple @ r0<- Z set if eq, C clear if < 8532 @bleq common_abort 8533 movcc r1, #1 @ (greater than) r1<- 1 8534 bcc .LOP_CMPL_FLOAT_finish 8535 mvn r1, #0 @ r1<- 1 or -1 for NaN 8536 b .LOP_CMPL_FLOAT_finish 8537 8538 8539 #if 0 /* "clasic" form */ 8540 FETCH(r0, 1) @ r0<- CCBB 8541 and r2, r0, #255 @ r2<- BB 8542 mov r3, r0, lsr #8 @ r3<- CC 8543 GET_VREG(r9, r2) @ r9<- vBB 8544 GET_VREG(r10, r3) @ r10<- vCC 8545 mov r0, r9 @ r0<- vBB 8546 mov r1, r10 @ r1<- vCC 8547 bl __aeabi_fcmpeq @ r0<- (vBB == vCC) 8548 cmp r0, #0 @ equal? 8549 movne r1, #0 @ yes, result is 0 8550 bne OP_CMPL_FLOAT_finish 8551 mov r0, r9 @ r0<- vBB 8552 mov r1, r10 @ r1<- vCC 8553 bl __aeabi_fcmplt @ r0<- (vBB < vCC) 8554 cmp r0, #0 @ less than? 8555 b OP_CMPL_FLOAT_continue 8556 @%break 8557 8558 OP_CMPL_FLOAT_continue: 8559 mvnne r1, #0 @ yes, result is -1 8560 bne OP_CMPL_FLOAT_finish 8561 mov r0, r9 @ r0<- vBB 8562 mov r1, r10 @ r1<- vCC 8563 bl __aeabi_fcmpgt @ r0<- (vBB > vCC) 8564 cmp r0, #0 @ greater than? 
8565 beq OP_CMPL_FLOAT_nan @ no, must be NaN 8566 mov r1, #1 @ yes, result is 1 8567 @ fall through to _finish 8568 8569 OP_CMPL_FLOAT_finish: 8570 mov r3, rINST, lsr #8 @ r3<- AA 8571 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8572 SET_VREG(r1, r3) @ vAA<- r1 8573 GET_INST_OPCODE(ip) @ extract opcode from rINST 8574 GOTO_OPCODE(ip) @ jump to next instruction 8575 8576 /* 8577 * This is expected to be uncommon, so we double-branch (once to here, 8578 * again back to _finish). 8579 */ 8580 OP_CMPL_FLOAT_nan: 8581 mvn r1, #0 @ r1<- 1 or -1 for NaN 8582 b OP_CMPL_FLOAT_finish 8583 8584 #endif 8585 8586 /* continuation for OP_CMPG_FLOAT */ 8587 8588 @ Test for NaN with a second comparison. EABI forbids testing bit 8589 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8590 @ make the library call. 8591 .LOP_CMPG_FLOAT_gt_or_nan: 8592 mov r1, r9 @ reverse order 8593 mov r0, r10 8594 bl __aeabi_cfcmple @ r0<- Z set if eq, C clear if < 8595 @bleq common_abort 8596 movcc r1, #1 @ (greater than) r1<- 1 8597 bcc .LOP_CMPG_FLOAT_finish 8598 mov r1, #1 @ r1<- 1 or -1 for NaN 8599 b .LOP_CMPG_FLOAT_finish 8600 8601 8602 #if 0 /* "clasic" form */ 8603 FETCH(r0, 1) @ r0<- CCBB 8604 and r2, r0, #255 @ r2<- BB 8605 mov r3, r0, lsr #8 @ r3<- CC 8606 GET_VREG(r9, r2) @ r9<- vBB 8607 GET_VREG(r10, r3) @ r10<- vCC 8608 mov r0, r9 @ r0<- vBB 8609 mov r1, r10 @ r1<- vCC 8610 bl __aeabi_fcmpeq @ r0<- (vBB == vCC) 8611 cmp r0, #0 @ equal? 8612 movne r1, #0 @ yes, result is 0 8613 bne OP_CMPG_FLOAT_finish 8614 mov r0, r9 @ r0<- vBB 8615 mov r1, r10 @ r1<- vCC 8616 bl __aeabi_fcmplt @ r0<- (vBB < vCC) 8617 cmp r0, #0 @ less than? 8618 b OP_CMPG_FLOAT_continue 8619 @%break 8620 8621 OP_CMPG_FLOAT_continue: 8622 mvnne r1, #0 @ yes, result is -1 8623 bne OP_CMPG_FLOAT_finish 8624 mov r0, r9 @ r0<- vBB 8625 mov r1, r10 @ r1<- vCC 8626 bl __aeabi_fcmpgt @ r0<- (vBB > vCC) 8627 cmp r0, #0 @ greater than? 
8628 beq OP_CMPG_FLOAT_nan @ no, must be NaN 8629 mov r1, #1 @ yes, result is 1 8630 @ fall through to _finish 8631 8632 OP_CMPG_FLOAT_finish: 8633 mov r3, rINST, lsr #8 @ r3<- AA 8634 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8635 SET_VREG(r1, r3) @ vAA<- r1 8636 GET_INST_OPCODE(ip) @ extract opcode from rINST 8637 GOTO_OPCODE(ip) @ jump to next instruction 8638 8639 /* 8640 * This is expected to be uncommon, so we double-branch (once to here, 8641 * again back to _finish). 8642 */ 8643 OP_CMPG_FLOAT_nan: 8644 mov r1, #1 @ r1<- 1 or -1 for NaN 8645 b OP_CMPG_FLOAT_finish 8646 8647 #endif 8648 8649 /* continuation for OP_CMPL_DOUBLE */ 8650 8651 @ Test for NaN with a second comparison. EABI forbids testing bit 8652 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8653 @ make the library call. 8654 .LOP_CMPL_DOUBLE_gt_or_nan: 8655 ldmia r10, {r0-r1} @ reverse order 8656 ldmia r9, {r2-r3} 8657 bl __aeabi_cdcmple @ r0<- Z set if eq, C clear if < 8658 @bleq common_abort 8659 movcc r1, #1 @ (greater than) r1<- 1 8660 bcc .LOP_CMPL_DOUBLE_finish 8661 mvn r1, #0 @ r1<- 1 or -1 for NaN 8662 b .LOP_CMPL_DOUBLE_finish 8663 8664 /* continuation for OP_CMPG_DOUBLE */ 8665 8666 @ Test for NaN with a second comparison. EABI forbids testing bit 8667 @ patterns, and we can't represent 0x7fc00000 in immediate form, so 8668 @ make the library call. 8669 .LOP_CMPG_DOUBLE_gt_or_nan: 8670 ldmia r10, {r0-r1} @ reverse order 8671 ldmia r9, {r2-r3} 8672 bl __aeabi_cdcmple @ r0<- Z set if eq, C clear if < 8673 @bleq common_abort 8674 movcc r1, #1 @ (greater than) r1<- 1 8675 bcc .LOP_CMPG_DOUBLE_finish 8676 mov r1, #1 @ r1<- 1 or -1 for NaN 8677 b .LOP_CMPG_DOUBLE_finish 8678 8679 /* continuation for OP_CMP_LONG */ 8680 8681 .LOP_CMP_LONG_less: 8682 mvn r1, #0 @ r1<- -1 8683 @ Want to cond code the next mov so we can avoid branch, but don't see it; 8684 @ instead, we just replicate the tail end. 
8685 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8686 SET_VREG(r1, r9) @ vAA<- r1 8687 GET_INST_OPCODE(ip) @ extract opcode from rINST 8688 GOTO_OPCODE(ip) @ jump to next instruction 8689 8690 .LOP_CMP_LONG_greater: 8691 mov r1, #1 @ r1<- 1 8692 @ fall through to _finish 8693 8694 .LOP_CMP_LONG_finish: 8695 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8696 SET_VREG(r1, r9) @ vAA<- r1 8697 GET_INST_OPCODE(ip) @ extract opcode from rINST 8698 GOTO_OPCODE(ip) @ jump to next instruction 8699 8700 /* continuation for OP_AGET_WIDE */ 8701 8702 .LOP_AGET_WIDE_finish: 8703 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8704 ldrd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8705 add r9, rFP, r9, lsl #2 @ r9<- &fp[AA] 8706 GET_INST_OPCODE(ip) @ extract opcode from rINST 8707 stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3 8708 GOTO_OPCODE(ip) @ jump to next instruction 8709 8710 /* continuation for OP_APUT_WIDE */ 8711 8712 .LOP_APUT_WIDE_finish: 8713 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8714 ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1 8715 GET_INST_OPCODE(ip) @ extract opcode from rINST 8716 strd r2, [r0, #offArrayObject_contents] @ r2/r3<- vBB[vCC] 8717 GOTO_OPCODE(ip) @ jump to next instruction 8718 8719 /* continuation for OP_APUT_OBJECT */ 8720 /* 8721 * On entry: 8722 * rINST = vBB (arrayObj) 8723 * r9 = vAA (obj) 8724 * r10 = offset into array (vBB + vCC * width) 8725 */ 8726 .LOP_APUT_OBJECT_finish: 8727 cmp r9, #0 @ storing null reference? 8728 beq .LOP_APUT_OBJECT_skip_check @ yes, skip type checks 8729 ldr r0, [r9, #offObject_clazz] @ r0<- obj->clazz 8730 ldr r1, [rINST, #offObject_clazz] @ r1<- arrayObj->clazz 8731 bl dvmCanPutArrayElement @ test object type vs. array type 8732 cmp r0, #0 @ okay? 
    beq     common_errArrayStore        @ no
    mov     r1, rINST                   @ r1<- arrayObj
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    ldr     r2, [rGLUE, #offGlue_cardTable]     @ get biased CT base
    add     r10, #offArrayObject_contents   @ r10<- pointer to slot
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r9, [r10]                   @ vBB[vCC]<- vAA
    strb    r2, [r2, r1, lsr #GC_CARD_SHIFT] @ mark card using object head
    GOTO_OPCODE(ip)                     @ jump to next instruction
.LOP_APUT_OBJECT_skip_check:
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    str     r9, [r10, #offArrayObject_contents] @ vBB[vCC]<- vAA (null, no card mark)
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_finish:
    @bl      common_squeak0
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    ldr     r0, [r9, r3]                @ r0<- obj.field (8/16/32 bits)
    @ no-op                             @ acquiring load
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load rINST
    and     r2, r2, #15                 @ r2<- A
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    SET_VREG(r0, r2)                    @ fp[A]<- r0
    GOTO_OPCODE(ip)                     @ jump to next instruction

/* continuation for OP_IGET_WIDE */

    /*
     * Currently:
     *  r0 holds resolved field
     *  r9 holds object
     */
.LOP_IGET_WIDE_finish:
    cmp     r9, #0                      @ check object for null
    ldr     r3, [r0, #offInstField_byteOffset]  @ r3<- byte offset of field
    beq     common_errNullObject        @ object was null
    .if     0
    add     r0, r9, r3                  @ r0<- address of field
    bl      dvmQuasiAtomicRead64        @ r0/r1<- contents of field
    .else
    ldrd    r0, [r9, r3]                @ r0/r1<- obj.field (64-bit align ok)
    .endif
    mov     r2, rINST, lsr #8           @ r2<- A+
    FETCH_ADVANCE_INST(2)               @ advance rPC, load
rINST 8788 and r2, r2, #15 @ r2<- A 8789 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 8790 GET_INST_OPCODE(ip) @ extract opcode from rINST 8791 stmia r3, {r0-r1} @ fp[A]<- r0/r1 8792 GOTO_OPCODE(ip) @ jump to next instruction 8793 8794 /* continuation for OP_IGET_OBJECT */ 8795 8796 /* 8797 * Currently: 8798 * r0 holds resolved field 8799 * r9 holds object 8800 */ 8801 .LOP_IGET_OBJECT_finish: 8802 @bl common_squeak0 8803 cmp r9, #0 @ check object for null 8804 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8805 beq common_errNullObject @ object was null 8806 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8807 @ no-op @ acquiring load 8808 mov r2, rINST, lsr #8 @ r2<- A+ 8809 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8810 and r2, r2, #15 @ r2<- A 8811 GET_INST_OPCODE(ip) @ extract opcode from rINST 8812 SET_VREG(r0, r2) @ fp[A]<- r0 8813 GOTO_OPCODE(ip) @ jump to next instruction 8814 8815 /* continuation for OP_IGET_BOOLEAN */ 8816 8817 /* 8818 * Currently: 8819 * r0 holds resolved field 8820 * r9 holds object 8821 */ 8822 .LOP_IGET_BOOLEAN_finish: 8823 @bl common_squeak1 8824 cmp r9, #0 @ check object for null 8825 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8826 beq common_errNullObject @ object was null 8827 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8828 @ no-op @ acquiring load 8829 mov r2, rINST, lsr #8 @ r2<- A+ 8830 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8831 and r2, r2, #15 @ r2<- A 8832 GET_INST_OPCODE(ip) @ extract opcode from rINST 8833 SET_VREG(r0, r2) @ fp[A]<- r0 8834 GOTO_OPCODE(ip) @ jump to next instruction 8835 8836 /* continuation for OP_IGET_BYTE */ 8837 8838 /* 8839 * Currently: 8840 * r0 holds resolved field 8841 * r9 holds object 8842 */ 8843 .LOP_IGET_BYTE_finish: 8844 @bl common_squeak2 8845 cmp r9, #0 @ check object for null 8846 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8847 beq common_errNullObject @ object was null 8848 ldr r0, [r9, r3] @ r0<- 
obj.field (8/16/32 bits) 8849 @ no-op @ acquiring load 8850 mov r2, rINST, lsr #8 @ r2<- A+ 8851 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8852 and r2, r2, #15 @ r2<- A 8853 GET_INST_OPCODE(ip) @ extract opcode from rINST 8854 SET_VREG(r0, r2) @ fp[A]<- r0 8855 GOTO_OPCODE(ip) @ jump to next instruction 8856 8857 /* continuation for OP_IGET_CHAR */ 8858 8859 /* 8860 * Currently: 8861 * r0 holds resolved field 8862 * r9 holds object 8863 */ 8864 .LOP_IGET_CHAR_finish: 8865 @bl common_squeak3 8866 cmp r9, #0 @ check object for null 8867 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8868 beq common_errNullObject @ object was null 8869 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8870 @ no-op @ acquiring load 8871 mov r2, rINST, lsr #8 @ r2<- A+ 8872 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8873 and r2, r2, #15 @ r2<- A 8874 GET_INST_OPCODE(ip) @ extract opcode from rINST 8875 SET_VREG(r0, r2) @ fp[A]<- r0 8876 GOTO_OPCODE(ip) @ jump to next instruction 8877 8878 /* continuation for OP_IGET_SHORT */ 8879 8880 /* 8881 * Currently: 8882 * r0 holds resolved field 8883 * r9 holds object 8884 */ 8885 .LOP_IGET_SHORT_finish: 8886 @bl common_squeak4 8887 cmp r9, #0 @ check object for null 8888 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8889 beq common_errNullObject @ object was null 8890 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 8891 @ no-op @ acquiring load 8892 mov r2, rINST, lsr #8 @ r2<- A+ 8893 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8894 and r2, r2, #15 @ r2<- A 8895 GET_INST_OPCODE(ip) @ extract opcode from rINST 8896 SET_VREG(r0, r2) @ fp[A]<- r0 8897 GOTO_OPCODE(ip) @ jump to next instruction 8898 8899 /* continuation for OP_IPUT */ 8900 8901 /* 8902 * Currently: 8903 * r0 holds resolved field 8904 * r9 holds object 8905 */ 8906 .LOP_IPUT_finish: 8907 @bl common_squeak0 8908 mov r1, rINST, lsr #8 @ r1<- A+ 8909 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8910 and r1, 
r1, #15 @ r1<- A 8911 cmp r9, #0 @ check object for null 8912 GET_VREG(r0, r1) @ r0<- fp[A] 8913 beq common_errNullObject @ object was null 8914 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8915 GET_INST_OPCODE(ip) @ extract opcode from rINST 8916 @ no-op @ releasing store 8917 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8918 GOTO_OPCODE(ip) @ jump to next instruction 8919 8920 /* continuation for OP_IPUT_WIDE */ 8921 8922 /* 8923 * Currently: 8924 * r0 holds resolved field 8925 * r9 holds object 8926 */ 8927 .LOP_IPUT_WIDE_finish: 8928 mov r2, rINST, lsr #8 @ r2<- A+ 8929 cmp r9, #0 @ check object for null 8930 and r2, r2, #15 @ r2<- A 8931 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8932 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 8933 beq common_errNullObject @ object was null 8934 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8935 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 8936 GET_INST_OPCODE(r10) @ extract opcode from rINST 8937 .if 0 8938 add r2, r9, r3 @ r2<- target address 8939 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 8940 .else 8941 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1 8942 .endif 8943 GOTO_OPCODE(r10) @ jump to next instruction 8944 8945 /* continuation for OP_IPUT_OBJECT */ 8946 8947 /* 8948 * Currently: 8949 * r0 holds resolved field 8950 * r9 holds object 8951 */ 8952 .LOP_IPUT_OBJECT_finish: 8953 @bl common_squeak0 8954 mov r1, rINST, lsr #8 @ r1<- A+ 8955 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8956 and r1, r1, #15 @ r1<- A 8957 cmp r9, #0 @ check object for null 8958 GET_VREG(r0, r1) @ r0<- fp[A] 8959 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 8960 beq common_errNullObject @ object was null 8961 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8962 GET_INST_OPCODE(ip) @ extract opcode from rINST 8963 @ no-op @ releasing store 8964 str r0, [r9, r3] @ obj.field (32 bits)<- r0 8965 cmp r0, #0 @ stored a null reference? 
8966 strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not 8967 GOTO_OPCODE(ip) @ jump to next instruction 8968 8969 /* continuation for OP_IPUT_BOOLEAN */ 8970 8971 /* 8972 * Currently: 8973 * r0 holds resolved field 8974 * r9 holds object 8975 */ 8976 .LOP_IPUT_BOOLEAN_finish: 8977 @bl common_squeak1 8978 mov r1, rINST, lsr #8 @ r1<- A+ 8979 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 8980 and r1, r1, #15 @ r1<- A 8981 cmp r9, #0 @ check object for null 8982 GET_VREG(r0, r1) @ r0<- fp[A] 8983 beq common_errNullObject @ object was null 8984 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 8985 GET_INST_OPCODE(ip) @ extract opcode from rINST 8986 @ no-op @ releasing store 8987 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 8988 GOTO_OPCODE(ip) @ jump to next instruction 8989 8990 /* continuation for OP_IPUT_BYTE */ 8991 8992 /* 8993 * Currently: 8994 * r0 holds resolved field 8995 * r9 holds object 8996 */ 8997 .LOP_IPUT_BYTE_finish: 8998 @bl common_squeak2 8999 mov r1, rINST, lsr #8 @ r1<- A+ 9000 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9001 and r1, r1, #15 @ r1<- A 9002 cmp r9, #0 @ check object for null 9003 GET_VREG(r0, r1) @ r0<- fp[A] 9004 beq common_errNullObject @ object was null 9005 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9006 GET_INST_OPCODE(ip) @ extract opcode from rINST 9007 @ no-op @ releasing store 9008 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 9009 GOTO_OPCODE(ip) @ jump to next instruction 9010 9011 /* continuation for OP_IPUT_CHAR */ 9012 9013 /* 9014 * Currently: 9015 * r0 holds resolved field 9016 * r9 holds object 9017 */ 9018 .LOP_IPUT_CHAR_finish: 9019 @bl common_squeak3 9020 mov r1, rINST, lsr #8 @ r1<- A+ 9021 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9022 and r1, r1, #15 @ r1<- A 9023 cmp r9, #0 @ check object for null 9024 GET_VREG(r0, r1) @ r0<- fp[A] 9025 beq common_errNullObject @ object was null 9026 FETCH_ADVANCE_INST(2) @ advance rPC, 
load rINST 9027 GET_INST_OPCODE(ip) @ extract opcode from rINST 9028 @ no-op @ releasing store 9029 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 9030 GOTO_OPCODE(ip) @ jump to next instruction 9031 9032 /* continuation for OP_IPUT_SHORT */ 9033 9034 /* 9035 * Currently: 9036 * r0 holds resolved field 9037 * r9 holds object 9038 */ 9039 .LOP_IPUT_SHORT_finish: 9040 @bl common_squeak4 9041 mov r1, rINST, lsr #8 @ r1<- A+ 9042 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9043 and r1, r1, #15 @ r1<- A 9044 cmp r9, #0 @ check object for null 9045 GET_VREG(r0, r1) @ r0<- fp[A] 9046 beq common_errNullObject @ object was null 9047 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9048 GET_INST_OPCODE(ip) @ extract opcode from rINST 9049 @ no-op @ releasing store 9050 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 9051 GOTO_OPCODE(ip) @ jump to next instruction 9052 9053 /* continuation for OP_SGET */ 9054 9055 /* 9056 * Continuation if the field has not yet been resolved. 9057 * r1: BBBB field ref 9058 */ 9059 .LOP_SGET_resolve: 9060 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9061 EXPORT_PC() @ resolve() could throw, so export now 9062 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9063 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9064 cmp r0, #0 @ success? 9065 bne .LOP_SGET_finish @ yes, finish 9066 b common_exceptionThrown @ no, handle exception 9067 9068 /* continuation for OP_SGET_WIDE */ 9069 9070 /* 9071 * Continuation if the field has not yet been resolved. 9072 * r1: BBBB field ref 9073 * 9074 * Returns StaticField pointer in r0. 9075 */ 9076 .LOP_SGET_WIDE_resolve: 9077 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9078 EXPORT_PC() @ resolve() could throw, so export now 9079 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9080 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9081 cmp r0, #0 @ success? 
9082 bne .LOP_SGET_WIDE_finish @ yes, finish 9083 b common_exceptionThrown @ no, handle exception 9084 9085 /* continuation for OP_SGET_OBJECT */ 9086 9087 /* 9088 * Continuation if the field has not yet been resolved. 9089 * r1: BBBB field ref 9090 */ 9091 .LOP_SGET_OBJECT_resolve: 9092 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9093 EXPORT_PC() @ resolve() could throw, so export now 9094 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9095 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9096 cmp r0, #0 @ success? 9097 bne .LOP_SGET_OBJECT_finish @ yes, finish 9098 b common_exceptionThrown @ no, handle exception 9099 9100 /* continuation for OP_SGET_BOOLEAN */ 9101 9102 /* 9103 * Continuation if the field has not yet been resolved. 9104 * r1: BBBB field ref 9105 */ 9106 .LOP_SGET_BOOLEAN_resolve: 9107 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9108 EXPORT_PC() @ resolve() could throw, so export now 9109 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9110 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9111 cmp r0, #0 @ success? 9112 bne .LOP_SGET_BOOLEAN_finish @ yes, finish 9113 b common_exceptionThrown @ no, handle exception 9114 9115 /* continuation for OP_SGET_BYTE */ 9116 9117 /* 9118 * Continuation if the field has not yet been resolved. 9119 * r1: BBBB field ref 9120 */ 9121 .LOP_SGET_BYTE_resolve: 9122 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9123 EXPORT_PC() @ resolve() could throw, so export now 9124 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9125 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9126 cmp r0, #0 @ success? 9127 bne .LOP_SGET_BYTE_finish @ yes, finish 9128 b common_exceptionThrown @ no, handle exception 9129 9130 /* continuation for OP_SGET_CHAR */ 9131 9132 /* 9133 * Continuation if the field has not yet been resolved. 
9134 * r1: BBBB field ref 9135 */ 9136 .LOP_SGET_CHAR_resolve: 9137 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9138 EXPORT_PC() @ resolve() could throw, so export now 9139 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9140 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9141 cmp r0, #0 @ success? 9142 bne .LOP_SGET_CHAR_finish @ yes, finish 9143 b common_exceptionThrown @ no, handle exception 9144 9145 /* continuation for OP_SGET_SHORT */ 9146 9147 /* 9148 * Continuation if the field has not yet been resolved. 9149 * r1: BBBB field ref 9150 */ 9151 .LOP_SGET_SHORT_resolve: 9152 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9153 EXPORT_PC() @ resolve() could throw, so export now 9154 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9155 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9156 cmp r0, #0 @ success? 9157 bne .LOP_SGET_SHORT_finish @ yes, finish 9158 b common_exceptionThrown @ no, handle exception 9159 9160 /* continuation for OP_SPUT */ 9161 9162 /* 9163 * Continuation if the field has not yet been resolved. 9164 * r1: BBBB field ref 9165 */ 9166 .LOP_SPUT_resolve: 9167 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9168 EXPORT_PC() @ resolve() could throw, so export now 9169 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9170 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9171 cmp r0, #0 @ success? 9172 bne .LOP_SPUT_finish @ yes, finish 9173 b common_exceptionThrown @ no, handle exception 9174 9175 /* continuation for OP_SPUT_WIDE */ 9176 9177 /* 9178 * Continuation if the field has not yet been resolved. 9179 * r1: BBBB field ref 9180 * r9: &fp[AA] 9181 * 9182 * Returns StaticField pointer in r2. 
9183 */ 9184 .LOP_SPUT_WIDE_resolve: 9185 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9186 EXPORT_PC() @ resolve() could throw, so export now 9187 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9188 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9189 cmp r0, #0 @ success? 9190 mov r2, r0 @ copy to r2 9191 bne .LOP_SPUT_WIDE_finish @ yes, finish 9192 b common_exceptionThrown @ no, handle exception 9193 9194 /* continuation for OP_SPUT_OBJECT */ 9195 .LOP_SPUT_OBJECT_finish: @ field ptr in r0 9196 mov r2, rINST, lsr #8 @ r2<- AA 9197 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9198 GET_VREG(r1, r2) @ r1<- fp[AA] 9199 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 9200 ldr r9, [r0, #offField_clazz] @ r9<- field->clazz 9201 GET_INST_OPCODE(ip) @ extract opcode from rINST 9202 @ no-op @ releasing store 9203 str r1, [r0, #offStaticField_value] @ field<- vAA 9204 cmp r1, #0 @ stored a null object? 9205 strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card based on obj head 9206 GOTO_OPCODE(ip) @ jump to next instruction 9207 9208 /* continuation for OP_SPUT_BOOLEAN */ 9209 9210 /* 9211 * Continuation if the field has not yet been resolved. 9212 * r1: BBBB field ref 9213 */ 9214 .LOP_SPUT_BOOLEAN_resolve: 9215 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9216 EXPORT_PC() @ resolve() could throw, so export now 9217 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9218 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9219 cmp r0, #0 @ success? 9220 bne .LOP_SPUT_BOOLEAN_finish @ yes, finish 9221 b common_exceptionThrown @ no, handle exception 9222 9223 /* continuation for OP_SPUT_BYTE */ 9224 9225 /* 9226 * Continuation if the field has not yet been resolved. 
9227 * r1: BBBB field ref 9228 */ 9229 .LOP_SPUT_BYTE_resolve: 9230 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9231 EXPORT_PC() @ resolve() could throw, so export now 9232 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9233 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9234 cmp r0, #0 @ success? 9235 bne .LOP_SPUT_BYTE_finish @ yes, finish 9236 b common_exceptionThrown @ no, handle exception 9237 9238 /* continuation for OP_SPUT_CHAR */ 9239 9240 /* 9241 * Continuation if the field has not yet been resolved. 9242 * r1: BBBB field ref 9243 */ 9244 .LOP_SPUT_CHAR_resolve: 9245 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9246 EXPORT_PC() @ resolve() could throw, so export now 9247 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9248 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9249 cmp r0, #0 @ success? 9250 bne .LOP_SPUT_CHAR_finish @ yes, finish 9251 b common_exceptionThrown @ no, handle exception 9252 9253 /* continuation for OP_SPUT_SHORT */ 9254 9255 /* 9256 * Continuation if the field has not yet been resolved. 9257 * r1: BBBB field ref 9258 */ 9259 .LOP_SPUT_SHORT_resolve: 9260 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9261 EXPORT_PC() @ resolve() could throw, so export now 9262 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9263 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9264 cmp r0, #0 @ success? 9265 bne .LOP_SPUT_SHORT_finish @ yes, finish 9266 b common_exceptionThrown @ no, handle exception 9267 9268 /* continuation for OP_INVOKE_VIRTUAL */ 9269 9270 /* 9271 * At this point: 9272 * r0 = resolved base method 9273 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 9274 */ 9275 .LOP_INVOKE_VIRTUAL_continue: 9276 GET_VREG(r1, r10) @ r1<- "this" ptr 9277 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9278 cmp r1, #0 @ is "this" null? 
9279 beq common_errNullObject @ null "this", throw exception 9280 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 9281 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 9282 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 9283 bl common_invokeMethodNoRange @ continue on 9284 9285 /* continuation for OP_INVOKE_SUPER */ 9286 9287 /* 9288 * At this point: 9289 * r0 = resolved base method 9290 * r9 = method->clazz 9291 */ 9292 .LOP_INVOKE_SUPER_continue: 9293 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 9294 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9295 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 9296 EXPORT_PC() @ must export for invoke 9297 cmp r2, r3 @ compare (methodIndex, vtableCount) 9298 bcs .LOP_INVOKE_SUPER_nsm @ method not present in superclass 9299 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 9300 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 9301 bl common_invokeMethodNoRange @ continue on 9302 9303 .LOP_INVOKE_SUPER_resolve: 9304 mov r0, r9 @ r0<- method->clazz 9305 mov r2, #METHOD_VIRTUAL @ resolver method type 9306 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9307 cmp r0, #0 @ got null? 9308 bne .LOP_INVOKE_SUPER_continue @ no, continue 9309 b common_exceptionThrown @ yes, handle exception 9310 9311 /* 9312 * Throw a NoSuchMethodError with the method name as the message. 
9313 * r0 = resolved base method 9314 */ 9315 .LOP_INVOKE_SUPER_nsm: 9316 ldr r1, [r0, #offMethod_name] @ r1<- method name 9317 b common_errNoSuchMethod 9318 9319 /* continuation for OP_INVOKE_DIRECT */ 9320 9321 /* 9322 * On entry: 9323 * r1 = reference (BBBB or CCCC) 9324 * r10 = "this" register 9325 */ 9326 .LOP_INVOKE_DIRECT_resolve: 9327 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 9328 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 9329 mov r2, #METHOD_DIRECT @ resolver method type 9330 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9331 cmp r0, #0 @ got null? 9332 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 9333 bne .LOP_INVOKE_DIRECT_finish @ no, continue 9334 b common_exceptionThrown @ yes, handle exception 9335 9336 /* continuation for OP_INVOKE_VIRTUAL_RANGE */ 9337 9338 /* 9339 * At this point: 9340 * r0 = resolved base method 9341 * r10 = C or CCCC (index of first arg, which is the "this" ptr) 9342 */ 9343 .LOP_INVOKE_VIRTUAL_RANGE_continue: 9344 GET_VREG(r1, r10) @ r1<- "this" ptr 9345 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9346 cmp r1, #0 @ is "this" null? 
9347 beq common_errNullObject @ null "this", throw exception 9348 ldr r3, [r1, #offObject_clazz] @ r1<- thisPtr->clazz 9349 ldr r3, [r3, #offClassObject_vtable] @ r3<- thisPtr->clazz->vtable 9350 ldr r0, [r3, r2, lsl #2] @ r3<- vtable[methodIndex] 9351 bl common_invokeMethodRange @ continue on 9352 9353 /* continuation for OP_INVOKE_SUPER_RANGE */ 9354 9355 /* 9356 * At this point: 9357 * r0 = resolved base method 9358 * r9 = method->clazz 9359 */ 9360 .LOP_INVOKE_SUPER_RANGE_continue: 9361 ldr r1, [r9, #offClassObject_super] @ r1<- method->clazz->super 9362 ldrh r2, [r0, #offMethod_methodIndex] @ r2<- baseMethod->methodIndex 9363 ldr r3, [r1, #offClassObject_vtableCount] @ r3<- super->vtableCount 9364 EXPORT_PC() @ must export for invoke 9365 cmp r2, r3 @ compare (methodIndex, vtableCount) 9366 bcs .LOP_INVOKE_SUPER_RANGE_nsm @ method not present in superclass 9367 ldr r1, [r1, #offClassObject_vtable] @ r1<- ...clazz->super->vtable 9368 ldr r0, [r1, r2, lsl #2] @ r3<- vtable[methodIndex] 9369 bl common_invokeMethodRange @ continue on 9370 9371 .LOP_INVOKE_SUPER_RANGE_resolve: 9372 mov r0, r9 @ r0<- method->clazz 9373 mov r2, #METHOD_VIRTUAL @ resolver method type 9374 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9375 cmp r0, #0 @ got null? 9376 bne .LOP_INVOKE_SUPER_RANGE_continue @ no, continue 9377 b common_exceptionThrown @ yes, handle exception 9378 9379 /* 9380 * Throw a NoSuchMethodError with the method name as the message. 
9381 * r0 = resolved base method 9382 */ 9383 .LOP_INVOKE_SUPER_RANGE_nsm: 9384 ldr r1, [r0, #offMethod_name] @ r1<- method name 9385 b common_errNoSuchMethod 9386 9387 /* continuation for OP_INVOKE_DIRECT_RANGE */ 9388 9389 /* 9390 * On entry: 9391 * r1 = reference (BBBB or CCCC) 9392 * r10 = "this" register 9393 */ 9394 .LOP_INVOKE_DIRECT_RANGE_resolve: 9395 ldr r3, [rGLUE, #offGlue_method] @ r3<- glue->method 9396 ldr r0, [r3, #offMethod_clazz] @ r0<- method->clazz 9397 mov r2, #METHOD_DIRECT @ resolver method type 9398 bl dvmResolveMethod @ r0<- call(clazz, ref, flags) 9399 cmp r0, #0 @ got null? 9400 GET_VREG(r2, r10) @ r2<- "this" ptr (reload) 9401 bne .LOP_INVOKE_DIRECT_RANGE_finish @ no, continue 9402 b common_exceptionThrown @ yes, handle exception 9403 9404 /* continuation for OP_FLOAT_TO_LONG */ 9405 /* 9406 * Convert the float in r0 to a long in r0/r1. 9407 * 9408 * We have to clip values to long min/max per the specification. The 9409 * expected common case is a "reasonable" value that converts directly 9410 * to modest integer. The EABI convert function isn't doing this for us. 9411 */ 9412 f2l_doconv: 9413 stmfd sp!, {r4, lr} 9414 mov r1, #0x5f000000 @ (float)maxlong 9415 mov r4, r0 9416 bl __aeabi_fcmpge @ is arg >= maxlong? 9417 cmp r0, #0 @ nonzero == yes 9418 mvnne r0, #0 @ return maxlong (7fffffff) 9419 mvnne r1, #0x80000000 9420 ldmnefd sp!, {r4, pc} 9421 9422 mov r0, r4 @ recover arg 9423 mov r1, #0xdf000000 @ (float)minlong 9424 bl __aeabi_fcmple @ is arg <= minlong? 9425 cmp r0, #0 @ nonzero == yes 9426 movne r0, #0 @ return minlong (80000000) 9427 movne r1, #0x80000000 9428 ldmnefd sp!, {r4, pc} 9429 9430 mov r0, r4 @ recover arg 9431 mov r1, r4 9432 bl __aeabi_fcmpeq @ is arg == self? 
9433 cmp r0, #0 @ zero == no 9434 moveq r1, #0 @ return zero for NaN 9435 ldmeqfd sp!, {r4, pc} 9436 9437 mov r0, r4 @ recover arg 9438 bl __aeabi_f2lz @ convert float to long 9439 ldmfd sp!, {r4, pc} 9440 9441 /* continuation for OP_DOUBLE_TO_LONG */ 9442 /* 9443 * Convert the double in r0/r1 to a long in r0/r1. 9444 * 9445 * We have to clip values to long min/max per the specification. The 9446 * expected common case is a "reasonable" value that converts directly 9447 * to modest integer. The EABI convert function isn't doing this for us. 9448 */ 9449 d2l_doconv: 9450 stmfd sp!, {r4, r5, lr} @ save regs 9451 mov r3, #0x43000000 @ maxlong, as a double (high word) 9452 add r3, #0x00e00000 @ 0x43e00000 9453 mov r2, #0 @ maxlong, as a double (low word) 9454 sub sp, sp, #4 @ align for EABI 9455 mov r4, r0 @ save a copy of r0 9456 mov r5, r1 @ and r1 9457 bl __aeabi_dcmpge @ is arg >= maxlong? 9458 cmp r0, #0 @ nonzero == yes 9459 mvnne r0, #0 @ return maxlong (7fffffffffffffff) 9460 mvnne r1, #0x80000000 9461 bne 1f 9462 9463 mov r0, r4 @ recover arg 9464 mov r1, r5 9465 mov r3, #0xc3000000 @ minlong, as a double (high word) 9466 add r3, #0x00e00000 @ 0xc3e00000 9467 mov r2, #0 @ minlong, as a double (low word) 9468 bl __aeabi_dcmple @ is arg <= minlong? 9469 cmp r0, #0 @ nonzero == yes 9470 movne r0, #0 @ return minlong (8000000000000000) 9471 movne r1, #0x80000000 9472 bne 1f 9473 9474 mov r0, r4 @ recover arg 9475 mov r1, r5 9476 mov r2, r4 @ compare against self 9477 mov r3, r5 9478 bl __aeabi_dcmpeq @ is arg == self? 
9479 cmp r0, #0 @ zero == no 9480 moveq r1, #0 @ return zero for NaN 9481 beq 1f 9482 9483 mov r0, r4 @ recover arg 9484 mov r1, r5 9485 bl __aeabi_d2lz @ convert double to long 9486 9487 1: 9488 add sp, sp, #4 9489 ldmfd sp!, {r4, r5, pc} 9490 9491 /* continuation for OP_MUL_LONG */ 9492 9493 .LOP_MUL_LONG_finish: 9494 GET_INST_OPCODE(ip) @ extract opcode from rINST 9495 stmia r0, {r9-r10} @ vAA/vAA+1<- r9/r10 9496 GOTO_OPCODE(ip) @ jump to next instruction 9497 9498 /* continuation for OP_SHL_LONG */ 9499 9500 .LOP_SHL_LONG_finish: 9501 mov r0, r0, asl r2 @ r0<- r0 << r2 9502 GET_INST_OPCODE(ip) @ extract opcode from rINST 9503 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9504 GOTO_OPCODE(ip) @ jump to next instruction 9505 9506 /* continuation for OP_SHR_LONG */ 9507 9508 .LOP_SHR_LONG_finish: 9509 mov r1, r1, asr r2 @ r1<- r1 >> r2 9510 GET_INST_OPCODE(ip) @ extract opcode from rINST 9511 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9512 GOTO_OPCODE(ip) @ jump to next instruction 9513 9514 /* continuation for OP_USHR_LONG */ 9515 9516 .LOP_USHR_LONG_finish: 9517 mov r1, r1, lsr r2 @ r1<- r1 >>> r2 9518 GET_INST_OPCODE(ip) @ extract opcode from rINST 9519 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9520 GOTO_OPCODE(ip) @ jump to next instruction 9521 9522 /* continuation for OP_SHL_LONG_2ADDR */ 9523 9524 .LOP_SHL_LONG_2ADDR_finish: 9525 GET_INST_OPCODE(ip) @ extract opcode from rINST 9526 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9527 GOTO_OPCODE(ip) @ jump to next instruction 9528 9529 /* continuation for OP_SHR_LONG_2ADDR */ 9530 9531 .LOP_SHR_LONG_2ADDR_finish: 9532 GET_INST_OPCODE(ip) @ extract opcode from rINST 9533 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9534 GOTO_OPCODE(ip) @ jump to next instruction 9535 9536 /* continuation for OP_USHR_LONG_2ADDR */ 9537 9538 .LOP_USHR_LONG_2ADDR_finish: 9539 GET_INST_OPCODE(ip) @ extract opcode from rINST 9540 stmia r9, {r0-r1} @ vAA/vAA+1<- r0/r1 9541 GOTO_OPCODE(ip) @ jump to next instruction 9542 9543 /* continuation for OP_IGET_VOLATILE */ 
9544 9545 /* 9546 * Currently: 9547 * r0 holds resolved field 9548 * r9 holds object 9549 */ 9550 .LOP_IGET_VOLATILE_finish: 9551 @bl common_squeak0 9552 cmp r9, #0 @ check object for null 9553 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9554 beq common_errNullObject @ object was null 9555 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 9556 SMP_DMB @ acquiring load 9557 mov r2, rINST, lsr #8 @ r2<- A+ 9558 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9559 and r2, r2, #15 @ r2<- A 9560 GET_INST_OPCODE(ip) @ extract opcode from rINST 9561 SET_VREG(r0, r2) @ fp[A]<- r0 9562 GOTO_OPCODE(ip) @ jump to next instruction 9563 9564 /* continuation for OP_IPUT_VOLATILE */ 9565 9566 /* 9567 * Currently: 9568 * r0 holds resolved field 9569 * r9 holds object 9570 */ 9571 .LOP_IPUT_VOLATILE_finish: 9572 @bl common_squeak0 9573 mov r1, rINST, lsr #8 @ r1<- A+ 9574 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9575 and r1, r1, #15 @ r1<- A 9576 cmp r9, #0 @ check object for null 9577 GET_VREG(r0, r1) @ r0<- fp[A] 9578 beq common_errNullObject @ object was null 9579 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9580 GET_INST_OPCODE(ip) @ extract opcode from rINST 9581 SMP_DMB @ releasing store 9582 str r0, [r9, r3] @ obj.field (8/16/32 bits)<- r0 9583 GOTO_OPCODE(ip) @ jump to next instruction 9584 9585 /* continuation for OP_SGET_VOLATILE */ 9586 9587 /* 9588 * Continuation if the field has not yet been resolved. 9589 * r1: BBBB field ref 9590 */ 9591 .LOP_SGET_VOLATILE_resolve: 9592 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9593 EXPORT_PC() @ resolve() could throw, so export now 9594 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9595 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9596 cmp r0, #0 @ success? 
9597 bne .LOP_SGET_VOLATILE_finish @ yes, finish 9598 b common_exceptionThrown @ no, handle exception 9599 9600 /* continuation for OP_SPUT_VOLATILE */ 9601 9602 /* 9603 * Continuation if the field has not yet been resolved. 9604 * r1: BBBB field ref 9605 */ 9606 .LOP_SPUT_VOLATILE_resolve: 9607 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9608 EXPORT_PC() @ resolve() could throw, so export now 9609 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9610 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9611 cmp r0, #0 @ success? 9612 bne .LOP_SPUT_VOLATILE_finish @ yes, finish 9613 b common_exceptionThrown @ no, handle exception 9614 9615 /* continuation for OP_IGET_OBJECT_VOLATILE */ 9616 9617 /* 9618 * Currently: 9619 * r0 holds resolved field 9620 * r9 holds object 9621 */ 9622 .LOP_IGET_OBJECT_VOLATILE_finish: 9623 @bl common_squeak0 9624 cmp r9, #0 @ check object for null 9625 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9626 beq common_errNullObject @ object was null 9627 ldr r0, [r9, r3] @ r0<- obj.field (8/16/32 bits) 9628 SMP_DMB @ acquiring load 9629 mov r2, rINST, lsr #8 @ r2<- A+ 9630 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9631 and r2, r2, #15 @ r2<- A 9632 GET_INST_OPCODE(ip) @ extract opcode from rINST 9633 SET_VREG(r0, r2) @ fp[A]<- r0 9634 GOTO_OPCODE(ip) @ jump to next instruction 9635 9636 /* continuation for OP_IGET_WIDE_VOLATILE */ 9637 9638 /* 9639 * Currently: 9640 * r0 holds resolved field 9641 * r9 holds object 9642 */ 9643 .LOP_IGET_WIDE_VOLATILE_finish: 9644 cmp r9, #0 @ check object for null 9645 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9646 beq common_errNullObject @ object was null 9647 .if 1 9648 add r0, r9, r3 @ r0<- address of field 9649 bl dvmQuasiAtomicRead64 @ r0/r1<- contents of field 9650 .else 9651 ldrd r0, [r9, r3] @ r0/r1<- obj.field (64-bit align ok) 9652 .endif 9653 mov r2, rINST, lsr #8 @ r2<- A+ 9654 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 
9655 and r2, r2, #15 @ r2<- A 9656 add r3, rFP, r2, lsl #2 @ r3<- &fp[A] 9657 GET_INST_OPCODE(ip) @ extract opcode from rINST 9658 stmia r3, {r0-r1} @ fp[A]<- r0/r1 9659 GOTO_OPCODE(ip) @ jump to next instruction 9660 9661 /* continuation for OP_IPUT_WIDE_VOLATILE */ 9662 9663 /* 9664 * Currently: 9665 * r0 holds resolved field 9666 * r9 holds object 9667 */ 9668 .LOP_IPUT_WIDE_VOLATILE_finish: 9669 mov r2, rINST, lsr #8 @ r2<- A+ 9670 cmp r9, #0 @ check object for null 9671 and r2, r2, #15 @ r2<- A 9672 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9673 add r2, rFP, r2, lsl #2 @ r3<- &fp[A] 9674 beq common_errNullObject @ object was null 9675 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9676 ldmia r2, {r0-r1} @ r0/r1<- fp[A] 9677 GET_INST_OPCODE(r10) @ extract opcode from rINST 9678 .if 1 9679 add r2, r9, r3 @ r2<- target address 9680 bl dvmQuasiAtomicSwap64 @ stores r0/r1 into addr r2 9681 .else 9682 strd r0, [r9, r3] @ obj.field (64 bits, aligned)<- r0/r1 9683 .endif 9684 GOTO_OPCODE(r10) @ jump to next instruction 9685 9686 /* continuation for OP_SGET_WIDE_VOLATILE */ 9687 9688 /* 9689 * Continuation if the field has not yet been resolved. 9690 * r1: BBBB field ref 9691 * 9692 * Returns StaticField pointer in r0. 9693 */ 9694 .LOP_SGET_WIDE_VOLATILE_resolve: 9695 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9696 EXPORT_PC() @ resolve() could throw, so export now 9697 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9698 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9699 cmp r0, #0 @ success? 9700 bne .LOP_SGET_WIDE_VOLATILE_finish @ yes, finish 9701 b common_exceptionThrown @ no, handle exception 9702 9703 /* continuation for OP_SPUT_WIDE_VOLATILE */ 9704 9705 /* 9706 * Continuation if the field has not yet been resolved. 9707 * r1: BBBB field ref 9708 * r9: &fp[AA] 9709 * 9710 * Returns StaticField pointer in r2. 
9711 */ 9712 .LOP_SPUT_WIDE_VOLATILE_resolve: 9713 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9714 EXPORT_PC() @ resolve() could throw, so export now 9715 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9716 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9717 cmp r0, #0 @ success? 9718 mov r2, r0 @ copy to r2 9719 bne .LOP_SPUT_WIDE_VOLATILE_finish @ yes, finish 9720 b common_exceptionThrown @ no, handle exception 9721 9722 /* continuation for OP_EXECUTE_INLINE */ 9723 9724 /* 9725 * Extract args, call function. 9726 * r0 = #of args (0-4) 9727 * r10 = call index 9728 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9729 * 9730 * Other ideas: 9731 * - Use a jump table from the main piece to jump directly into the 9732 * AND/LDR pairs. Costs a data load, saves a branch. 9733 * - Have five separate pieces that do the loading, so we can work the 9734 * interleave a little better. Increases code size. 9735 */ 9736 .LOP_EXECUTE_INLINE_continue: 9737 rsb r0, r0, #4 @ r0<- 4-r0 9738 FETCH(r9, 2) @ r9<- FEDC 9739 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9740 bl common_abort @ (skipped due to ARM prefetch) 9741 4: and ip, r9, #0xf000 @ isolate F 9742 ldr r3, [rFP, ip, lsr #10] @ r3<- vF (shift right 12, left 2) 9743 3: and ip, r9, #0x0f00 @ isolate E 9744 ldr r2, [rFP, ip, lsr #6] @ r2<- vE 9745 2: and ip, r9, #0x00f0 @ isolate D 9746 ldr r1, [rFP, ip, lsr #2] @ r1<- vD 9747 1: and ip, r9, #0x000f @ isolate C 9748 ldr r0, [rFP, ip, lsl #2] @ r0<- vC 9749 0: 9750 ldr r9, .LOP_EXECUTE_INLINE_table @ table of InlineOperation 9751 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 9752 @ (not reached) 9753 9754 .LOP_EXECUTE_INLINE_table: 9755 .word gDvmInlineOpsTable 9756 9757 /* continuation for OP_EXECUTE_INLINE_RANGE */ 9758 9759 /* 9760 * Extract args, call function. 
9761 * r0 = #of args (0-4) 9762 * r10 = call index 9763 * lr = return addr, above [DO NOT bl out of here w/o preserving LR] 9764 */ 9765 .LOP_EXECUTE_INLINE_RANGE_continue: 9766 rsb r0, r0, #4 @ r0<- 4-r0 9767 FETCH(r9, 2) @ r9<- CCCC 9768 add pc, pc, r0, lsl #3 @ computed goto, 2 instrs each 9769 bl common_abort @ (skipped due to ARM prefetch) 9770 4: add ip, r9, #3 @ base+3 9771 GET_VREG(r3, ip) @ r3<- vBase[3] 9772 3: add ip, r9, #2 @ base+2 9773 GET_VREG(r2, ip) @ r2<- vBase[2] 9774 2: add ip, r9, #1 @ base+1 9775 GET_VREG(r1, ip) @ r1<- vBase[1] 9776 1: add ip, r9, #0 @ (nop) 9777 GET_VREG(r0, ip) @ r0<- vBase[0] 9778 0: 9779 ldr r9, .LOP_EXECUTE_INLINE_RANGE_table @ table of InlineOperation 9780 LDR_PC "[r9, r10, lsl #4]" @ sizeof=16, "func" is first entry 9781 @ (not reached) 9782 9783 .LOP_EXECUTE_INLINE_RANGE_table: 9784 .word gDvmInlineOpsTable 9785 9786 /* continuation for OP_IPUT_OBJECT_VOLATILE */ 9787 9788 /* 9789 * Currently: 9790 * r0 holds resolved field 9791 * r9 holds object 9792 */ 9793 .LOP_IPUT_OBJECT_VOLATILE_finish: 9794 @bl common_squeak0 9795 mov r1, rINST, lsr #8 @ r1<- A+ 9796 ldr r3, [r0, #offInstField_byteOffset] @ r3<- byte offset of field 9797 and r1, r1, #15 @ r1<- A 9798 cmp r9, #0 @ check object for null 9799 GET_VREG(r0, r1) @ r0<- fp[A] 9800 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 9801 beq common_errNullObject @ object was null 9802 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9803 GET_INST_OPCODE(ip) @ extract opcode from rINST 9804 SMP_DMB @ releasing store 9805 str r0, [r9, r3] @ obj.field (32 bits)<- r0 9806 cmp r0, #0 @ stored a null reference? 9807 strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card if not 9808 GOTO_OPCODE(ip) @ jump to next instruction 9809 9810 /* continuation for OP_SGET_OBJECT_VOLATILE */ 9811 9812 /* 9813 * Continuation if the field has not yet been resolved. 
9814 * r1: BBBB field ref 9815 */ 9816 .LOP_SGET_OBJECT_VOLATILE_resolve: 9817 ldr r2, [rGLUE, #offGlue_method] @ r2<- current method 9818 EXPORT_PC() @ resolve() could throw, so export now 9819 ldr r0, [r2, #offMethod_clazz] @ r0<- method->clazz 9820 bl dvmResolveStaticField @ r0<- resolved StaticField ptr 9821 cmp r0, #0 @ success? 9822 bne .LOP_SGET_OBJECT_VOLATILE_finish @ yes, finish 9823 b common_exceptionThrown @ no, handle exception 9824 9825 /* continuation for OP_SPUT_OBJECT_VOLATILE */ 9826 .LOP_SPUT_OBJECT_VOLATILE_finish: @ field ptr in r0 9827 mov r2, rINST, lsr #8 @ r2<- AA 9828 FETCH_ADVANCE_INST(2) @ advance rPC, load rINST 9829 GET_VREG(r1, r2) @ r1<- fp[AA] 9830 ldr r2, [rGLUE, #offGlue_cardTable] @ r2<- card table base 9831 ldr r9, [r0, #offField_clazz] @ r9<- field->clazz 9832 GET_INST_OPCODE(ip) @ extract opcode from rINST 9833 SMP_DMB @ releasing store 9834 str r1, [r0, #offStaticField_value] @ field<- vAA 9835 cmp r1, #0 @ stored a null object? 9836 strneb r2, [r2, r9, lsr #GC_CARD_SHIFT] @ mark card based on obj head 9837 GOTO_OPCODE(ip) @ jump to next instruction 9838 9839 .size dvmAsmSisterStart, .-dvmAsmSisterStart 9840 .global dvmAsmSisterEnd 9841 dvmAsmSisterEnd: 9842 9843 /* File: armv5te/footer.S */ 9844 9845 /* 9846 * =========================================================================== 9847 * Common subroutines and data 9848 * =========================================================================== 9849 */ 9850 9851 9852 9853 .text 9854 .align 2 9855 9856 #if defined(WITH_JIT) 9857 #if defined(WITH_SELF_VERIFICATION) 9858 .global dvmJitToInterpPunt 9859 dvmJitToInterpPunt: 9860 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9861 mov r2,#kSVSPunt @ r2<- interpreter entry point 9862 mov r3, #0 9863 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9864 b jitSVShadowRunEnd @ doesn't return 9865 9866 .global dvmJitToInterpSingleStep 9867 dvmJitToInterpSingleStep: 9868 str 
lr,[rGLUE,#offGlue_jitResumeNPC] 9869 str r1,[rGLUE,#offGlue_jitResumeDPC] 9870 mov r2,#kSVSSingleStep @ r2<- interpreter entry point 9871 b jitSVShadowRunEnd @ doesn't return 9872 9873 .global dvmJitToInterpNoChainNoProfile 9874 dvmJitToInterpNoChainNoProfile: 9875 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9876 mov r0,rPC @ pass our target PC 9877 mov r2,#kSVSNoProfile @ r2<- interpreter entry point 9878 mov r3, #0 @ 0 means !inJitCodeCache 9879 str r3, [r10, #offThread_inJitCodeCache] @ back to the interp land 9880 b jitSVShadowRunEnd @ doesn't return 9881 9882 .global dvmJitToInterpTraceSelectNoChain 9883 dvmJitToInterpTraceSelectNoChain: 9884 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9885 mov r0,rPC @ pass our target PC 9886 mov r2,#kSVSTraceSelect @ r2<- interpreter entry point 9887 mov r3, #0 @ 0 means !inJitCodeCache 9888 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9889 b jitSVShadowRunEnd @ doesn't return 9890 9891 .global dvmJitToInterpTraceSelect 9892 dvmJitToInterpTraceSelect: 9893 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9894 ldr r0,[lr, #-1] @ pass our target PC 9895 mov r2,#kSVSTraceSelect @ r2<- interpreter entry point 9896 mov r3, #0 @ 0 means !inJitCodeCache 9897 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9898 b jitSVShadowRunEnd @ doesn't return 9899 9900 .global dvmJitToInterpBackwardBranch 9901 dvmJitToInterpBackwardBranch: 9902 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9903 ldr r0,[lr, #-1] @ pass our target PC 9904 mov r2,#kSVSBackwardBranch @ r2<- interpreter entry point 9905 mov r3, #0 @ 0 means !inJitCodeCache 9906 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9907 b jitSVShadowRunEnd @ doesn't return 9908 9909 .global dvmJitToInterpNormal 9910 dvmJitToInterpNormal: 9911 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9912 ldr r0,[lr, #-1] @ pass our target PC 9913 
mov r2,#kSVSNormal @ r2<- interpreter entry point 9914 mov r3, #0 @ 0 means !inJitCodeCache 9915 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9916 b jitSVShadowRunEnd @ doesn't return 9917 9918 .global dvmJitToInterpNoChain 9919 dvmJitToInterpNoChain: 9920 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9921 mov r0,rPC @ pass our target PC 9922 mov r2,#kSVSNoChain @ r2<- interpreter entry point 9923 mov r3, #0 @ 0 means !inJitCodeCache 9924 str r3, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9925 b jitSVShadowRunEnd @ doesn't return 9926 #else 9927 /* 9928 * Return from the translation cache to the interpreter when the compiler is 9929 * having issues translating/executing a Dalvik instruction. We have to skip 9930 * the code cache lookup otherwise it is possible to indefinitely bouce 9931 * between the interpreter and the code cache if the instruction that fails 9932 * to be compiled happens to be at a trace start. 9933 */ 9934 .global dvmJitToInterpPunt 9935 dvmJitToInterpPunt: 9936 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9937 mov rPC, r0 9938 #if defined(WITH_JIT_TUNING) 9939 mov r0,lr 9940 bl dvmBumpPunt; 9941 #endif 9942 EXPORT_PC() 9943 mov r0, #0 9944 str r0, [r10, #offThread_inJitCodeCache] @ Back to the interp land 9945 adrl rIBASE, dvmAsmInstructionStart 9946 FETCH_INST() 9947 GET_INST_OPCODE(ip) 9948 GOTO_OPCODE(ip) 9949 9950 /* 9951 * Return to the interpreter to handle a single instruction. 
9952 * On entry: 9953 * r0 <= PC 9954 * r1 <= PC of resume instruction 9955 * lr <= resume point in translation 9956 */ 9957 .global dvmJitToInterpSingleStep 9958 dvmJitToInterpSingleStep: 9959 str lr,[rGLUE,#offGlue_jitResumeNPC] 9960 str r1,[rGLUE,#offGlue_jitResumeDPC] 9961 mov r1,#kInterpEntryInstr 9962 @ enum is 4 byte in aapcs-EABI 9963 str r1, [rGLUE, #offGlue_entryPoint] 9964 mov rPC,r0 9965 EXPORT_PC() 9966 9967 adrl rIBASE, dvmAsmInstructionStart 9968 mov r2,#kJitSingleStep @ Ask for single step and then revert 9969 str r2,[rGLUE,#offGlue_jitState] 9970 mov r1,#1 @ set changeInterp to bail to debug interp 9971 b common_gotoBail 9972 9973 /* 9974 * Return from the translation cache and immediately request 9975 * a translation for the exit target. Commonly used for callees. 9976 */ 9977 .global dvmJitToInterpTraceSelectNoChain 9978 dvmJitToInterpTraceSelectNoChain: 9979 #if defined(WITH_JIT_TUNING) 9980 bl dvmBumpNoChain 9981 #endif 9982 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 9983 mov r0,rPC 9984 bl dvmJitGetCodeAddr @ Is there a translation? 9985 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 9986 mov r1, rPC @ arg1 of translation may need this 9987 mov lr, #0 @ in case target is HANDLER_INTERPRET 9988 cmp r0,#0 @ !0 means translation exists 9989 bxne r0 @ continue native execution if so 9990 b 2f @ branch over to use the interpreter 9991 9992 /* 9993 * Return from the translation cache and immediately request 9994 * a translation for the exit target. Commonly used following 9995 * invokes. 9996 */ 9997 .global dvmJitToInterpTraceSelect 9998 dvmJitToInterpTraceSelect: 9999 ldr rPC,[lr, #-1] @ get our target PC 10000 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 10001 add rINST,lr,#-5 @ save start of chain branch 10002 add rINST, #-4 @ .. which is 9 bytes back 10003 mov r0,rPC 10004 bl dvmJitGetCodeAddr @ Is there a translation? 
10005 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 10006 cmp r0,#0 10007 beq 2f 10008 mov r1,rINST 10009 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 10010 mov r1, rPC @ arg1 of translation may need this 10011 mov lr, #0 @ in case target is HANDLER_INTERPRET 10012 cmp r0,#0 @ successful chain? 10013 bxne r0 @ continue native execution 10014 b toInterpreter @ didn't chain - resume with interpreter 10015 10016 /* No translation, so request one if profiling isn't disabled*/ 10017 2: 10018 adrl rIBASE, dvmAsmInstructionStart 10019 GET_JIT_PROF_TABLE(r0) 10020 FETCH_INST() 10021 cmp r0, #0 10022 movne r2,#kJitTSelectRequestHot @ ask for trace selection 10023 bne common_selectTrace 10024 GET_INST_OPCODE(ip) 10025 GOTO_OPCODE(ip) 10026 10027 /* 10028 * Return from the translation cache to the interpreter. 10029 * The return was done with a BLX from thumb mode, and 10030 * the following 32-bit word contains the target rPC value. 10031 * Note that lr (r14) will have its low-order bit set to denote 10032 * its thumb-mode origin. 10033 * 10034 * We'll need to stash our lr origin away, recover the new 10035 * target and then check to see if there is a translation available 10036 * for our new target. If so, we do a translation chain and 10037 * go back to native execution. Otherwise, it's back to the 10038 * interpreter (after treating this entry as a potential 10039 * trace start). 10040 */ 10041 .global dvmJitToInterpNormal 10042 dvmJitToInterpNormal: 10043 ldr rPC,[lr, #-1] @ get our target PC 10044 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 10045 add rINST,lr,#-5 @ save start of chain branch 10046 add rINST,#-4 @ .. which is 9 bytes back 10047 #if defined(WITH_JIT_TUNING) 10048 bl dvmBumpNormal 10049 #endif 10050 mov r0,rPC 10051 bl dvmJitGetCodeAddr @ Is there a translation? 
10052 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 10053 cmp r0,#0 10054 beq toInterpreter @ go if not, otherwise do chain 10055 mov r1,rINST 10056 bl dvmJitChain @ r0<- dvmJitChain(codeAddr,chainAddr) 10057 mov r1, rPC @ arg1 of translation may need this 10058 mov lr, #0 @ in case target is HANDLER_INTERPRET 10059 cmp r0,#0 @ successful chain? 10060 bxne r0 @ continue native execution 10061 b toInterpreter @ didn't chain - resume with interpreter 10062 10063 /* 10064 * Return from the translation cache to the interpreter to do method invocation. 10065 * Check if translation exists for the callee, but don't chain to it. 10066 */ 10067 .global dvmJitToInterpNoChainNoProfile 10068 dvmJitToInterpNoChainNoProfile: 10069 #if defined(WITH_JIT_TUNING) 10070 bl dvmBumpNoChain 10071 #endif 10072 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 10073 mov r0,rPC 10074 bl dvmJitGetCodeAddr @ Is there a translation? 10075 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 10076 mov r1, rPC @ arg1 of translation may need this 10077 mov lr, #0 @ in case target is HANDLER_INTERPRET 10078 cmp r0,#0 10079 bxne r0 @ continue native execution if so 10080 EXPORT_PC() 10081 adrl rIBASE, dvmAsmInstructionStart 10082 FETCH_INST() 10083 GET_INST_OPCODE(ip) @ extract opcode from rINST 10084 GOTO_OPCODE(ip) @ jump to next instruction 10085 10086 /* 10087 * Return from the translation cache to the interpreter to do method invocation. 10088 * Check if translation exists for the callee, but don't chain to it. 10089 */ 10090 .global dvmJitToInterpNoChain 10091 dvmJitToInterpNoChain: 10092 #if defined(WITH_JIT_TUNING) 10093 bl dvmBumpNoChain 10094 #endif 10095 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 10096 mov r0,rPC 10097 bl dvmJitGetCodeAddr @ Is there a translation? 
10098 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 10099 mov r1, rPC @ arg1 of translation may need this 10100 mov lr, #0 @ in case target is HANDLER_INTERPRET 10101 cmp r0,#0 10102 bxne r0 @ continue native execution if so 10103 #endif 10104 10105 /* 10106 * No translation, restore interpreter regs and start interpreting. 10107 * rGLUE & rFP were preserved in the translated code, and rPC has 10108 * already been restored by the time we get here. We'll need to set 10109 * up rIBASE & rINST, and load the address of the JitTable into r0. 10110 */ 10111 toInterpreter: 10112 EXPORT_PC() 10113 adrl rIBASE, dvmAsmInstructionStart 10114 FETCH_INST() 10115 GET_JIT_PROF_TABLE(r0) 10116 @ NOTE: intended fallthrough 10117 10118 /* 10119 * Common code to update potential trace start counter, and initiate 10120 * a trace-build if appropriate. On entry, rPC should point to the 10121 * next instruction to execute, and rINST should be already loaded with 10122 * the next opcode word, and r0 holds a pointer to the jit profile 10123 * table (pJitProfTable). 10124 */ 10125 common_testUpdateProfile: 10126 cmp r0,#0 10127 GET_INST_OPCODE(ip) 10128 GOTO_OPCODE_IFEQ(ip) @ if not profiling, fallthrough otherwise */ 10129 10130 common_updateProfile: 10131 eor r3,rPC,rPC,lsr #12 @ cheap, but fast hash function 10132 lsl r3,r3,#(32 - JIT_PROF_SIZE_LOG_2) @ shift out excess bits 10133 ldrb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ get counter 10134 GET_INST_OPCODE(ip) 10135 subs r1,r1,#1 @ decrement counter 10136 strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ and store it 10137 GOTO_OPCODE_IFNE(ip) @ if not threshold, fallthrough otherwise */ 10138 10139 /* 10140 * Here, we switch to the debug interpreter to request 10141 * trace selection. First, though, check to see if there 10142 * is already a native translation in place (and, if so, 10143 * jump to it now). 
10144 */ 10145 GET_JIT_THRESHOLD(r1) 10146 ldr r10, [rGLUE, #offGlue_self] @ callee saved r10 <- glue->self 10147 strb r1,[r0,r3,lsr #(32 - JIT_PROF_SIZE_LOG_2)] @ reset counter 10148 EXPORT_PC() 10149 mov r0,rPC 10150 bl dvmJitGetCodeAddr @ r0<- dvmJitGetCodeAddr(rPC) 10151 str r0, [r10, #offThread_inJitCodeCache] @ set the inJitCodeCache flag 10152 mov r1, rPC @ arg1 of translation may need this 10153 mov lr, #0 @ in case target is HANDLER_INTERPRET 10154 cmp r0,#0 10155 #if !defined(WITH_SELF_VERIFICATION) 10156 bxne r0 @ jump to the translation 10157 mov r2,#kJitTSelectRequest @ ask for trace selection 10158 @ fall-through to common_selectTrace 10159 #else 10160 moveq r2,#kJitTSelectRequest @ ask for trace selection 10161 beq common_selectTrace 10162 /* 10163 * At this point, we have a target translation. However, if 10164 * that translation is actually the interpret-only pseudo-translation 10165 * we want to treat it the same as no translation. 10166 */ 10167 mov r10, r0 @ save target 10168 bl dvmCompilerGetInterpretTemplate 10169 cmp r0, r10 @ special case? 10170 bne jitSVShadowRunStart @ set up self verification shadow space 10171 @ Need to clear the inJitCodeCache flag 10172 ldr r10, [rGLUE, #offGlue_self] @ r10 <- glue->self 10173 mov r3, #0 @ 0 means not in the JIT code cache 10174 str r3, [r10, #offThread_inJitCodeCache] @ back to the interp land 10175 GET_INST_OPCODE(ip) 10176 GOTO_OPCODE(ip) 10177 /* no return */ 10178 #endif 10179 10180 /* 10181 * On entry: 10182 * r2 is jit state, e.g. kJitTSelectRequest or kJitTSelectRequestHot 10183 */ 10184 common_selectTrace: 10185 str r2,[rGLUE,#offGlue_jitState] 10186 mov r2,#kInterpEntryInstr @ normal entry reason 10187 str r2,[rGLUE,#offGlue_entryPoint] 10188 mov r1,#1 @ set changeInterp 10189 b common_gotoBail 10190 10191 #if defined(WITH_SELF_VERIFICATION) 10192 /* 10193 * Save PC and registers to shadow memory for self verification mode 10194 * before jumping to native translation. 
10195 * On entry: 10196 * rPC, rFP, rGLUE: the values that they should contain 10197 * r10: the address of the target translation. 10198 */ 10199 jitSVShadowRunStart: 10200 mov r0,rPC @ r0<- program counter 10201 mov r1,rFP @ r1<- frame pointer 10202 mov r2,rGLUE @ r2<- InterpState pointer 10203 mov r3,r10 @ r3<- target translation 10204 bl dvmSelfVerificationSaveState @ save registers to shadow space 10205 ldr rFP,[r0,#offShadowSpace_shadowFP] @ rFP<- fp in shadow space 10206 add rGLUE,r0,#offShadowSpace_interpState @ rGLUE<- rGLUE in shadow space 10207 bx r10 @ jump to the translation 10208 10209 /* 10210 * Restore PC, registers, and interpState to original values 10211 * before jumping back to the interpreter. 10212 */ 10213 jitSVShadowRunEnd: 10214 mov r1,rFP @ pass ending fp 10215 bl dvmSelfVerificationRestoreState @ restore pc and fp values 10216 ldr rPC,[r0,#offShadowSpace_startPC] @ restore PC 10217 ldr rFP,[r0,#offShadowSpace_fp] @ restore FP 10218 ldr rGLUE,[r0,#offShadowSpace_glue] @ restore InterpState 10219 ldr r1,[r0,#offShadowSpace_svState] @ get self verification state 10220 cmp r1,#0 @ check for punt condition 10221 beq 1f 10222 mov r2,#kJitSelfVerification @ ask for self verification 10223 str r2,[rGLUE,#offGlue_jitState] 10224 mov r2,#kInterpEntryInstr @ normal entry reason 10225 str r2,[rGLUE,#offGlue_entryPoint] 10226 mov r1,#1 @ set changeInterp 10227 b common_gotoBail 10228 10229 1: @ exit to interpreter without check 10230 EXPORT_PC() 10231 adrl rIBASE, dvmAsmInstructionStart 10232 FETCH_INST() 10233 GET_INST_OPCODE(ip) 10234 GOTO_OPCODE(ip) 10235 #endif 10236 10237 #endif 10238 10239 /* 10240 * Common code when a backward branch is taken. 10241 * 10242 * TODO: we could avoid a branch by just setting r0 and falling through 10243 * into the common_periodicChecks code, and having a test on r0 at the 10244 * end determine if we should return to the caller or update & branch to 10245 * the next instr. 
10246 * 10247 * On entry: 10248 * r9 is PC adjustment *in bytes* 10249 */ 10250 common_backwardBranch: 10251 mov r0, #kInterpEntryInstr 10252 bl common_periodicChecks 10253 #if defined(WITH_JIT) 10254 GET_JIT_PROF_TABLE(r0) 10255 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 10256 cmp r0,#0 10257 bne common_updateProfile 10258 GET_INST_OPCODE(ip) 10259 GOTO_OPCODE(ip) 10260 #else 10261 FETCH_ADVANCE_INST_RB(r9) @ update rPC, load rINST 10262 GET_INST_OPCODE(ip) @ extract opcode from rINST 10263 GOTO_OPCODE(ip) @ jump to next instruction 10264 #endif 10265 10266 10267 /* 10268 * Need to see if the thread needs to be suspended or debugger/profiler 10269 * activity has begun. If so, we suspend the thread or side-exit to 10270 * the debug interpreter as appropriate. 10271 * 10272 * The common case is no activity on any of these, so we want to figure 10273 * that out quickly. If something is up, we can then sort out what. 10274 * 10275 * We want to be fast if the VM was built without debugger or profiler 10276 * support, but we also need to recognize that the system is usually 10277 * shipped with both of these enabled. 10278 * 10279 * TODO: reduce this so we're just checking a single location. 10280 * 10281 * On entry: 10282 * r0 is reentry type, e.g. kInterpEntryInstr (for debugger/profiling) 10283 * r9 is trampoline PC adjustment *in bytes* 10284 */ 10285 common_periodicChecks: 10286 ldr r3, [rGLUE, #offGlue_pSelfSuspendCount] @ r3<- &suspendCount 10287 10288 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 10289 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 10290 10291 ldr ip, [r3] @ ip<- suspendCount (int) 10292 10293 cmp r1, #0 @ debugger enabled? 
10294 ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean) 10295 ldr r2, [r2] @ r2<- activeProfilers (int) 10296 orrne ip, ip, r1 @ ip<- suspendCount | debuggerActive 10297 orrs ip, ip, r2 @ ip<- suspend|debugger|profiler; set Z 10298 10299 bxeq lr @ all zero, return 10300 10301 /* 10302 * One or more interesting events have happened. Figure out what. 10303 * 10304 * If debugging or profiling are compiled in, we need to disambiguate. 10305 * 10306 * r0 still holds the reentry type. 10307 */ 10308 ldr ip, [r3] @ ip<- suspendCount (int) 10309 cmp ip, #0 @ want suspend? 10310 beq 1f @ no, must be debugger/profiler 10311 10312 stmfd sp!, {r0, lr} @ preserve r0 and lr 10313 #if defined(WITH_JIT) 10314 /* 10315 * Refresh the Jit's cached copy of profile table pointer. This pointer 10316 * doubles as the Jit's on/off switch. 10317 */ 10318 ldr r3, [rGLUE, #offGlue_ppJitProfTable] @ r3<-&gDvmJit.pJitProfTable 10319 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 10320 ldr r3, [r3] @ r3 <- pJitProfTable 10321 EXPORT_PC() @ need for precise GC 10322 str r3, [rGLUE, #offGlue_pJitProfTable] @ refresh Jit's on/off switch 10323 #else 10324 ldr r0, [rGLUE, #offGlue_self] @ r0<- glue->self 10325 EXPORT_PC() @ need for precise GC 10326 #endif 10327 bl dvmCheckSuspendPending @ do full check, suspend if necessary 10328 ldmfd sp!, {r0, lr} @ restore r0 and lr 10329 10330 /* 10331 * Reload the debugger/profiler enable flags. We're checking to see 10332 * if either of these got set while we were suspended. 10333 * 10334 * We can't really avoid the #ifdefs here, because the fields don't 10335 * exist when the feature is disabled. 10336 */ 10337 ldr r1, [rGLUE, #offGlue_pDebuggerActive] @ r1<- &debuggerActive 10338 cmp r1, #0 @ debugger enabled? 
10339 ldrneb r1, [r1] @ yes, r1<- debuggerActive (boolean) 10340 ldr r2, [rGLUE, #offGlue_pActiveProfilers] @ r2<- &activeProfilers 10341 ldr r2, [r2] @ r2<- activeProfilers (int) 10342 10343 orrs r1, r1, r2 10344 beq 2f 10345 10346 1: @ debugger/profiler enabled, bail out; glue->entryPoint was set above 10347 str r0, [rGLUE, #offGlue_entryPoint] @ store r0, need for debug/prof 10348 add rPC, rPC, r9 @ update rPC 10349 mov r1, #1 @ "want switch" = true 10350 b common_gotoBail @ side exit 10351 10352 2: 10353 bx lr @ nothing to do, return 10354 10355 10356 /* 10357 * The equivalent of "goto bail", this calls through the "bail handler". 10358 * 10359 * State registers will be saved to the "glue" area before bailing. 10360 * 10361 * On entry: 10362 * r1 is "bool changeInterp", indicating if we want to switch to the 10363 * other interpreter or just bail all the way out 10364 */ 10365 common_gotoBail: 10366 SAVE_PC_FP_TO_GLUE() @ export state to "glue" 10367 mov r0, rGLUE @ r0<- glue ptr 10368 b dvmMterpStdBail @ call(glue, changeInterp) 10369 10370 @add r1, r1, #1 @ using (boolean+1) 10371 @add r0, rGLUE, #offGlue_jmpBuf @ r0<- &glue->jmpBuf 10372 @bl _longjmp @ does not return 10373 @bl common_abort 10374 10375 10376 /* 10377 * Common code for method invocation with range. 
10378 * 10379 * On entry: 10380 * r0 is "Method* methodToCall", the method we're trying to call 10381 */ 10382 common_invokeMethodRange: 10383 .LinvokeNewRange: 10384 @ prepare to copy args to "outs" area of current frame 10385 movs r2, rINST, lsr #8 @ r2<- AA (arg count) -- test for zero 10386 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 10387 beq .LinvokeArgsDone @ if no args, skip the rest 10388 FETCH(r1, 2) @ r1<- CCCC 10389 10390 @ r0=methodToCall, r1=CCCC, r2=count, r10=outs 10391 @ (very few methods have > 10 args; could unroll for common cases) 10392 add r3, rFP, r1, lsl #2 @ r3<- &fp[CCCC] 10393 sub r10, r10, r2, lsl #2 @ r10<- "outs" area, for call args 10394 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 10395 1: ldr r1, [r3], #4 @ val = *fp++ 10396 subs r2, r2, #1 @ count-- 10397 str r1, [r10], #4 @ *outs++ = val 10398 bne 1b @ ...while count != 0 10399 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 10400 b .LinvokeArgsDone 10401 10402 /* 10403 * Common code for method invocation without range. 
10404 * 10405 * On entry: 10406 * r0 is "Method* methodToCall", the method we're trying to call 10407 */ 10408 common_invokeMethodNoRange: 10409 .LinvokeNewNoRange: 10410 @ prepare to copy args to "outs" area of current frame 10411 movs r2, rINST, lsr #12 @ r2<- B (arg count) -- test for zero 10412 SAVEAREA_FROM_FP(r10, rFP) @ r10<- stack save area 10413 FETCH(r1, 2) @ r1<- GFED (load here to hide latency) 10414 ldrh r9, [r0, #offMethod_registersSize] @ r9<- methodToCall->regsSize 10415 ldrh r3, [r0, #offMethod_outsSize] @ r3<- methodToCall->outsSize 10416 beq .LinvokeArgsDone 10417 10418 @ r0=methodToCall, r1=GFED, r3=outSize, r2=count, r9=regSize, r10=outs 10419 .LinvokeNonRange: 10420 rsb r2, r2, #5 @ r2<- 5-r2 10421 add pc, pc, r2, lsl #4 @ computed goto, 4 instrs each 10422 bl common_abort @ (skipped due to ARM prefetch) 10423 5: and ip, rINST, #0x0f00 @ isolate A 10424 ldr r2, [rFP, ip, lsr #6] @ r2<- vA (shift right 8, left 2) 10425 mov r0, r0 @ nop 10426 str r2, [r10, #-4]! @ *--outs = vA 10427 4: and ip, r1, #0xf000 @ isolate G 10428 ldr r2, [rFP, ip, lsr #10] @ r2<- vG (shift right 12, left 2) 10429 mov r0, r0 @ nop 10430 str r2, [r10, #-4]! @ *--outs = vG 10431 3: and ip, r1, #0x0f00 @ isolate F 10432 ldr r2, [rFP, ip, lsr #6] @ r2<- vF 10433 mov r0, r0 @ nop 10434 str r2, [r10, #-4]! @ *--outs = vF 10435 2: and ip, r1, #0x00f0 @ isolate E 10436 ldr r2, [rFP, ip, lsr #2] @ r2<- vE 10437 mov r0, r0 @ nop 10438 str r2, [r10, #-4]! @ *--outs = vE 10439 1: and ip, r1, #0x000f @ isolate D 10440 ldr r2, [rFP, ip, lsl #2] @ r2<- vD 10441 mov r0, r0 @ nop 10442 str r2, [r10, #-4]! 
                                        @ *--outs = vD
0:  @ fall through to .LinvokeArgsDone

/*
 * Common invoke tail: all outgoing args have been copied below the caller's
 * save area.  Build the callee's frame, check for stack overflow, then
 * either enter the interpreted method or branch to the native-call path.
 *
 * On entry: r0=methodToCall, r3=outsSize, r9=registersSize
 */
.LinvokeArgsDone: @ r0=methodToCall, r3=outSize, r9=regSize
    ldr     r2, [r0, #offMethod_insns]      @ r2<- method->insns
    ldr     rINST, [r0, #offMethod_clazz]   @ rINST<- method->clazz
    @ find space for the new stack frame, check for overflow
    SAVEAREA_FROM_FP(r1, rFP)               @ r1<- stack save area
    sub     r1, r1, r9, lsl #2              @ r1<- newFp (old savearea - regsSize)
    SAVEAREA_FROM_FP(r10, r1)               @ r10<- newSaveArea
@    bl      common_dumpRegs
    ldr     r9, [rGLUE, #offGlue_interpStackEnd]    @ r9<- interpStackEnd
    sub     r3, r10, r3, lsl #2             @ r3<- bottom (newsave - outsSize)
    cmp     r3, r9                          @ bottom < interpStackEnd?
    ldr     r3, [r0, #offMethod_accessFlags] @ r3<- methodToCall->accessFlags
    blo     .LstackOverflow                 @ yes, this frame will overflow stack

    @ set up newSaveArea
#ifdef EASY_GDB
    SAVEAREA_FROM_FP(ip, rFP)               @ ip<- stack save area
    str     ip, [r10, #offStackSaveArea_prevSave]
#endif
    str     rFP, [r10, #offStackSaveArea_prevFrame]
    str     rPC, [r10, #offStackSaveArea_savedPc]
#if defined(WITH_JIT)
    mov     r9, #0                          @ no JIT return address yet
    str     r9, [r10, #offStackSaveArea_returnAddr]
#endif
    str     r0, [r10, #offStackSaveArea_method]
    tst     r3, #ACC_NATIVE                 @ native method?
    bne     .LinvokeNative                  @ yes: take the JNI path

    /*
    stmfd   sp!, {r0-r3}
    bl      common_printNewline
    mov     r0, rFP
    mov     r1, #0
    bl      dvmDumpFp
    ldmfd   sp!, {r0-r3}
    stmfd   sp!, {r0-r3}
    mov     r0, r1
    mov     r1, r10
    bl      dvmDumpFp
    bl      common_printNewline
    ldmfd   sp!, {r0-r3}
    */

    @ Interpreted method: prefetch the first instruction before publishing
    @ the new rPC so the fetch and the bookkeeping stores can overlap.
    ldrh    r9, [r2]                        @ r9 <- load INST from new PC
    ldr     r3, [rINST, #offClassObject_pDvmDex] @ r3<- method->clazz->pDvmDex
    mov     rPC, r2                         @ publish new rPC
    ldr     r2, [rGLUE, #offGlue_self]      @ r2<- glue->self

    @ Update "glue" values for the new method
    @ r0=methodToCall, r1=newFp, r2=self, r3=newMethodClass, r9=newINST
    str     r0, [rGLUE, #offGlue_method]    @ glue->method = methodToCall
    str     r3, [rGLUE, #offGlue_methodClassDex] @ glue->methodClassDex = ...
#if defined(WITH_JIT)
    GET_JIT_PROF_TABLE(r0)
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    cmp     r0, #0                          @ JIT profiling active?
    bne     common_updateProfile            @ yes: count this method entry
    GOTO_OPCODE(ip)                         @ jump to next instruction
#else
    mov     rFP, r1                         @ fp = newFp
    GET_PREFETCHED_OPCODE(ip, r9)           @ extract prefetched opcode from r9
    mov     rINST, r9                       @ publish new rINST
    str     r1, [r2, #offThread_curFrame]   @ self->curFrame = newFp
    GOTO_OPCODE(ip)                         @ jump to next instruction
#endif

.LinvokeNative:
    @ Prep for the native call
    @ r0=methodToCall, r1=newFp, r10=newSaveArea
    ldr     r3, [rGLUE, #offGlue_self]      @ r3<- glue->self
    ldr     r9, [r3, #offThread_jniLocal_topCookie] @ r9<- thread->localRef->...
    str     r1, [r3, #offThread_curFrame]   @ self->curFrame = newFp
    str     r9, [r10, #offStackSaveArea_localRefCookie] @newFp->localRefCookie=top
    mov     r9, r3                          @ r9<- glue->self (preserve)

    @ EABI call args: r0=args (newFp), r1=&glue->retval, r2=methodToCall
    mov     r2, r0                          @ r2<- methodToCall
    mov     r0, r1                          @ r0<- newFp (points to args)
    add     r1, rGLUE, #offGlue_retval      @ r1<- &retval

#ifdef ASSIST_DEBUGGER
    /* insert fake function header to help gdb find the stack frame */
    b       .Lskip
    .type   dalvik_mterp, %function
dalvik_mterp:
    .fnstart
    MTERP_ENTRY1
    MTERP_ENTRY2
.Lskip:
#endif

    @mov     lr, pc                      @ set return addr
    @ldr     pc, [r2, #offMethod_nativeFunc] @ pc<- methodToCall->nativeFunc
    LDR_PC_LR "[r2, #offMethod_nativeFunc]"

#if defined(WITH_JIT)
    ldr     r3, [rGLUE, #offGlue_ppJitProfTable] @ Refresh Jit's on/off status
#endif

    @ native return; r9=self, r10=newSaveArea
    @ equivalent to dvmPopJniLocals
    ldr     r0, [r10, #offStackSaveArea_localRefCookie] @ r0<- saved top
    ldr     r1, [r9, #offThread_exception]  @ check for exception
#if defined(WITH_JIT)
    ldr     r3, [r3]                        @ r3 <- gDvmJit.pProfTable
#endif
    str     rFP, [r9, #offThread_curFrame]  @ self->curFrame = fp
    cmp     r1, #0                          @ null?
    str     r0, [r9, #offThread_jniLocal_topCookie] @ new top <- old top
#if defined(WITH_JIT)
    str     r3, [rGLUE, #offGlue_pJitProfTable] @ refresh cached on/off switch
#endif
    bne     common_exceptionThrown          @ exception pending: go handle it

    FETCH_ADVANCE_INST(3)                   @ advance rPC, load rINST
    GET_INST_OPCODE(ip)                     @ extract opcode from rINST
    GOTO_OPCODE(ip)                         @ jump to next instruction

.LstackOverflow:    @ r0=methodToCall
    mov     r1, r0                          @ r1<- methodToCall
    ldr     r0, [rGLUE, #offGlue_self]      @ r0<- self
    bl      dvmHandleStackOverflow
    b       common_exceptionThrown
#ifdef ASSIST_DEBUGGER
    .fnend
#endif


/*
 * Common code for method invocation, calling through "glue code".
 *
 * TODO: now that we have range and non-range invoke handlers, this
 *       needs to be split into two.  Maybe just create entry points
 *       that set r9 and jump here?
 *
 * On entry:
 *     r0 is "Method* methodToCall", the method we're trying to call
 *     r9 is "bool methodCallRange", indicating if this is a /range variant
 */
    .if     0
.LinvokeOld:
    sub     sp, sp, #8                  @ space for args + pad
    FETCH(ip, 2)                        @ ip<- FEDC or CCCC
    mov     r2, r0                      @ A2<- methodToCall
    mov     r0, rGLUE                   @ A0<- glue
    SAVE_PC_FP_TO_GLUE()                @ export state to "glue"
    mov     r1, r9                      @ A1<- methodCallRange
    mov     r3, rINST, lsr #8           @ A3<- AA
    str     ip, [sp, #0]                @ A4<- ip
    bl      dvmMterp_invokeMethod       @ call the C invokeMethod
    add     sp, sp, #8                  @ remove arg area
    b       common_resumeAfterGlueCall  @ continue to next instruction
    .endif



/*
 * Common code for handling a return instruction.
 *
 * Pops the current frame, restores the caller's frame/method state, and
 * resumes execution at the instruction after the invoke (3 code units on).
 * A NULL saved method marks a break frame, in which case we bail out of
 * the interpreter entirely.
 *
 * This does not return.
 */
common_returnFromMethod:
.LreturnNew:
    mov     r0, #kInterpEntryReturn
    mov     r9, #0
    bl      common_periodicChecks       @ GC/debugger/suspend housekeeping

    SAVEAREA_FROM_FP(r0, rFP)           @ r0<- saveArea (old)
    ldr     rFP, [r0, #offStackSaveArea_prevFrame] @ fp = saveArea->prevFrame
    ldr     r9, [r0, #offStackSaveArea_savedPc] @ r9 = saveArea->savedPc
    ldr     r2, [rFP, #(offStackSaveArea_method - sizeofStackSaveArea)]
                                        @ r2<- method we're returning to
    ldr     r3, [rGLUE, #offGlue_self]  @ r3<- glue->self
    cmp     r2, #0                      @ is this a break frame?
    ldrne   r10, [r2, #offMethod_clazz] @ r10<- method->clazz
    mov     r1, #0                      @ "want switch" = false
    beq     common_gotoBail             @ break frame, bail out completely

    PREFETCH_ADVANCE_INST(rINST, r9, 3) @ advance r9, update new rINST
    str     r2, [rGLUE, #offGlue_method]@ glue->method = newSave->method
    ldr     r1, [r10, #offClassObject_pDvmDex] @ r1<- method->clazz->pDvmDex
    str     rFP, [r3, #offThread_curFrame] @ self->curFrame = fp
#if defined(WITH_JIT)
    ldr     r10, [r0, #offStackSaveArea_returnAddr] @ r10 = saveArea->returnAddr
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    str     r10, [r3, #offThread_inJitCodeCache] @ may return to JIT'ed land
    cmp     r10, #0                     @ caller is compiled code
    blxne   r10                         @ yes: return into the JIT code cache
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction
#else
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    mov     rPC, r9                     @ publish new rPC
    str     r1, [rGLUE, #offGlue_methodClassDex]
    GOTO_OPCODE(ip)                     @ jump to next instruction
#endif

/*
 * Return handling, calls through "glue code".
 */
    .if     0
.LreturnOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_returnFromMethod
    b       common_resumeAfterGlueCall
    .endif


/*
 * Somebody has thrown an exception.  Handle it.
 *
 * If the exception processing code returns to us (instead of falling
 * out of the interpreter), continue with whatever the next instruction
 * now happens to be.
 *
 * This does not return.
 */
    .global dvmMterpCommonExceptionThrown
dvmMterpCommonExceptionThrown:
common_exceptionThrown:
.LexceptionNew:
    mov     r0, #kInterpEntryThrow
    mov     r9, #0
    bl      common_periodicChecks       @ GC/debugger/suspend housekeeping

    @ Fetch and clear the pending exception; keep it GC-reachable while
    @ we search for a handler.  r9=exception, r10=self for the duration.
    ldr     r10, [rGLUE, #offGlue_self] @ r10<- glue->self
    ldr     r9, [r10, #offThread_exception] @ r9<- self->exception
    mov     r1, r10                     @ r1<- self
    mov     r0, r9                      @ r0<- exception
    bl      dvmAddTrackedAlloc          @ don't let the exception be GCed
    mov     r3, #0                      @ r3<- NULL
    str     r3, [r10, #offThread_exception] @ self->exception = NULL

    /* set up args and a local for "&fp" */
    /* (str sp, [sp, #-4]!  would be perfect here, but is discouraged) */
    str     rFP, [sp, #-4]!             @ *--sp = fp
    mov     ip, sp                      @ ip<- &fp
    mov     r3, #0                      @ r3<- false
    str     ip, [sp, #-4]!              @ *--sp = &fp
    ldr     r1, [rGLUE, #offGlue_method] @ r1<- glue->method
    mov     r0, r10                     @ r0<- self
    ldr     r1, [r1, #offMethod_insns]  @ r1<- method->insns
    mov     r2, r9                      @ r2<- exception
    sub     r1, rPC, r1                 @ r1<- pc - method->insns
    mov     r1, r1, asr #1              @ r1<- offset in code units

    /* call, r0 gets catchRelPc (a code-unit offset) */
    bl      dvmFindCatchBlock           @ call(self, relPc, exc, scan?, &fp)

    /* fix earlier stack overflow if necessary; may trash rFP */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    beq     1f                          @ no, skip ahead
    mov     rFP, r0                     @ save relPc result in rFP
    mov     r0, r10                     @ r0<- self
    mov     r1, r9                      @ r1<- exception
    bl      dvmCleanupStackOverflow     @ call(self, exception)
    mov     r0, rFP                     @ restore result
1:

    /* update frame pointer and check result from dvmFindCatchBlock */
    ldr     rFP, [sp, #4]               @ retrieve the updated rFP
    cmp     r0, #0                      @ is catchRelPc < 0?
    add     sp, sp, #8                  @ restore stack
    bmi     .LnotCaughtLocally          @ negative: no handler in this method

    /* adjust locals to match self->curFrame and updated PC */
    SAVEAREA_FROM_FP(r1, rFP)           @ r1<- new save area
    ldr     r1, [r1, #offStackSaveArea_method] @ r1<- new method
    str     r1, [rGLUE, #offGlue_method]    @ glue->method = new method
    ldr     r2, [r1, #offMethod_clazz]  @ r2<- method->clazz
    ldr     r3, [r1, #offMethod_insns]  @ r3<- method->insns
    ldr     r2, [r2, #offClassObject_pDvmDex] @ r2<- method->clazz->pDvmDex
    add     rPC, r3, r0, asl #1         @ rPC<- method->insns + catchRelPc
    str     r2, [rGLUE, #offGlue_methodClassDex] @ glue->pDvmDex = meth...

    /* release the tracked alloc on the exception */
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception

    /* restore the exception if the handler wants it */
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    cmp     ip, #OP_MOVE_EXCEPTION      @ is it "move-exception"?
    streq   r9, [r10, #offThread_exception] @ yes, restore the exception
    GOTO_OPCODE(ip)                     @ jump to next instruction

.LnotCaughtLocally: @ r9=exception, r10=self
    /* fix stack overflow if necessary */
    ldrb    r1, [r10, #offThread_stackOverflowed]
    cmp     r1, #0                      @ did we overflow earlier?
    movne   r0, r10                     @ if yes: r0<- self
    movne   r1, r9                      @ if yes: r1<- exception
    blne    dvmCleanupStackOverflow     @ if yes: call(self, exception)

    @ may want to show "not caught locally" debug messages here
#if DVM_SHOW_EXCEPTION >= 2
    /* call __android_log_print(prio, tag, format, ...) */
    /* "Exception %s from %s:%d not caught locally" */
    @ dvmLineNumFromPC(method, pc - method->insns)
    ldr     r0, [rGLUE, #offGlue_method]
    ldr     r1, [r0, #offMethod_insns]
    sub     r1, rPC, r1
    asr     r1, r1, #1                  @ byte offset -> code-unit offset
    bl      dvmLineNumFromPC
    str     r0, [sp, #-4]!              @ push line number (5th printf arg)
    @ dvmGetMethodSourceFile(method)
    ldr     r0, [rGLUE, #offGlue_method]
    bl      dvmGetMethodSourceFile
    str     r0, [sp, #-4]!              @ push source file (4th printf arg)
    @ exception->clazz->descriptor
    ldr     r3, [r9, #offObject_clazz]
    ldr     r3, [r3, #offClassObject_descriptor]
    @
    ldr     r2, strExceptionNotCaughtLocally
    ldr     r1, strLogTag
    mov     r0, #3                      @ LOG_DEBUG
    bl      __android_log_print
#endif
    @ Re-arm the exception and bail; an outer frame (or native caller)
    @ will deal with it.
    str     r9, [r10, #offThread_exception] @ restore exception
    mov     r0, r9                      @ r0<- exception
    mov     r1, r10                     @ r1<- self
    bl      dvmReleaseTrackedAlloc      @ release the exception
    mov     r1, #0                      @ "want switch" = false
    b       common_gotoBail             @ bail out


/*
 * Exception handling, calls through "glue code".
 */
    .if     0
.LexceptionOld:
    SAVE_PC_FP_TO_GLUE()                @ export state
    mov     r0, rGLUE                   @ arg to function
    bl      dvmMterp_exceptionThrown
    b       common_resumeAfterGlueCall
    .endif


/*
 * After returning from a "glued" function, pull out the updated
 * values and start executing at the next instruction.
 */
common_resumeAfterGlueCall:
    LOAD_PC_FP_FROM_GLUE()              @ pull rPC and rFP out of glue
    FETCH_INST()                        @ load rINST from rPC
    GET_INST_OPCODE(ip)                 @ extract opcode from rINST
    GOTO_OPCODE(ip)                     @ jump to next instruction

/*
 * Invalid array index.
 */
common_errArrayIndex:
    EXPORT_PC()
    ldr     r0, strArrayIndexException
    mov     r1, #0                      @ no detail message
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Invalid array value.
 */
common_errArrayStore:
    EXPORT_PC()
    ldr     r0, strArrayStoreException
    mov     r1, #0                      @ no detail message
    bl      dvmThrowException
    b       common_exceptionThrown

/*
 * Integer divide or mod by zero.
10822 */ 10823 common_errDivideByZero: 10824 EXPORT_PC() 10825 ldr r0, strArithmeticException 10826 ldr r1, strDivideByZero 10827 bl dvmThrowException 10828 b common_exceptionThrown 10829 10830 /* 10831 * Attempt to allocate an array with a negative size. 10832 */ 10833 common_errNegativeArraySize: 10834 EXPORT_PC() 10835 ldr r0, strNegativeArraySizeException 10836 mov r1, #0 10837 bl dvmThrowException 10838 b common_exceptionThrown 10839 10840 /* 10841 * Invocation of a non-existent method. 10842 */ 10843 common_errNoSuchMethod: 10844 EXPORT_PC() 10845 ldr r0, strNoSuchMethodError 10846 mov r1, #0 10847 bl dvmThrowException 10848 b common_exceptionThrown 10849 10850 /* 10851 * We encountered a null object when we weren't expecting one. We 10852 * export the PC, throw a NullPointerException, and goto the exception 10853 * processing code. 10854 */ 10855 common_errNullObject: 10856 EXPORT_PC() 10857 ldr r0, strNullPointerException 10858 mov r1, #0 10859 bl dvmThrowException 10860 b common_exceptionThrown 10861 10862 /* 10863 * For debugging, cause an immediate fault. The source address will 10864 * be in lr (use a bl instruction to jump here). 10865 */ 10866 common_abort: 10867 ldr pc, .LdeadFood 10868 .LdeadFood: 10869 .word 0xdeadf00d 10870 10871 /* 10872 * Spit out a "we were here", preserving all registers. (The attempt 10873 * to save ip won't work, but we need to save an even number of 10874 * registers for EABI 64-bit stack alignment.) 10875 */ 10876 .macro SQUEAK num 10877 common_squeak\num: 10878 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10879 ldr r0, strSqueak 10880 mov r1, #\num 10881 bl printf 10882 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10883 bx lr 10884 .endm 10885 10886 SQUEAK 0 10887 SQUEAK 1 10888 SQUEAK 2 10889 SQUEAK 3 10890 SQUEAK 4 10891 SQUEAK 5 10892 10893 /* 10894 * Spit out the number in r0, preserving registers. 
10895 */ 10896 common_printNum: 10897 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10898 mov r1, r0 10899 ldr r0, strSqueak 10900 bl printf 10901 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10902 bx lr 10903 10904 /* 10905 * Print a newline, preserving registers. 10906 */ 10907 common_printNewline: 10908 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10909 ldr r0, strNewline 10910 bl printf 10911 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10912 bx lr 10913 10914 /* 10915 * Print the 32-bit quantity in r0 as a hex value, preserving registers. 10916 */ 10917 common_printHex: 10918 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10919 mov r1, r0 10920 ldr r0, strPrintHex 10921 bl printf 10922 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10923 bx lr 10924 10925 /* 10926 * Print the 64-bit quantity in r0-r1, preserving registers. 10927 */ 10928 common_printLong: 10929 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10930 mov r3, r1 10931 mov r2, r0 10932 ldr r0, strPrintLong 10933 bl printf 10934 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10935 bx lr 10936 10937 /* 10938 * Print full method info. Pass the Method* in r0. Preserves regs. 10939 */ 10940 common_printMethod: 10941 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10942 bl dvmMterpPrintMethod 10943 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10944 bx lr 10945 10946 /* 10947 * Call a C helper function that dumps regs and possibly some 10948 * additional info. Requires the C function to be compiled in. 10949 */ 10950 .if 0 10951 common_dumpRegs: 10952 stmfd sp!, {r0, r1, r2, r3, ip, lr} 10953 bl dvmMterpDumpArmRegs 10954 ldmfd sp!, {r0, r1, r2, r3, ip, lr} 10955 bx lr 10956 .endif 10957 10958 #if 0 10959 /* 10960 * Experiment on VFP mode. 10961 * 10962 * uint32_t setFPSCR(uint32_t val, uint32_t mask) 10963 * 10964 * Updates the bits specified by "mask", setting them to the values in "val". 
 */
setFPSCR:
    and     r0, r0, r1                  @ make sure no stray bits are set
    fmrx    r2, fpscr                   @ get VFP reg
    mvn     r1, r1                      @ bit-invert mask
    and     r2, r2, r1                  @ clear masked bits
    orr     r2, r2, r0                  @ set specified bits
    fmxr    fpscr, r2                   @ set VFP reg
    mov     r0, r2                      @ return new value
    bx      lr

    .align  2
    .global dvmConfigureFP
    .type   dvmConfigureFP, %function
dvmConfigureFP:
    stmfd   sp!, {ip, lr}
    /* 0x03000000 sets DN/FZ */
    /* 0x00009f00 clears the six exception enable flags */
    bl      common_squeak0
    mov     r0, #0x03000000             @ r0<- 0x03000000
    add     r1, r0, #0x9f00             @ r1<- 0x03009f00
    bl      setFPSCR
    ldmfd   sp!, {ip, pc}
#endif


/*
 * String references, must be close to the code that uses them.
 *
 * Each strXxx word holds the address of the corresponding .LstrXxx
 * string in .rodata, so the code above can "ldr r0, strXxx" without
 * worrying about ADR range limits.
 */
    .align  2
strArithmeticException:
    .word   .LstrArithmeticException
strArrayIndexException:
    .word   .LstrArrayIndexException
strArrayStoreException:
    .word   .LstrArrayStoreException
strDivideByZero:
    .word   .LstrDivideByZero
strNegativeArraySizeException:
    .word   .LstrNegativeArraySizeException
strNoSuchMethodError:
    .word   .LstrNoSuchMethodError
strNullPointerException:
    .word   .LstrNullPointerException

strLogTag:
    .word   .LstrLogTag
strExceptionNotCaughtLocally:
    .word   .LstrExceptionNotCaughtLocally

strNewline:
    .word   .LstrNewline
strSqueak:
    .word   .LstrSqueak
strPrintHex:
    .word   .LstrPrintHex
strPrintLong:
    .word   .LstrPrintLong

/*
 * Zero-terminated ASCII string data.
 *
 * On ARM we have two choices: do like gcc does, and LDR from a .word
 * with the address, or use an ADR pseudo-op to get the address
 * directly.  ADR saves 4 bytes and an indirection, but it's using a
 * PC-relative addressing mode and hence has a limited range, which
 * makes it not work well with mergeable string sections.
 */
    .section .rodata.str1.4,"aMS",%progbits,1

.LstrBadEntryPoint:
    .asciz  "Bad entry point %d\n"
.LstrArithmeticException:
    .asciz  "Ljava/lang/ArithmeticException;"
.LstrArrayIndexException:
    .asciz  "Ljava/lang/ArrayIndexOutOfBoundsException;"
.LstrArrayStoreException:
    .asciz  "Ljava/lang/ArrayStoreException;"
.LstrClassCastException:
    .asciz  "Ljava/lang/ClassCastException;"
.LstrDivideByZero:
    .asciz  "divide by zero"
.LstrFilledNewArrayNotImpl:
    .asciz  "filled-new-array only implemented for objects and 'int'"
.LstrInternalError:
    .asciz  "Ljava/lang/InternalError;"
.LstrInstantiationError:
    .asciz  "Ljava/lang/InstantiationError;"
.LstrNegativeArraySizeException:
    .asciz  "Ljava/lang/NegativeArraySizeException;"
.LstrNoSuchMethodError:
    .asciz  "Ljava/lang/NoSuchMethodError;"
.LstrNullPointerException:
    .asciz  "Ljava/lang/NullPointerException;"

.LstrLogTag:
    .asciz  "mterp"
.LstrExceptionNotCaughtLocally:
    .asciz  "Exception %s from %s:%d not caught locally\n"

.LstrNewline:
    .asciz  "\n"
.LstrSqueak:
    .asciz  "<%d>"
.LstrPrintHex:
    .asciz  "<0x%x>"
.LstrPrintLong:
    .asciz  "<%lld>"