/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "asm_support_arm64.S"

#include "arch/quick_alloc_entrypoints.S"


    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kSaveAll).
     */
.macro SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // xIP0 = art::Runtime::instance_ (an art::Runtime*).

    // xIP0 = (ArtMethod*) Runtime::instance_->callee_save_methods_[kSaveAll].
    THIS_LOAD_REQUIRES_READ_BARRIER
    ldr xIP0, [xIP0, RUNTIME_SAVE_ALL_CALLEE_SAVE_FRAME_OFFSET]

    sub sp, sp, #176
    .cfi_adjust_cfa_offset 176

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_SAVE_ALL_CALLEE_SAVE != 176)
#error "SAVE_ALL_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif

    // FP callee-saves.
    stp d8, d9,   [sp, #8]
    stp d10, d11, [sp, #24]
    stp d12, d13, [sp, #40]
    stp d14, d15, [sp, #56]

    // Reserved registers.
    stp xSELF, xSUSPEND, [sp, #72]
    .cfi_rel_offset x18, 72
    .cfi_rel_offset x19, 80

    // Callee-saves.
    stp x20, x21, [sp, #88]
    .cfi_rel_offset x20, 88
    .cfi_rel_offset x21, 96

    stp x22, x23, [sp, #104]
    .cfi_rel_offset x22, 104
    .cfi_rel_offset x23, 112

    stp x24, x25, [sp, #120]
    .cfi_rel_offset x24, 120
    .cfi_rel_offset x25, 128

    stp x26, x27, [sp, #136]
    .cfi_rel_offset x26, 136
    .cfi_rel_offset x27, 144

    stp x28, x29, [sp, #152]
    .cfi_rel_offset x28, 152
    .cfi_rel_offset x29, 160

    str xLR, [sp, #168]
    .cfi_rel_offset x30, 168

    // Store the callee-save method for this frame.
    str xIP0, [sp]  // Store ArtMethod* Runtime::callee_save_methods_[kSaveAll].
.endm
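    /*
     * Layout of the 176-byte kSaveAll frame built above (offsets from SP,
     * derived from the stores in the macro):
     *    0: ArtMethod*             8: d8-d15 (FP callee-saves)
     *   72: x18 (xSELF), x19 (xSUSPEND)
     *   88: x20-x29 (callee-saves)
     *  168: x30 (LR)
     */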
    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kRefsOnly).
     */
.macro SETUP_REF_ONLY_CALLEE_SAVE_FRAME
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // xIP0 = art::Runtime::instance_ (an art::Runtime*).

    // xIP0 = (ArtMethod*) Runtime::instance_->callee_save_methods_[kRefsOnly].
    THIS_LOAD_REQUIRES_READ_BARRIER
    ldr xIP0, [xIP0, RUNTIME_REFS_ONLY_CALLEE_SAVE_FRAME_OFFSET]

    sub sp, sp, #96
    .cfi_adjust_cfa_offset 96

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_ONLY_CALLEE_SAVE != 96)
#error "REFS_ONLY_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif

    // Callee-saves.
    stp x20, x21, [sp, #8]
    .cfi_rel_offset x20, 8
    .cfi_rel_offset x21, 16

    stp x22, x23, [sp, #24]
    .cfi_rel_offset x22, 24
    .cfi_rel_offset x23, 32

    stp x24, x25, [sp, #40]
    .cfi_rel_offset x24, 40
    .cfi_rel_offset x25, 48

    stp x26, x27, [sp, #56]
    .cfi_rel_offset x26, 56
    .cfi_rel_offset x27, 64

    stp x28, x29, [sp, #72]
    .cfi_rel_offset x28, 72
    .cfi_rel_offset x29, 80

    // LR.
    str xLR, [sp, #88]
    .cfi_rel_offset x30, 88

    // Save xSELF to xETR.
    mov xETR, xSELF

    // Store the callee-save method for this frame.
    str xIP0, [sp]  // Store ArtMethod* Runtime::callee_save_methods_[kRefsOnly].
.endm

// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    // Restore xSELF.
    mov xSELF, xETR

    // Callee-saves.
    ldp x20, x21, [sp, #8]
    .cfi_restore x20
    .cfi_restore x21

    ldp x22, x23, [sp, #24]
    .cfi_restore x22
    .cfi_restore x23

    ldp x24, x25, [sp, #40]
    .cfi_restore x24
    .cfi_restore x25

    ldp x26, x27, [sp, #56]
    .cfi_restore x26
    .cfi_restore x27

    ldp x28, x29, [sp, #72]
    .cfi_restore x28
    .cfi_restore x29

    // LR.
    ldr xLR, [sp, #88]
    .cfi_restore x30

    add sp, sp, #96
    .cfi_adjust_cfa_offset -96
.endm

.macro POP_REF_ONLY_CALLEE_SAVE_FRAME
    // Restore xSELF as it might be scratched.
    mov xSELF, xETR
    // ETR.
    ldr xETR, [sp, #16]
    .cfi_restore x21

    add sp, sp, #96
    .cfi_adjust_cfa_offset -96
.endm

.macro RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    ret
.endm


.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL
    sub sp, sp, #224
    .cfi_adjust_cfa_offset 224

    // Ugly compile-time check, but we only have the preprocessor.
#if (FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE != 224)
#error "REFS_AND_ARGS_CALLEE_SAVE_FRAME(ARM64) size not as expected."
#endif

    // FP args.
    stp d0, d1, [sp, #16]
    stp d2, d3, [sp, #32]
    stp d4, d5, [sp, #48]
    stp d6, d7, [sp, #64]

    // Core args and x20 (callee-save).
    stp x1, x2, [sp, #80]
    .cfi_rel_offset x1, 80
    .cfi_rel_offset x2, 88

    stp x3, x4, [sp, #96]
    .cfi_rel_offset x3, 96
    .cfi_rel_offset x4, 104

    stp x5, x6, [sp, #112]
    .cfi_rel_offset x5, 112
    .cfi_rel_offset x6, 120

    stp x7, x20, [sp, #128]
    .cfi_rel_offset x7, 128
    .cfi_rel_offset x20, 136

    // Callee-saves.
    stp x21, x22, [sp, #144]
    .cfi_rel_offset x21, 144
    .cfi_rel_offset x22, 152

    stp x23, x24, [sp, #160]
    .cfi_rel_offset x23, 160
    .cfi_rel_offset x24, 168

    stp x25, x26, [sp, #176]
    .cfi_rel_offset x25, 176
    .cfi_rel_offset x26, 184

    stp x27, x28, [sp, #192]
    .cfi_rel_offset x27, 192
    .cfi_rel_offset x28, 200

    // x29 (callee-save) and LR.
    stp x29, xLR, [sp, #208]
    .cfi_rel_offset x29, 208
    .cfi_rel_offset x30, 216

    // Save xSELF to xETR.
    mov xETR, xSELF
.endm
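    /*
     * Layout of the 224-byte kRefsAndArgs frame built above (offsets from SP,
     * derived from the stores in the macro):
     *    0: ArtMethod*             16: d0-d7 (FP args)
     *   80: x1-x7 (core args)     136: x20-x28 (callee-saves)
     *  208: x29 (FP), x30 (LR)
     */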
    /*
     * Macro that sets up the callee save frame to conform with
     * Runtime::CreateCalleeSaveMethod(kRefsAndArgs).
     *
     * TODO This is probably too conservative - saving FP & LR.
     */
.macro SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
    adrp xIP0, :got:_ZN3art7Runtime9instance_E
    ldr xIP0, [xIP0, #:got_lo12:_ZN3art7Runtime9instance_E]

    // Our registers aren't intermixed - just spill in order.
    ldr xIP0, [xIP0]  // xIP0 = art::Runtime::instance_ (an art::Runtime*).

    // xIP0 = (ArtMethod*) Runtime::instance_->callee_save_methods_[kRefsAndArgs].
    THIS_LOAD_REQUIRES_READ_BARRIER
    ldr xIP0, [xIP0, RUNTIME_REF_AND_ARGS_CALLEE_SAVE_FRAME_OFFSET]

    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL

    str xIP0, [sp]  // Store ArtMethod* Runtime::callee_save_methods_[kRefsAndArgs].
.endm

// TODO: Probably no need to restore registers preserved by aapcs64.
.macro RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
    // Restore xSELF.
    mov xSELF, xETR

    // FP args.
    ldp d0, d1, [sp, #16]
    ldp d2, d3, [sp, #32]
    ldp d4, d5, [sp, #48]
    ldp d6, d7, [sp, #64]

    // Core args and x20 (callee-save).
    ldp x1, x2, [sp, #80]
    .cfi_restore x1
    .cfi_restore x2

    ldp x3, x4, [sp, #96]
    .cfi_restore x3
    .cfi_restore x4

    ldp x5, x6, [sp, #112]
    .cfi_restore x5
    .cfi_restore x6

    ldp x7, x20, [sp, #128]
    .cfi_restore x7
    .cfi_restore x20

    // Callee-saves.
    ldp x21, x22, [sp, #144]
    .cfi_restore x21
    .cfi_restore x22

    ldp x23, x24, [sp, #160]
    .cfi_restore x23
    .cfi_restore x24

    ldp x25, x26, [sp, #176]
    .cfi_restore x25
    .cfi_restore x26

    ldp x27, x28, [sp, #192]
    .cfi_restore x27
    .cfi_restore x28

    // x29 (callee-save) and LR.
    ldp x29, xLR, [sp, #208]
    .cfi_restore x29
    .cfi_restore x30

    add sp, sp, #224
    .cfi_adjust_cfa_offset -224
.endm

.macro RETURN_IF_RESULT_IS_ZERO
    cbnz x0, 1f  // Result non-zero: branch over the return.
    ret
1:
.endm

.macro RETURN_IF_RESULT_IS_NON_ZERO
    cbz x0, 1f   // Result zero: branch over the return.
    ret
1:
.endm

    /*
     * Macro that calls through to artDeliverPendingExceptionFromCode, where the pending
     * exception is Thread::Current()->exception_.
     */
.macro DELIVER_PENDING_EXCEPTION
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov x0, xSELF
    mov x1, sp

    // Point of no return.
    b artDeliverPendingExceptionFromCode  // artDeliverPendingExceptionFromCode(Thread*, SP)
    brk 0  // Unreached.
.endm

.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
    ldr \reg, [xSELF, #THREAD_EXCEPTION_OFFSET]  // Get exception field.
    cbnz \reg, 1f
    ret
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro RETURN_OR_DELIVER_PENDING_EXCEPTION
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG xIP0
.endm
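    // In C terms, the return-or-deliver pattern above is roughly the following
    // sketch (illustrative only, not actual runtime code):
    //
    //   if (self->exception_ != nullptr) {
    //     artDeliverPendingExceptionFromCode(self, sp);  // never returns
    //   }
    //   return;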
// Same as above with x1. This is helpful in stubs that want to avoid clobbering another register.
.macro RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG x1
.endm

.macro RETURN_IF_W0_IS_ZERO_OR_DELIVER
    cbnz w0, 1f  // Result non-zero: branch over the return.
    ret
1:
    DELIVER_PENDING_EXCEPTION
.endm

.macro NO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // Save all registers as basis for long jump context.
    mov x0, xSELF                     // Pass Thread::Current.
    mov x1, sp                        // Pass SP.
    b \cxx_name                       // \cxx_name(Thread*, SP)
END \c_name
.endm

.macro ONE_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // Save all registers as basis for long jump context.
    mov x1, xSELF                     // Pass Thread::Current.
    mov x2, sp                        // Pass SP.
    b \cxx_name                       // \cxx_name(arg, Thread*, SP)
    brk 0
END \c_name
.endm

.macro TWO_ARG_RUNTIME_EXCEPTION c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // Save all registers as basis for long jump context.
    mov x2, xSELF                     // Pass Thread::Current.
    mov x3, sp                        // Pass SP.
    b \cxx_name                       // \cxx_name(arg1, arg2, Thread*, SP)
    brk 0
END \c_name
.endm

    /*
     * Called by managed code, saves callee saves and then calls artThrowException
     * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_deliver_exception, artDeliverExceptionFromCode

    /*
     * Called by managed code to create and deliver a NullPointerException.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_null_pointer_exception, artThrowNullPointerExceptionFromCode

    /*
     * Called by managed code to create and deliver an ArithmeticException.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_div_zero, artThrowDivZeroFromCode

    /*
     * Called by managed code to create and deliver an ArrayIndexOutOfBoundsException. Arg1 holds
     * the index, arg2 holds the limit.
     */
TWO_ARG_RUNTIME_EXCEPTION art_quick_throw_array_bounds, artThrowArrayBoundsFromCode

    /*
     * Called by managed code to create and deliver a StackOverflowError.
     */
NO_ARG_RUNTIME_EXCEPTION art_quick_throw_stack_overflow, artThrowStackOverflowFromCode

    /*
     * Called by managed code to create and deliver a NoSuchMethodError.
     */
ONE_ARG_RUNTIME_EXCEPTION art_quick_throw_no_such_method, artThrowNoSuchMethodFromCode

    /*
     * All generated callsites for interface invokes and invocation slow paths will load arguments
     * as usual - except instead of loading arg0/x0 with the target Method*, arg0/x0 will contain
     * the method_idx. This wrapper will save arg1-arg3, load the caller's Method*, align the
     * stack and call the appropriate C helper.
     * NOTE: "this" is the first visible argument of the target, and so can be found in arg1/x1.
     *
     * The helper will attempt to locate the target and return a 128-bit result in x0/x1 consisting
     * of the target Method* in x0 and method->code_ in x1.
     *
     * If unsuccessful, the helper will return NULL/???? There will be a pending exception in the
     * thread and we branch to another stub to deliver it.
     *
     * On success this wrapper will restore arguments and *jump* to the target, leaving the lr
     * pointing back to the original caller.
     *
     * Adapted from ARM32 code.
     *
     * Clobbers xIP0.
     */
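    // The 128-bit x0/x1 result described above corresponds to a C sketch like
    // the following (illustrative only; the names are not the runtime's own):
    //
    //   struct TwoWordResult {
    //     ArtMethod* method;  // returned in x0
    //     void* code;         // method->code_, returned in x1 (AAPCS64 small-struct return)
    //   };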
.macro INVOKE_TRAMPOLINE c_name, cxx_name
    .extern \cxx_name
ENTRY \c_name
    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME  // Save callee saves in case allocation triggers GC.
    // Helper signature is always
    // (method_idx, *this_object, *caller_method, *self, sp).

    ldr w2, [sp, #FRAME_SIZE_REFS_AND_ARGS_CALLEE_SAVE]  // Pass caller Method*.
    mov x3, xSELF                         // Pass Thread::Current.
    mov x4, sp
    bl \cxx_name                          // (method_idx, this, caller, Thread*, SP)
    mov xIP0, x1                          // Save Method*->code_.
    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
    cbz x0, 1f                            // Did we find the target? If not, go to exception delivery.
    br xIP0                               // Tail call to target.
1:
    DELIVER_PENDING_EXCEPTION
END \c_name
.endm

INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline, artInvokeInterfaceTrampoline
INVOKE_TRAMPOLINE art_quick_invoke_interface_trampoline_with_access_check, artInvokeInterfaceTrampolineWithAccessCheck

INVOKE_TRAMPOLINE art_quick_invoke_static_trampoline_with_access_check, artInvokeStaticTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_direct_trampoline_with_access_check, artInvokeDirectTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_super_trampoline_with_access_check, artInvokeSuperTrampolineWithAccessCheck
INVOKE_TRAMPOLINE art_quick_invoke_virtual_trampoline_with_access_check, artInvokeVirtualTrampolineWithAccessCheck


.macro INVOKE_STUB_CREATE_FRAME

SAVE_SIZE=6*8   // x4, x5, xSUSPEND, SP, LR & FP saved.
SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE


    mov x9, sp                          // Save stack pointer.
    .cfi_register sp, x9

    add x10, x2, #SAVE_SIZE_AND_METHOD  // Calculate size of frame.
    sub x10, sp, x10                    // Calculate SP position - saves + ArtMethod* + args.
    and x10, x10, #~0xf                 // Enforce 16 byte stack alignment.
    mov sp, x10                         // Set new SP.

    sub x10, x9, #SAVE_SIZE             // Calculate new FP (later). Done here as we must move SP
    .cfi_def_cfa_register x10           // before this.
    .cfi_adjust_cfa_offset SAVE_SIZE

    stp x9, xSUSPEND, [x10, #32]        // Save old stack pointer and xSUSPEND.
    .cfi_rel_offset sp, 32
    .cfi_rel_offset x19, 40

    stp x4, x5, [x10, #16]              // Save result and shorty addresses.
    .cfi_rel_offset x4, 16
    .cfi_rel_offset x5, 24

    stp xFP, xLR, [x10]                 // Store FP & LR.
    .cfi_rel_offset x29, 0
    .cfi_rel_offset x30, 8

    mov xFP, x10                        // Use xFP now, as it's callee-saved.
    .cfi_def_cfa_register x29
    mov xSELF, x3                       // Move thread pointer into SELF register.
    mov wSUSPEND, #SUSPEND_CHECK_INTERVAL  // Reset wSUSPEND to suspend check interval.

    // Copy arguments into stack frame.
    // Use simple copy routine for now.
    // 4 bytes per slot.
    // X1 - source address
    // W2 - args length
    // X9 - destination address.
    // W10 - temporary
    add x9, sp, #4                      // Destination address is bottom of stack + space for the
                                        // null StackReference<ArtMethod>.

    // Use \@ to differentiate between macro invocations.
.LcopyParams\@:
    cmp w2, #0
    beq .LendCopyParams\@
    sub w2, w2, #4                      // Need 65536 bytes of range.
    ldr w10, [x1, x2]
    str w10, [x9, x2]

    b .LcopyParams\@

.LendCopyParams\@:

    // Store NULL into StackReference<Method>* at bottom of frame.
    str wzr, [sp]

#if (STACK_REFERENCE_SIZE != 4)
#error "STACK_REFERENCE_SIZE(ARM64) size not as expected."
#endif
.endm
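    // Worked example for the frame math above (illustrative, and assuming the
    // incoming sp is 16-byte aligned): with an args length of w2 = 12 bytes,
    // the raw frame is SAVE_SIZE_AND_METHOD + 12 = 48 + 4 + 12 = 64 bytes, the
    // alignment mask leaves it at 64, and xFP ends up at old_sp - SAVE_SIZE,
    // just above the copied arguments.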
.macro INVOKE_STUB_CALL_AND_RETURN

    // Load the method's quick code entry point (METHOD_QUICK_CODE_OFFSET).
    ldr x9, [x0, #METHOD_QUICK_CODE_OFFSET]
    // Branch to method.
    blr x9

    // Restore return value address and shorty address.
    ldp x4, x5, [xFP, #16]
    .cfi_restore x4
    .cfi_restore x5

    // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
    ldrb w10, [x5]

    // Don't set anything for a void type.
    cmp w10, #'V'
    beq .Lexit_art_quick_invoke_stub\@

    cmp w10, #'D'
    bne .Lreturn_is_float\@
    str d0, [x4]
    b .Lexit_art_quick_invoke_stub\@

.Lreturn_is_float\@:
    cmp w10, #'F'
    bne .Lreturn_is_int\@
    str s0, [x4]
    b .Lexit_art_quick_invoke_stub\@

    // Just store x0. Doesn't matter if it is 64 or 32 bits.
.Lreturn_is_int\@:
    str x0, [x4]

.Lexit_art_quick_invoke_stub\@:
    ldp x2, xSUSPEND, [xFP, #32]  // Restore stack pointer and xSUSPEND.
    .cfi_restore x19
    mov sp, x2
    .cfi_restore sp

    ldp xFP, xLR, [xFP]           // Restore old frame pointer and link register.
    .cfi_restore x29
    .cfi_restore x30

    ret

.endm


/*
 *  extern"C" void art_quick_invoke_stub(ArtMethod *method,   x0
 *                                       uint32_t  *args,     x1
 *                                       uint32_t argsize,    w2
 *                                       Thread *self,        x3
 *                                       JValue *result,      x4
 *                                       char   *shorty);     x5
 *  +----------------------+
 *  |                      |
 *  |  C/C++ frame         |
 *  |       LR''           |
 *  |       FP''           | <- SP'
 *  +----------------------+
 *  +----------------------+
 *  |        x19           | <- Used as wSUSPEND, won't be restored by managed code.
 *  |        SP'           |
 *  |        X5            |
 *  |        X4            |        Saved registers
 *  |        LR'           |
 *  |        FP'           | <- FP
 *  +----------------------+
 *  | uint32_t out[n-1]    |
 *  |    :      :          |        Outs
 *  | uint32_t out[0]      |
 *  | StackRef<ArtMethod>  | <- SP  value=null
 *  +----------------------+
 *
 * Outgoing registers:
 *  x0    - Method*
 *  x1-x7 - integer parameters.
 *  d0-d7 - Floating point parameters.
 *  xSELF = self
 *  wSUSPEND = suspend count
 *  SP = & of ArtMethod*
 *  x1 = "this" pointer.
 *
 */
ENTRY art_quick_invoke_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr x11, .LstoreW2
    adr x12, .LstoreX2
    adr x13, .LstoreS0
    adr x14, .LstoreD0

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1   // Load shorty address, plus one to skip return value.
    ldr w1, [x9], #4  // Load "this" parameter, and increment arg pointer.

    // Loop to fill registers.
.LfillRegisters:
    ldrb w17, [x10], #1      // Load next character in signature, and increment.
    cbz w17, .LcallFunction  // Exit at end of signature. Shorty 0 terminated.

    cmp w17, #'F'            // Is this a float?
    bne .LisDouble

    cmp x15, #8*12           // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x13, x15        // Calculate subroutine to jump to.
    br x17

.LisDouble:
    cmp w17, #'D'            // Is this a double?
    bne .LisLong

    cmp x15, #8*12           // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x14, x15        // Calculate subroutine to jump to.
    br x17

.LisLong:
    cmp w17, #'J'            // Is this a long?
    bne .LisOther

    cmp x8, #6*12            // Skip this load if all registers full.
    beq .Ladvance8

    add x17, x12, x8         // Calculate subroutine to jump to.
    br x17

.LisOther:                   // Everything else takes one vReg.
    cmp x8, #6*12            // Skip this load if all registers full.
    beq .Ladvance4

    add x17, x11, x8         // Calculate subroutine to jump to.
    br x17

.Ladvance4:
    add x9, x9, #4
    b .LfillRegisters

.Ladvance8:
    add x9, x9, #8
    b .LfillRegisters
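    // Dispatch note: each LOADREG expansion below is exactly three instructions
    // (12 bytes), and x8/x15 advance by 12 per filled register. The
    // "add x17, <base>, <counter>" / "br x17" sequences above therefore jump to
    // the entry for the next free register, turning the tables below into
    // computed-goto jump tables.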
// Macro for loading a parameter into a register.
//  counter - the register with offset into these tables
//  size - the size of the register - 4 or 8 bytes.
//  register - the name of the register to be loaded.
.macro LOADREG counter size register return
    ldr \register, [x9], #\size
    add \counter, \counter, 12
    b \return
.endm

// Store ints.
.LstoreW2:
    LOADREG x8 4 w2 .LfillRegisters
    LOADREG x8 4 w3 .LfillRegisters
    LOADREG x8 4 w4 .LfillRegisters
    LOADREG x8 4 w5 .LfillRegisters
    LOADREG x8 4 w6 .LfillRegisters
    LOADREG x8 4 w7 .LfillRegisters

// Store longs.
.LstoreX2:
    LOADREG x8 8 x2 .LfillRegisters
    LOADREG x8 8 x3 .LfillRegisters
    LOADREG x8 8 x4 .LfillRegisters
    LOADREG x8 8 x5 .LfillRegisters
    LOADREG x8 8 x6 .LfillRegisters
    LOADREG x8 8 x7 .LfillRegisters

// Store singles.
.LstoreS0:
    LOADREG x15 4 s0 .LfillRegisters
    LOADREG x15 4 s1 .LfillRegisters
    LOADREG x15 4 s2 .LfillRegisters
    LOADREG x15 4 s3 .LfillRegisters
    LOADREG x15 4 s4 .LfillRegisters
    LOADREG x15 4 s5 .LfillRegisters
    LOADREG x15 4 s6 .LfillRegisters
    LOADREG x15 4 s7 .LfillRegisters

// Store doubles.
.LstoreD0:
    LOADREG x15 8 d0 .LfillRegisters
    LOADREG x15 8 d1 .LfillRegisters
    LOADREG x15 8 d2 .LfillRegisters
    LOADREG x15 8 d3 .LfillRegisters
    LOADREG x15 8 d4 .LfillRegisters
    LOADREG x15 8 d5 .LfillRegisters
    LOADREG x15 8 d6 .LfillRegisters
    LOADREG x15 8 d7 .LfillRegisters


.LcallFunction:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_stub
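/*
 * Shorty reminder for the two invoke stubs (dex shorty format, which the fill
 * loops above and below rely on): one character per type with the return type
 * first, e.g. long f(int, double, Object) has the shorty "JIDL". The leading
 * return character is skipped when filling argument registers and only
 * consulted when storing the result.
 */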
/*  extern"C"
 *     void art_quick_invoke_static_stub(ArtMethod *method,   x0
 *                                       uint32_t  *args,     x1
 *                                       uint32_t argsize,    w2
 *                                       Thread *self,        x3
 *                                       JValue *result,      x4
 *                                       char   *shorty);     x5
 */
ENTRY art_quick_invoke_static_stub
    // Spill registers as per AAPCS64 calling convention.
    INVOKE_STUB_CREATE_FRAME

    // Fill registers x/w1 to x/w7 and s/d0 to s/d7 with parameters.
    // Parse the passed shorty to determine which register to load.
    // Load addresses for routines that load WXSD registers.
    adr x11, .LstoreW1_2
    adr x12, .LstoreX1_2
    adr x13, .LstoreS0_2
    adr x14, .LstoreD0_2

    // Initialize routine offsets to 0 for integers and floats.
    // x8 for integers, x15 for floating point.
    mov x8, #0
    mov x15, #0

    add x10, x5, #1  // Load shorty address, plus one to skip return value.

    // Loop to fill registers.
.LfillRegisters2:
    ldrb w17, [x10], #1       // Load next character in signature, and increment.
    cbz w17, .LcallFunction2  // Exit at end of signature. Shorty 0 terminated.

    cmp w17, #'F'             // Is this a float?
    bne .LisDouble2

    cmp x15, #8*12            // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x13, x15         // Calculate subroutine to jump to.
    br x17

.LisDouble2:
    cmp w17, #'D'             // Is this a double?
    bne .LisLong2

    cmp x15, #8*12            // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x14, x15         // Calculate subroutine to jump to.
    br x17

.LisLong2:
    cmp w17, #'J'             // Is this a long?
    bne .LisOther2

    cmp x8, #7*12             // Skip this load if all registers full.
    beq .Ladvance8_2

    add x17, x12, x8          // Calculate subroutine to jump to.
    br x17

.LisOther2:                   // Everything else takes one vReg.
    cmp x8, #7*12             // Skip this load if all registers full.
    beq .Ladvance4_2

    add x17, x11, x8          // Calculate subroutine to jump to.
    br x17

.Ladvance4_2:
    add x9, x9, #4
    b .LfillRegisters2

.Ladvance8_2:
    add x9, x9, #8
    b .LfillRegisters2

// Store ints.
.LstoreW1_2:
    LOADREG x8 4 w1 .LfillRegisters2
    LOADREG x8 4 w2 .LfillRegisters2
    LOADREG x8 4 w3 .LfillRegisters2
    LOADREG x8 4 w4 .LfillRegisters2
    LOADREG x8 4 w5 .LfillRegisters2
    LOADREG x8 4 w6 .LfillRegisters2
    LOADREG x8 4 w7 .LfillRegisters2

// Store longs.
.LstoreX1_2:
    LOADREG x8 8 x1 .LfillRegisters2
    LOADREG x8 8 x2 .LfillRegisters2
    LOADREG x8 8 x3 .LfillRegisters2
    LOADREG x8 8 x4 .LfillRegisters2
    LOADREG x8 8 x5 .LfillRegisters2
    LOADREG x8 8 x6 .LfillRegisters2
    LOADREG x8 8 x7 .LfillRegisters2

// Store singles.
.LstoreS0_2:
    LOADREG x15 4 s0 .LfillRegisters2
    LOADREG x15 4 s1 .LfillRegisters2
    LOADREG x15 4 s2 .LfillRegisters2
    LOADREG x15 4 s3 .LfillRegisters2
    LOADREG x15 4 s4 .LfillRegisters2
    LOADREG x15 4 s5 .LfillRegisters2
    LOADREG x15 4 s6 .LfillRegisters2
    LOADREG x15 4 s7 .LfillRegisters2

// Store doubles.
.LstoreD0_2:
    LOADREG x15 8 d0 .LfillRegisters2
    LOADREG x15 8 d1 .LfillRegisters2
    LOADREG x15 8 d2 .LfillRegisters2
    LOADREG x15 8 d3 .LfillRegisters2
    LOADREG x15 8 d4 .LfillRegisters2
    LOADREG x15 8 d5 .LfillRegisters2
    LOADREG x15 8 d6 .LfillRegisters2
    LOADREG x15 8 d7 .LfillRegisters2


.LcallFunction2:

    INVOKE_STUB_CALL_AND_RETURN

END art_quick_invoke_static_stub



    /*
     * On entry x0 is uintptr_t* gprs_ and x1 is uint64_t* fprs_.
     */
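    /*
     * Register file note (derived from the loads below): fprs_ holds d0-d31 in
     * order; gprs_ holds x0-x30 followed by SP in slot 31. SP is restored via
     * x1, and x0/x1 are zeroed before jumping to the restored LR.
     */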
ENTRY art_quick_do_long_jump
    // Load FPRs.
    ldp d0, d1, [x1], #16
    ldp d2, d3, [x1], #16
    ldp d4, d5, [x1], #16
    ldp d6, d7, [x1], #16
    ldp d8, d9, [x1], #16
    ldp d10, d11, [x1], #16
    ldp d12, d13, [x1], #16
    ldp d14, d15, [x1], #16
    ldp d16, d17, [x1], #16
    ldp d18, d19, [x1], #16
    ldp d20, d21, [x1], #16
    ldp d22, d23, [x1], #16
    ldp d24, d25, [x1], #16
    ldp d26, d27, [x1], #16
    ldp d28, d29, [x1], #16
    ldp d30, d31, [x1]

    // Load GPRs.
    // TODO: lots of those are smashed, could optimize.
    add x0, x0, #30*8
    ldp x30, x1, [x0], #-16
    ldp x28, x29, [x0], #-16
    ldp x26, x27, [x0], #-16
    ldp x24, x25, [x0], #-16
    ldp x22, x23, [x0], #-16
    ldp x20, x21, [x0], #-16
    ldp x18, x19, [x0], #-16
    ldp x16, x17, [x0], #-16
    ldp x14, x15, [x0], #-16
    ldp x12, x13, [x0], #-16
    ldp x10, x11, [x0], #-16
    ldp x8, x9, [x0], #-16
    ldp x6, x7, [x0], #-16
    ldp x4, x5, [x0], #-16
    ldp x2, x3, [x0], #-16
    mov sp, x1

    // TODO: Is it really OK to use LR for the target PC?
    mov x0, #0
    mov x1, #0
    br xLR
END art_quick_do_long_jump

    /*
     * Entry from managed code that calls artHandleFillArrayDataFromCode and delivers exception on
     * failure.
     */
    .extern artHandleFillArrayDataFromCode
ENTRY art_quick_handle_fill_data
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // Save callee saves in case exception allocation triggers GC.
    mov x2, xSELF                     // Pass Thread::Current.
    mov x3, sp                        // Pass SP.
    bl artHandleFillArrayDataFromCode // (Array*, const DexFile::Payload*, Thread*, SP)
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_RESULT_IS_ZERO
    DELIVER_PENDING_EXCEPTION
END art_quick_handle_fill_data
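    /*
     * Thin lock word layout, as used by the fast paths below: the top two bits
     * are the lock state (zero for thin-locked/unlocked), the low 16 bits hold
     * the owning thread id, and the recursion count sits above them (hence the
     * +/- 65536 adjustments and the uxth / lsr #30 tests).
     */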
    /*
     * Entry from managed code that calls artLockObjectFromCode, may block for GC. x0 holds the
     * possibly null object to lock.
     *
     * Derived from arm32 code.
     */
    .extern artLockObjectFromCode
ENTRY art_quick_lock_object
    cbz w0, .Lslow_lock
    add x4, x0, #LOCK_WORD_OFFSET  // Exclusive load/store has no offset immediate.
.Lretry_lock:
    ldr w2, [xSELF, #THREAD_ID_OFFSET]  // TODO: Can the thread ID really change during the loop?
    ldxr w1, [x4]
    cbnz w1, .Lnot_unlocked        // Already thin locked.
    stxr w3, w2, [x4]
    cbnz w3, .Lstrex_fail          // Store failed, retry.
    dmb ishld                      // Acquire (LoadLoad|LoadStore) memory barrier.
    ret
.Lstrex_fail:
    b .Lretry_lock                 // Unlikely forward branch, need to reload and recheck w1/w2.
.Lnot_unlocked:
    lsr w3, w1, 30
    cbnz w3, .Lslow_lock           // If either of the top two bits is set, go slow path.
    eor w2, w1, w2                 // lock_word.ThreadId() ^ self->ThreadId()
    uxth w2, w2                    // Zero top 16 bits.
    cbnz w2, .Lslow_lock           // Thread ids don't match -> contention, go to slow path;
                                   // else recursive lock, fall through to bump the count.
    add w2, w1, #65536             // Increment count in lock word, placing in w2 for storing.
    lsr w1, w2, 30                 // If either of the top two bits is set, we overflowed.
    cbnz w1, .Lslow_lock           // If we overflow the count go slow path.
    str w2, [x0, #LOCK_WORD_OFFSET]  // No need for stxr as we hold the lock.
    ret
.Lslow_lock:
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // Save callee saves in case we block.
    mov x1, xSELF                  // Pass Thread::Current.
    mov x2, sp                     // Pass SP.
    bl artLockObjectFromCode       // (Object* obj, Thread*, SP)
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_lock_object

    /*
     * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on
     * failure. x0 holds the possibly null object to unlock.
     *
     * Derived from arm32 code.
     */
    .extern artUnlockObjectFromCode
ENTRY art_quick_unlock_object
    cbz x0, .Lslow_unlock
    ldr w1, [x0, #LOCK_WORD_OFFSET]
    lsr w2, w1, 30
    cbnz w2, .Lslow_unlock         // If either of the top two bits is set, go slow path.
    ldr w2, [xSELF, #THREAD_ID_OFFSET]
    eor w3, w1, w2                 // lock_word.ThreadId() ^ self->ThreadId()
    uxth w3, w3                    // Zero top 16 bits.
    cbnz w3, .Lslow_unlock         // Do lock word and self thread ids match?
    cmp w1, #65536
    bpl .Lrecursive_thin_unlock    // Recursion count non-zero -> just decrement it.
    // Transition to unlocked, w3 holds 0.
    dmb ish                        // Full barrier; (LoadStore|StoreStore) release is what is required.
    str w3, [x0, #LOCK_WORD_OFFSET]
    ret
.Lrecursive_thin_unlock:
    sub w1, w1, #65536
    str w1, [x0, #LOCK_WORD_OFFSET]
    ret
.Lslow_unlock:
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // Save callee saves in case exception allocation triggers GC.
    mov x1, xSELF                  // Pass Thread::Current.
    mov x2, sp                     // Pass SP.
    bl artUnlockObjectFromCode     // (Object* obj, Thread*, SP)
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_unlock_object
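    /*
     * Note on ordering: the dmb ishld after acquiring the thin lock and the
     * dmb ish before releasing it pair up to give the usual acquire/release
     * monitor semantics for the fast paths above; the slow paths rely on the
     * runtime's own synchronization.
     */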
    /*
     * Entry from managed code that calls artIsAssignableFromCode and on failure calls
     * artThrowClassCastException.
     */
    .extern artThrowClassCastException
ENTRY art_quick_check_cast
    // Store arguments and link register.
    sub sp, sp, #32  // Stack needs to be 16B aligned on calls.
    .cfi_adjust_cfa_offset 32
    stp x0, x1, [sp]
    .cfi_rel_offset x0, 0
    .cfi_rel_offset x1, 8
    stp xSELF, xLR, [sp, #16]
    .cfi_rel_offset x18, 16
    .cfi_rel_offset x30, 24

    // Call runtime code.
    bl artIsAssignableFromCode

    // Check for exception.
    cbz x0, .Lthrow_class_cast_exception

    // Restore and return.
    ldp x0, x1, [sp]
    .cfi_restore x0
    .cfi_restore x1
    ldp xSELF, xLR, [sp, #16]
    .cfi_restore x18
    .cfi_restore x30
    add sp, sp, #32
    .cfi_adjust_cfa_offset -32
    ret

.Lthrow_class_cast_exception:
    // Restore.
    ldp x0, x1, [sp]
    .cfi_restore x0
    .cfi_restore x1
    ldp xSELF, xLR, [sp, #16]
    .cfi_restore x18
    .cfi_restore x30
    add sp, sp, #32
    .cfi_adjust_cfa_offset -32

    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME  // Save all registers as basis for long jump context.
    mov x2, xSELF                     // Pass Thread::Current.
    mov x3, sp                        // Pass SP.
    b artThrowClassCastException      // (Class*, Class*, Thread*, SP)
    brk 0                             // We should not return here...
END art_quick_check_cast

    /*
     * Entry from managed code for array put operations of objects where the value being stored
     * needs to be checked for compatibility.
     * x0 = array, x1 = index, x2 = value
     *
     * Currently all values should fit into w0/w1/w2, and w1 always will as indices are 32b. We
     * assume, though, that the upper 32b are zeroed out. At least for x1/w1 we can do better by
     * using index-zero-extension in load/stores.
     *
     * Temporaries: x3, x4
     * TODO: x4 OK? ip seems wrong here.
     */
ENTRY art_quick_aput_obj_with_null_and_bound_check
    tst x0, x0
    bne art_quick_aput_obj_with_bound_check
    b art_quick_throw_null_pointer_exception
END art_quick_aput_obj_with_null_and_bound_check

ENTRY art_quick_aput_obj_with_bound_check
    ldr w3, [x0, #ARRAY_LENGTH_OFFSET]
    cmp w3, w1
    bhi art_quick_aput_obj
    mov x0, x1
    mov x1, x3
    b art_quick_throw_array_bounds
END art_quick_aput_obj_with_bound_check

ENTRY art_quick_aput_obj
    cbz x2, .Ldo_aput_null
    ldr w3, [x0, #CLASS_OFFSET]                 // Heap reference = 32b; also zero-extends to x3.
    ldr w4, [x2, #CLASS_OFFSET]                 // Heap reference = 32b; also zero-extends to x4.
    ldr w3, [x3, #CLASS_COMPONENT_TYPE_OFFSET]  // Heap reference = 32b; also zero-extends to x3.
    cmp w3, w4  // value's type == array's component type - trivial assignability.
    bne .Lcheck_assignability
.Ldo_aput:
    add x3, x0, #OBJECT_ARRAY_DATA_OFFSET
    // "Compress" = do nothing.
    str w2, [x3, x1, lsl #2]                    // Heap reference = 32b.
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr x0, x0, #7
    strb w3, [x3, x0]
    ret
.Ldo_aput_null:
    add x3, x0, #OBJECT_ARRAY_DATA_OFFSET
    // "Compress" = do nothing.
    str w2, [x3, x1, lsl #2]                    // Heap reference = 32b.
    ret
.Lcheck_assignability:
    // Store arguments and link register.
    sub sp, sp, #48  // Stack needs to be 16B aligned on calls.
    .cfi_adjust_cfa_offset 48
    stp x0, x1, [sp]
    .cfi_rel_offset x0, 0
    .cfi_rel_offset x1, 8
    stp x2, xSELF, [sp, #16]
    .cfi_rel_offset x2, 16
    .cfi_rel_offset x18, 24
    str xLR, [sp, #32]
    .cfi_rel_offset x30, 32

    // Call runtime code.
    mov x0, x3  // Heap reference, 32b, "uncompress" = do nothing, already zero-extended.
    mov x1, x4  // Heap reference, 32b, "uncompress" = do nothing, already zero-extended.
    bl artIsAssignableFromCode

    // Check for exception.
    cbz x0, .Lthrow_array_store_exception

    // Restore.
    ldp x0, x1, [sp]
    .cfi_restore x0
    .cfi_restore x1
    ldp x2, xSELF, [sp, #16]
    .cfi_restore x2
    .cfi_restore x18
    ldr xLR, [sp, #32]
    .cfi_restore x30
    add sp, sp, #48
    .cfi_adjust_cfa_offset -48

    add x3, x0, #OBJECT_ARRAY_DATA_OFFSET
    // "Compress" = do nothing.
    str w2, [x3, x1, lsl #2]  // Heap reference = 32b.
    ldr x3, [xSELF, #THREAD_CARD_TABLE_OFFSET]
    lsr x0, x0, #7
    strb w3, [x3, x0]
    ret
.Lthrow_array_store_exception:
    ldp x0, x1, [sp]
    .cfi_restore x0
    .cfi_restore x1
    ldp x2, xSELF, [sp, #16]
    .cfi_restore x2
    .cfi_restore x18
    ldr xLR, [sp, #32]
    .cfi_restore x30
    add sp, sp, #48
    .cfi_adjust_cfa_offset -48

    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov x1, x2                    // Pass value.
    mov x2, xSELF                 // Pass Thread::Current.
    mov x3, sp                    // Pass SP.
    b artThrowArrayStoreException // (Object*, Object*, Thread*, SP).
    brk 0                         // Unreached.
END art_quick_aput_obj
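    /*
     * Card marking note for the stores above: x3 is loaded with the card table
     * base cached in the Thread, and the card index is the object address >> 7
     * (128-byte cards), so the strb dirties the card covering the stored-into
     * array object.
     */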
// Macro to facilitate adding new allocation entrypoints.
.macro TWO_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // Save callee saves in case of GC.
    mov x2, xSELF                     // Pass Thread::Current.
    mov x3, sp                        // Pass SP.
    bl \entrypoint                    // (uint32_t type_idx, Method* method, Thread*, SP)
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    \return
    DELIVER_PENDING_EXCEPTION
END \name
.endm

// Macro to facilitate adding new array allocation entrypoints.
.macro THREE_ARG_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // Save callee saves in case of GC.
    mov x3, xSELF                     // Pass Thread::Current.
    mov x4, sp                        // Pass SP.
    bl \entrypoint
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    \return
    DELIVER_PENDING_EXCEPTION
END \name
.endm

// Macros that exploit the code similarities between downcalls that take a referrer.
.macro ONE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // Save callee saves in case of GC.
    ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE]  // Load referrer.
    mov x2, xSELF                     // Pass Thread::Current.
    mov x3, sp                        // Pass SP.
    bl \entrypoint                    // (uint32_t type_idx, Method* method, Thread*, SP)
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

.macro TWO_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // Save callee saves in case of GC.
    ldr w2, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE]  // Load referrer.
    mov x3, xSELF                     // Pass Thread::Current.
    mov x4, sp                        // Pass SP.
    bl \entrypoint
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm

.macro THREE_ARG_REF_DOWNCALL name, entrypoint, return
    .extern \entrypoint
ENTRY \name
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // Save callee saves in case of GC.
    ldr w3, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE]  // Load referrer.
    mov x4, xSELF                     // Pass Thread::Current.
    mov x5, sp                        // Pass SP.
    bl \entrypoint
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    \return
END \name
.endm
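    /*
     * Referrer note: the _REF_DOWNCALL macros above read the caller's ArtMethod*
     * from just past our save frame - the caller left it at the bottom of its own
     * frame, which sits at sp + FRAME_SIZE_REFS_ONLY_CALLEE_SAVE once ours is
     * pushed.
     */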
    /*
     * Entry from managed code when the static storage of a class is uninitialized. This stub
     * will run the class initializer and deliver the exception on error. On success the static
     * storage base is returned.
     */
TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorageFromCode, RETURN_IF_RESULT_IS_NON_ZERO

TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO
TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO

ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1

TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER

// This is separated out as the argument order is different.
    .extern artSet64StaticFromCode
ENTRY art_quick_set64_static
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // Save callee saves in case of GC.
    mov x3, x1                        // Stash the value.
    ldr w1, [sp, #FRAME_SIZE_REFS_ONLY_CALLEE_SAVE]  // Load referrer.
    mov x2, x3                        // Put value param.
    mov x3, xSELF                     // Pass Thread::Current.
    mov x4, sp                        // Pass SP.
    bl artSet64StaticFromCode
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
    RETURN_IF_W0_IS_ZERO_OR_DELIVER
END art_quick_set64_static

    /*
     * Entry from managed code to resolve a string. This stub will allocate a String and deliver
     * an exception on error. On success the String is returned. x0 holds the referring method,
     * w1 holds the string index. The fast path check for hit in strings cache has already been
     * performed.
     */
TWO_ARG_DOWNCALL art_quick_resolve_string, artResolveStringFromCode, RETURN_IF_RESULT_IS_NON_ZERO

// Generate the allocation entrypoints for each allocator.
GENERATE_ALL_ALLOC_ENTRYPOINTS
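// GENERATE_ALL_ALLOC_ENTRYPOINTS comes from arch/quick_alloc_entrypoints.S
// (#included at the top of this file) and expands to allocator-specific
// art_quick_alloc_* stubs built with the downcall macros above.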
    /*
     * Called by managed code when the value in wSUSPEND has been decremented to 0.
     */
    .extern artTestSuspendFromCode
ENTRY art_quick_test_suspend
    ldrh w0, [xSELF, #THREAD_FLAGS_OFFSET]  // Get xSELF->state_and_flags.as_struct.flags.
    mov wSUSPEND, #SUSPEND_CHECK_INTERVAL   // Reset wSUSPEND to SUSPEND_CHECK_INTERVAL.
    cbnz w0, .Lneed_suspend                 // Check flags == 0.
    ret                                     // Return if flags == 0.
.Lneed_suspend:
    mov x0, xSELF
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME        // Save callee saves for stack crawl.
    mov x1, sp
    bl artTestSuspendFromCode               // (Thread*, SP)
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_test_suspend

ENTRY art_quick_implicit_suspend
    mov x0, xSELF
    SETUP_REF_ONLY_CALLEE_SAVE_FRAME        // Save callee saves for stack crawl.
    mov x1, sp
    bl artTestSuspendFromCode               // (Thread*, SP)
    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME_AND_RETURN
END art_quick_implicit_suspend

    /*
     * Called by managed code that is attempting to call a method on a proxy class. On entry
     * x0 holds the proxy method and x1 holds the receiver. The frame size of the invoked proxy
     * method agrees with a ref and args callee save frame.
     */
    .extern artQuickProxyInvokeHandler
ENTRY art_quick_proxy_invoke_handler
    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
    str x0, [sp, #0]                        // Place proxy method at bottom of frame.
    mov x2, xSELF                           // Pass Thread::Current.
    mov x3, sp                              // Pass SP.
    bl artQuickProxyInvokeHandler           // (Method* proxy method, receiver, Thread*, SP)
    // Use xETR as xSELF might be scratched by the native function above.
    ldr x2, [xETR, THREAD_EXCEPTION_OFFSET]
    cbnz x2, .Lexception_in_proxy           // Branch if an exception is pending.
    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME  // Restore frame.
    fmov d0, x0                             // Store result in d0 in case it was float or double.
    ret                                     // Return on success.
.Lexception_in_proxy:
    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_proxy_invoke_handler

    /*
     * Called to resolve an imt conflict. xIP1 is a hidden argument that holds the target
     * method's dex method index.
     */
ENTRY art_quick_imt_conflict_trampoline
    ldr w0, [sp, #0]                                // Load caller Method*.
    ldr w0, [x0, #METHOD_DEX_CACHE_METHODS_OFFSET]  // Load dex_cache_resolved_methods.
    add x0, x0, #OBJECT_ARRAY_DATA_OFFSET           // Get starting address of data.
    ldr w0, [x0, xIP1, lsl 2]                       // Load the target method.
    b art_quick_invoke_interface_trampoline
END art_quick_imt_conflict_trampoline

ENTRY art_quick_resolution_trampoline
    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME
    mov x2, xSELF
    mov x3, sp
    bl artQuickResolutionTrampoline  // (called, receiver, Thread*, SP)
    cbz x0, 1f
    mov xIP0, x0                     // Remember returned code pointer in xIP0.
    ldr w0, [sp, #0]                 // artQuickResolutionTrampoline puts called method in *SP.
    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
    br xIP0
1:
    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
    DELIVER_PENDING_EXCEPTION
END art_quick_resolution_trampoline
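    // Contract of artQuickResolutionTrampoline as used above, in C-sketch form
    // (illustrative): it resolves the callee, writes the resolved ArtMethod*
    // into *sp (the bottom of our save frame), and returns the code pointer to
    // tail-call, or null with a pending exception.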
| 1390 * #-------------------# <--- SP on entry 1391 * | Return X30/LR | 1392 * | X29/FP | callee save 1393 * | X28 | callee save 1394 * | X27 | callee save 1395 * | X26 | callee save 1396 * | X25 | callee save 1397 * | X24 | callee save 1398 * | X23 | callee save 1399 * | X22 | callee save 1400 * | X21 | callee save 1401 * | X20 | callee save 1402 * | X7 | arg7 1403 * | X6 | arg6 1404 * | X5 | arg5 1405 * | X4 | arg4 1406 * | X3 | arg3 1407 * | X2 | arg2 1408 * | X1 | arg1 1409 * | D7 | float arg 8 1410 * | D6 | float arg 7 1411 * | D5 | float arg 6 1412 * | D4 | float arg 5 1413 * | D3 | float arg 4 1414 * | D2 | float arg 3 1415 * | D1 | float arg 2 1416 * | D0 | float arg 1 1417 * | Method* | <- X0 1418 * #-------------------# 1419 * | local ref cookie | // 4B 1420 * | handle scope size | // 4B 1421 * #-------------------# 1422 * | JNI Call Stack | 1423 * #-------------------# <--- SP on native call 1424 * | | 1425 * | Stack for Regs | The trampoline assembly will pop these values 1426 * | | into registers for native call 1427 * #-------------------# 1428 * | Native code ptr | 1429 * #-------------------# 1430 * | Free scratch | 1431 * #-------------------# 1432 * | Ptr to (1) | <--- SP 1433 * #-------------------# 1434 */ 1435 /* 1436 * Called to do a generic JNI down-call 1437 */ 1438 ENTRY_NO_HIDE art_quick_generic_jni_trampoline 1439 SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME_INTERNAL 1440 str x0, [sp, #0] // Store native ArtMethod* to bottom of stack. 1441 1442 // Save SP , so we can have static CFI info. 1443 mov x28, sp 1444 .cfi_def_cfa_register x28 1445 1446 // This looks the same, but is different: this will be updated to point to the bottom 1447 // of the frame when the handle scope is inserted. 1448 mov xFP, sp 1449 1450 mov xIP0, #5120 1451 sub sp, sp, xIP0 1452 1453 // prepare for artQuickGenericJniTrampoline call 1454 // (Thread*, SP) 1455 // x0 x1 <= C calling convention 1456 // xSELF xFP <= where they are 1457 1458 mov x0, xSELF // Thread* 1459 mov x1, xFP 1460 bl artQuickGenericJniTrampoline // (Thread*, sp) 1461 1462 // The C call will have registered the complete save-frame on success. 1463 // The result of the call is: 1464 // x0: pointer to native code, 0 on error. 1465 // x1: pointer to the bottom of the used area of the alloca, can restore stack till there. 1466 1467 // Check for error = 0. 1468 cbz x0, .Lentry_error 1469 1470 // Release part of the alloca. 1471 mov sp, x1 1472 1473 // Save the code pointer 1474 mov xIP0, x0 1475 1476 // Load parameters from frame into registers. 1477 // TODO Check with artQuickGenericJniTrampoline. 1478 // Also, check again APPCS64 - the stack arguments are interleaved. 1479 ldp x0, x1, [sp] 1480 ldp x2, x3, [sp, #16] 1481 ldp x4, x5, [sp, #32] 1482 ldp x6, x7, [sp, #48] 1483 1484 ldp d0, d1, [sp, #64] 1485 ldp d2, d3, [sp, #80] 1486 ldp d4, d5, [sp, #96] 1487 ldp d6, d7, [sp, #112] 1488 1489 add sp, sp, #128 1490 1491 blr xIP0 // native call. 1492 1493 // result sign extension is handled in C code 1494 // prepare for artQuickGenericJniEndTrampoline call 1495 // (Thread*, result, result_f) 1496 // x0 x1 x2 <= C calling convention 1497 mov x1, x0 // Result (from saved) 1498 mov x0, xETR // Thread register, original xSELF might be scratched by native code. 1499 fmov x2, d0 // d0 will contain floating point result, but needs to go into x2 1500 1501 bl artQuickGenericJniEndTrampoline 1502 1503 // Tear down the alloca. 1504 mov sp, x28 1505 .cfi_def_cfa_register sp 1506 1507 // Pending exceptions possible. 
    // Use xETR as xSELF might be scratched by native code.
    ldr x1, [xETR, THREAD_EXCEPTION_OFFSET]
    cbnz x1, .Lexception_in_native

    // Tear down the callee-save frame.
    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME

    // Store into fpr, for when it's a fpr return...
    fmov d0, x0
    ret

.Lentry_error:
    mov sp, x28
    .cfi_def_cfa_register sp
.Lexception_in_native:
    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
    DELIVER_PENDING_EXCEPTION

END art_quick_generic_jni_trampoline

    /*
     * Called to bridge from the quick to interpreter ABI. On entry the arguments match those
     * of a quick call:
     * x0 = method being called/to bridge to.
     * x1..x7, d0..d7 = arguments to that method.
     */
ENTRY_NO_HIDE art_quick_to_interpreter_bridge
    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME  // Set up frame and save arguments.

    // x0 will contain mirror::ArtMethod* method.
    mov x1, xSELF  // Pass Thread::Current.
    mov x2, sp

    // uint64_t artQuickToInterpreterBridge(mirror::ArtMethod* method, Thread* self,
    //                                      mirror::ArtMethod** sp)
    bl artQuickToInterpreterBridge

    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME  // TODO: no need to restore arguments in this case.

    fmov d0, x0

    RETURN_OR_DELIVER_PENDING_EXCEPTION
END art_quick_to_interpreter_bridge


//
// Instrumentation-related stubs
//
    .extern artInstrumentationMethodEntryFromCode
ENTRY art_quick_instrumentation_entry
    SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME

    mov x20, x0  // Preserve method reference in a callee-save.

    mov x2, xSELF
    mov x3, sp
    mov x4, xLR
    bl artInstrumentationMethodEntryFromCode  // (Method*, Object*, Thread*, SP, LR)

    mov xIP0, x0   // x0 = result of call.
    mov x0, x20    // Reload method reference.

    RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME  // Note: will restore xSELF.
    adr xLR, art_quick_instrumentation_exit
    br xIP0        // Tail-call method with lr set to art_quick_instrumentation_exit.
END art_quick_instrumentation_entry

    .extern artInstrumentationMethodExitFromCode
ENTRY art_quick_instrumentation_exit
    mov xLR, #0    // Clobber LR for later checks.

    SETUP_REF_ONLY_CALLEE_SAVE_FRAME

    // We need to save x0 and d0. We could use a callee-save from SETUP_REF_ONLY, but then
    // we would need to fully restore it. As there are a lot of callee-save registers, it seems
    // easier to have an extra small stack area.

    str x0, [sp, #-16]!  // Save integer result.
    .cfi_adjust_cfa_offset 16
    str d0, [sp, #8]     // Save floating-point result.

    add x1, sp, #16      // Pass SP.
    mov x2, x0           // Pass integer result.
    fmov x3, d0          // Pass floating-point result.
    mov x0, xSELF        // Pass Thread.
    bl artInstrumentationMethodExitFromCode  // (Thread*, SP, gpr_res, fpr_res)

    mov xIP0, x0         // Return address from instrumentation call.
    mov xLR, x1          // x1 holds the link register if we're to bounce to deoptimize.

    ldr d0, [sp, #8]     // Restore floating-point result.
    ldr x0, [sp], 16     // Restore integer result, and drop stack area.
    .cfi_adjust_cfa_offset -16

    POP_REF_ONLY_CALLEE_SAVE_FRAME

    br xIP0              // Tail-call out.
END art_quick_instrumentation_exit
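    // artInstrumentationMethodExitFromCode returns a two-register pair, roughly
    // (C sketch, illustrative):
    //
    //   struct { uintptr_t return_pc; uintptr_t deopt_lr; }  // x0 / x1
    //
    // so xIP0 above receives where to go next and xLR is pre-loaded for a
    // potential deoptimization bounce.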
    /*
     * Instrumentation has requested that we deoptimize into the interpreter. The deoptimization
     * will long jump to the upcall with a special exception of -1.
     */
    .extern artDeoptimize
ENTRY art_quick_deoptimize
    SETUP_SAVE_ALL_CALLEE_SAVE_FRAME
    mov x0, xSELF     // Pass thread.
    mov x1, sp        // Pass SP.
    bl artDeoptimize  // artDeoptimize(Thread*, SP)
    brk 0
END art_quick_deoptimize


    /*
     * String's indexOf.
     *
     * TODO: Not very optimized.
     * On entry:
     *    x0: string object (known non-null)
     *    w1: char to match (known <= 0xFFFF)
     *    w2: Starting offset in string data
     */
ENTRY art_quick_indexof
    ldr w3, [x0, #STRING_COUNT_OFFSET]
    ldr w4, [x0, #STRING_OFFSET_OFFSET]
    ldr w0, [x0, #STRING_VALUE_OFFSET]  // x0 ?

    /* Clamp start to [0..count] */
    cmp w2, #0
    csel w2, wzr, w2, lt
    cmp w2, w3
    csel w2, w3, w2, gt

    /* Build a pointer to the start of the string data */
    add x0, x0, #STRING_DATA_OFFSET
    add x0, x0, x4, lsl #1

    /* Save a copy to compute result */
    mov x5, x0

    /* Build pointer to start of data to compare and pre-bias */
    add x0, x0, x2, lsl #1
    sub x0, x0, #2

    /* Compute iteration count */
    sub w2, w3, w2

    /*
     * At this point we have:
     *   x0: start of the data to test
     *   w1: char to compare
     *   w2: iteration count
     *   x5: original start of string data
     */

    subs w2, w2, #4
    b.lt .Lindexof_remainder

.Lindexof_loop4:
    ldrh w6, [x0, #2]!
    ldrh w7, [x0, #2]!
    ldrh wIP0, [x0, #2]!
    ldrh wIP1, [x0, #2]!
    cmp w6, w1
    b.eq .Lmatch_0
    cmp w7, w1
    b.eq .Lmatch_1
    cmp wIP0, w1
    b.eq .Lmatch_2
    cmp wIP1, w1
    b.eq .Lmatch_3
    subs w2, w2, #4
    b.ge .Lindexof_loop4

.Lindexof_remainder:
    adds w2, w2, #4
    b.eq .Lindexof_nomatch

.Lindexof_loop1:
    ldrh w6, [x0, #2]!
    cmp w6, w1
    b.eq .Lmatch_3
    subs w2, w2, #1
    b.ne .Lindexof_loop1

.Lindexof_nomatch:
    mov x0, #-1
    ret

.Lmatch_0:
    sub x0, x0, #6
    sub x0, x0, x5
    asr x0, x0, #1
    ret
.Lmatch_1:
    sub x0, x0, #4
    sub x0, x0, x5
    asr x0, x0, #1
    ret
.Lmatch_2:
    sub x0, x0, #2
    sub x0, x0, x5
    asr x0, x0, #1
    ret
.Lmatch_3:
    sub x0, x0, x5
    asr x0, x0, #1
    ret
END art_quick_indexof
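    /*
     * Result math for the .Lmatch_* exits above: x0 points at the last char the
     * unrolled loop loaded (pre-indexed by #2 each time), so the matching char
     * sits 6/4/2/0 bytes back for lanes 0-3; the index is then
     * (char address - x5) >> 1, i.e. a 16-bit element offset from the data start.
     */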
    /*
     * String's compareTo.
     *
     * TODO: Not very optimized.
     *
     * On entry:
     *    x0: this object pointer
     *    x1: comp object pointer
     *
     */
    .extern __memcmp16
ENTRY art_quick_string_compareto
    mov x2, x0      // x0 is return, use x2 for first input.
    sub x0, x2, x1  // Same string object?
    cbnz x0, 1f
    ret
1:  // Different string objects.

    ldr w6, [x2, #STRING_OFFSET_OFFSET]
    ldr w5, [x1, #STRING_OFFSET_OFFSET]
    ldr w4, [x2, #STRING_COUNT_OFFSET]
    ldr w3, [x1, #STRING_COUNT_OFFSET]
    ldr w2, [x2, #STRING_VALUE_OFFSET]
    ldr w1, [x1, #STRING_VALUE_OFFSET]

    /*
     * Now:           CharArray*    Offset   Count
     *    first arg       x2          w6       w4
     *   second arg       x1          w5       w3
     */

    // x0 := str1.length(w4) - str2.length(w3). ldr zero-extended w3/w4 into x3/x4.
    subs x0, x4, x3
    // Min(count1, count2) into w3.
    csel x3, x3, x4, ge

    // Build pointer into string data.

    // Add offset in array (substr etc.) (sign extend and << 1).
    add x2, x2, w6, sxtw #1
    add x1, x1, w5, sxtw #1

    // Add offset in CharArray to array.
    add x2, x2, #STRING_DATA_OFFSET
    add x1, x1, #STRING_DATA_OFFSET

    // TODO: Tune this value.
    // Check for long strings, do memcmp16 for them.
    cmp w3, #28  // Constant from arm32.
    bgt .Ldo_memcmp16

    /*
     * Now:
     *   x2: *first string data
     *   x1: *second string data
     *   w3: iteration count
     *   x0: return value if comparison equal
     *   x4, x5, x6, x7: free
     */

    // Do a simple unrolled loop.
.Lloop:
    // At least two more elements?
    subs w3, w3, #2
    b.lt .Lremainder_or_done

    ldrh w4, [x2], #2
    ldrh w5, [x1], #2

    ldrh w6, [x2], #2
    ldrh w7, [x1], #2

    subs w4, w4, w5
    b.ne .Lw4_result

    subs w6, w6, w7
    b.ne .Lw6_result

    b .Lloop

.Lremainder_or_done:
    adds w3, w3, #1
    b.eq .Lremainder
    ret

.Lremainder:
    ldrh w4, [x2], #2
    ldrh w5, [x1], #2
    subs w4, w4, w5
    b.ne .Lw4_result
    ret

// Result is in w4.
.Lw4_result:
    sxtw x0, w4
    ret

// Result is in w6.
.Lw6_result:
    sxtw x0, w6
    ret

.Ldo_memcmp16:
    mov x14, x0   // Save x0 and LR. __memcmp16 does not use these temps.
    mov x15, xLR  // TODO: Codify and check that?

    mov x0, x2
    uxtw x2, w3
    bl __memcmp16

    mov xLR, x15  // Restore LR.

    cmp x0, #0            // Check the memcmp difference.
    csel x0, x0, x14, ne  // x0 := x0 != 0 ? x0 (__memcmp16 result) : x14 (length diff).
    ret
END art_quick_string_compareto

// Macro to facilitate adding new entrypoints that call native functions directly.
// Currently, xSELF is the only thing we need to take care of between managed code and AAPCS.
// But we might introduce more differences.
.macro NATIVE_DOWNCALL name, entrypoint
    .extern \entrypoint
ENTRY \name
    stp xSELF, xLR, [sp, #-16]!
    bl \entrypoint
    ldp xSELF, xLR, [sp], #16
    ret
END \name
.endm

NATIVE_DOWNCALL art_quick_fmod fmod
NATIVE_DOWNCALL art_quick_fmodf fmodf
NATIVE_DOWNCALL art_quick_memcpy memcpy
NATIVE_DOWNCALL art_quick_assignable_from_code artIsAssignableFromCode