/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


/*! \file LowerGetPut.cpp
    \brief This file lowers the following bytecodes: XGET|PUT_XXX
*/
#include "libdex/DexOpcodes.h"
#include "libdex/DexFile.h"
#include "Lower.h"
#include "NcgAot.h"
#include "enc_wrapper.h"

#define P_GPR_1 PhysicalReg_EBX
#define P_GPR_2 PhysicalReg_ECX
#define P_GPR_3 PhysicalReg_ESI
#define P_GPR_4 PhysicalReg_EDX
//! LOWER bytecode AGET without usage of helper function

//! It has null check and length check
int aget_common_nohelper(int flag, u2 vA, u2 vref, u2 vindex) {
    ////////////////////////////
    // Request VR free delays before register allocation for the temporaries
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK))
        requestVRFreeDelay(vref,VRDELAY_NULLCHECK);
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        requestVRFreeDelay(vref,VRDELAY_BOUNDCHECK);
        requestVRFreeDelay(vindex,VRDELAY_BOUNDCHECK);
    }

    get_virtual_reg(vref, OpndSize_32, 1, false); //array
    get_virtual_reg(vindex, OpndSize_32, 2, false); //index

    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
        //last argument is the exception number for this bytecode
        nullCheck(1, false, 1, vref); //maybe optimized away, if not, call
        cancelVRFreeDelayRequest(vref,VRDELAY_NULLCHECK);
    } else {
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
    }

    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        boundCheck(vref, 1, false,
                   vindex, 2, false,
                   2);
        cancelVRFreeDelayRequest(vref,VRDELAY_BOUNDCHECK);
        cancelVRFreeDelayRequest(vindex,VRDELAY_BOUNDCHECK);
    } else {
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
        updateRefCount2(2, LowOpndRegType_gp, false); //update reference count for tmp2
    }

    if(flag == AGET) {
        move_mem_disp_scale_to_reg(OpndSize_32, 1, false, offArrayObject_contents, 2, false, 4, 4, false);
    }
    else if(flag == AGET_WIDE) {
        move_mem_disp_scale_to_reg(OpndSize_64, 1, false, offArrayObject_contents, 2, false, 8, 1, false);
    }
    else if(flag == AGET_CHAR) {
        movez_mem_disp_scale_to_reg(OpndSize_16, 1, false, offArrayObject_contents, 2, false, 2, 4, false);
    }
    else if(flag == AGET_SHORT) {
        moves_mem_disp_scale_to_reg(OpndSize_16, 1, false, offArrayObject_contents, 2, false, 2, 4, false);
    }
    else if(flag == AGET_BOOLEAN) {
        movez_mem_disp_scale_to_reg(OpndSize_8, 1, false, offArrayObject_contents, 2, false, 1, 4, false);
    }
    else if(flag == AGET_BYTE) {
        moves_mem_disp_scale_to_reg(OpndSize_8, 1, false, offArrayObject_contents, 2, false, 1, 4, false);
    }
    if(flag == AGET_WIDE) {
        set_virtual_reg(vA, OpndSize_64, 1, false);
    }
    else {
        set_virtual_reg(vA, OpndSize_32, 4, false);
    }
    //////////////////////////////////
    return 0;
}
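/* Illustrative note (added; not part of the original lowering logic): for AGET the
 * load above amounts to a single scaled-index move of the form
 *     movl offArrayObject_contents(array, index, 4), result
 * i.e. element i lives at array + offArrayObject_contents + i * elementSize.
 * The narrower variants differ only in element size and in whether the load
 * zero-extends (AGET_CHAR, AGET_BOOLEAN) or sign-extends (AGET_SHORT, AGET_BYTE)
 * into the 32-bit temporary that is written back to vA. */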
//! wrapper to call either aget_common_helper or aget_common_nohelper

//!
int aget_common(int flag, u2 vA, u2 vref, u2 vindex) {
    return aget_common_nohelper(flag, vA, vref, vindex);
}
#undef P_GPR_1
#undef P_GPR_2
#undef P_GPR_3
#undef P_GPR_4
//! lower bytecode AGET by calling aget_common

//!
int op_aget() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode AGET_WIDE by calling aget_common

//!
int op_aget_wide() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET_WIDE, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode AGET_OBJECT by calling aget_common

//!
int op_aget_object() {
    return op_aget();
}
//! lower bytecode AGET_BOOLEAN by calling aget_common

//!
int op_aget_boolean() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET_BOOLEAN, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode AGET_BYTE by calling aget_common

//!
int op_aget_byte() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET_BYTE, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode AGET_CHAR by calling aget_common

//!
int op_aget_char() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET_CHAR, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode AGET_SHORT by calling aget_common

//!
int op_aget_short() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET_SHORT, vA, vref, vindex);
    rPC += 2;
    return retval;
}

#define P_GPR_1 PhysicalReg_EBX
#define P_GPR_2 PhysicalReg_ECX
#define P_GPR_3 PhysicalReg_ESI
#define P_GPR_4 PhysicalReg_EDX
//! LOWER bytecode APUT without usage of helper function

//! It has null check and length check
int aput_common_nohelper(int flag, u2 vA, u2 vref, u2 vindex) {
    //////////////////////////////////////
    // Request VR free delays before register allocation for the temporaries.
    // No need to request delay for vA since it will be transferred to temporary
    // after the null check and bound check.
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK))
        requestVRFreeDelay(vref,VRDELAY_NULLCHECK);
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        requestVRFreeDelay(vref,VRDELAY_BOUNDCHECK);
        requestVRFreeDelay(vindex,VRDELAY_BOUNDCHECK);
    }

    get_virtual_reg(vref, OpndSize_32, 1, false); //array
    get_virtual_reg(vindex, OpndSize_32, 2, false); //index

    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
        //last argument is the exception number for this bytecode
        nullCheck(1, false, 1, vref); //maybe optimized away, if not, call
        cancelVRFreeDelayRequest(vref,VRDELAY_NULLCHECK);
    } else {
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
    }

    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        boundCheck(vref, 1, false,
                   vindex, 2, false,
                   2);
        cancelVRFreeDelayRequest(vref,VRDELAY_BOUNDCHECK);
        cancelVRFreeDelayRequest(vindex,VRDELAY_BOUNDCHECK);
    } else {
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
        updateRefCount2(2, LowOpndRegType_gp, false); //update reference count for tmp2
    }

    if(flag == APUT_WIDE) {
        get_virtual_reg(vA, OpndSize_64, 1, false);
    }
    else {
        get_virtual_reg(vA, OpndSize_32, 4, false);
    }
    if(flag == APUT)
        move_reg_to_mem_disp_scale(OpndSize_32, 4, false, 1, false, offArrayObject_contents, 2, false, 4);
    else if(flag == APUT_WIDE)
        move_reg_to_mem_disp_scale(OpndSize_64, 1, false, 1, false, offArrayObject_contents, 2, false, 8);
    else if(flag == APUT_CHAR || flag == APUT_SHORT)
        move_reg_to_mem_disp_scale(OpndSize_16, 4, false, 1, false, offArrayObject_contents, 2, false, 2);
    else if(flag == APUT_BOOLEAN || flag == APUT_BYTE)
        move_reg_to_mem_disp_scale(OpndSize_8, 4, false, 1, false, offArrayObject_contents, 2, false, 1);
    //////////////////////////////////
    return 0;
}
//! wrapper to call either aput_common_helper or aput_common_nohelper

//!
int aput_common(int flag, u2 vA, u2 vref, u2 vindex) {
    return aput_common_nohelper(flag, vA, vref, vindex);
}
#undef P_GPR_1
#undef P_GPR_2
#undef P_GPR_3
#undef P_GPR_4
//! lower bytecode APUT by calling aput_common

//!
int op_aput() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode APUT_WIDE by calling aput_common

//!
int op_aput_wide() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT_WIDE, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode APUT_BOOLEAN by calling aput_common

//!
int op_aput_boolean() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT_BOOLEAN, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode APUT_BYTE by calling aput_common

//!
int op_aput_byte() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT_BYTE, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode APUT_CHAR by calling aput_common

//!
int op_aput_char() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT_CHAR, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode APUT_SHORT by calling aput_common

//!
int op_aput_short() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT_SHORT, vA, vref, vindex);
    rPC += 2;
    return retval;
}

#define P_GPR_1 PhysicalReg_EBX //callee-saved valid after CanPutArray
#define P_GPR_2 PhysicalReg_ECX
#define P_GPR_3 PhysicalReg_ESI //callee-saved
#define P_SCRATCH_1 PhysicalReg_EDX
#define P_SCRATCH_2 PhysicalReg_EAX
#define P_SCRATCH_3 PhysicalReg_EDX

void markCard_notNull(int tgtAddrReg, int scratchReg, bool isPhysical);

//! lower bytecode APUT_OBJECT

//! Lower the bytecode using helper function ".aput_obj_helper" if helper switch is on
int op_aput_object() { //type checking
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;

    ///////////////////////////
    // Request VR free delays before register allocation for the temporaries
    // No need to request delay for vA since it will be transferred to temporary
    // after the null check and bound check.
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK))
        requestVRFreeDelay(vref,VRDELAY_NULLCHECK);
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        requestVRFreeDelay(vref,VRDELAY_BOUNDCHECK);
        requestVRFreeDelay(vindex,VRDELAY_BOUNDCHECK);
    }

    get_virtual_reg(vref, OpndSize_32, 1, false); //array
    export_pc(); //use %edx

    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
        compare_imm_reg(OpndSize_32, 0, 1, false);
        conditional_jump_global_API(Condition_E, "common_errNullObject", false);
        cancelVRFreeDelayRequest(vref,VRDELAY_NULLCHECK);
    } else {
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
    }

    get_virtual_reg(vindex, OpndSize_32, 2, false); //index
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        compare_mem_reg(OpndSize_32, offArrayObject_length, 1, false, 2, false);
        conditional_jump_global_API(Condition_NC, "common_errArrayIndex", false);
        cancelVRFreeDelayRequest(vref,VRDELAY_BOUNDCHECK);
        cancelVRFreeDelayRequest(vindex,VRDELAY_BOUNDCHECK);
    } else {
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
        updateRefCount2(2, LowOpndRegType_gp, false); //update reference count for tmp2
    }

    get_virtual_reg(vA, OpndSize_32, 4, false);
    compare_imm_reg(OpndSize_32, 0, 4, false);
    conditional_jump(Condition_E, ".aput_object_skip_check", true);
    rememberState(1);
    move_mem_to_reg(OpndSize_32, offObject_clazz, 4, false, 5, false);
    load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, 5, false, 0, PhysicalReg_ESP, true);
    move_mem_to_reg(OpndSize_32, offObject_clazz, 1, false, 6, false);
    move_reg_to_mem(OpndSize_32, 6, false, 4, PhysicalReg_ESP, true);

    scratchRegs[0] = PhysicalReg_SCRATCH_1;
    call_dvmCanPutArrayElement(); //scratch??
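    /* Note (added for clarity): dvmCanPutArrayElement checks whether the object
     * being stored is assignment-compatible with the array's element type; its
     * boolean result comes back in %eax, and a zero result is routed to
     * common_errArrayStore below. */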
    load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump_global_API(Condition_E, "common_errArrayStore", false);

    //NOTE: "2, false" is live through function call
    move_reg_to_mem_disp_scale(OpndSize_32, 4, false, 1, false, offArrayObject_contents, 2, false, 4);
    markCard_notNull(1, 11, false);
    rememberState(2);
    ////TODO NCG O1 + code cache
    unconditional_jump(".aput_object_after_check", true);

    insertLabel(".aput_object_skip_check", true);
    goToState(1);
    //NOTE: "2, false" is live through function call
    move_reg_to_mem_disp_scale(OpndSize_32, 4, false, 1, false, offArrayObject_contents, 2, false, 4);

    transferToState(2);
    insertLabel(".aput_object_after_check", true);
    ///////////////////////////////
    rPC += 2;
    return 0;
}
#undef P_GPR_1
#undef P_GPR_2
#undef P_GPR_3
#undef P_SCRATCH_1
#undef P_SCRATCH_2
#undef P_SCRATCH_3

//////////////////////////////////////////
#define P_GPR_1 PhysicalReg_ECX
#define P_GPR_2 PhysicalReg_EBX //should be callee-saved to avoid being overwritten by inst_field_resolve
#define P_GPR_3 PhysicalReg_ESI
#define P_SCRATCH_1 PhysicalReg_EDX

/*
   movl offThread_cardTable(self), scratchReg
   compare_imm_reg 0, valReg (testl valReg, valReg)
   je .markCard_skip
   shrl $GC_CARD_SHIFT, tgtAddrReg
   movb scratchReg_lowByte, (scratchReg, tgtAddrReg)
   NOTE: scratchReg can be accessed with the corresponding byte
         tgtAddrReg will be updated
   for O1, update the corresponding reference count
*/
void markCard(int valReg, int tgtAddrReg, bool targetPhysical, int scratchReg, bool isPhysical) {
    get_self_pointer(PhysicalReg_SCRATCH_6, isScratchPhysical);
    move_mem_to_reg(OpndSize_32, offsetof(Thread, cardTable), PhysicalReg_SCRATCH_6, isScratchPhysical, scratchReg, isPhysical);
    compare_imm_reg(OpndSize_32, 0, valReg, isPhysical);
    conditional_jump(Condition_E, ".markCard_skip", true);
    alu_binary_imm_reg(OpndSize_32, shr_opc, GC_CARD_SHIFT, tgtAddrReg, targetPhysical);
    move_reg_to_mem_disp_scale(OpndSize_8, scratchReg, isPhysical, scratchReg, isPhysical, 0, tgtAddrReg, targetPhysical, 1);
    insertLabel(".markCard_skip", true);
}

void markCard_notNull(int tgtAddrReg, int scratchReg, bool isPhysical) {
    get_self_pointer(PhysicalReg_SCRATCH_2, isScratchPhysical);
    move_mem_to_reg(OpndSize_32, offsetof(Thread, cardTable), PhysicalReg_SCRATCH_2, isScratchPhysical, scratchReg, isPhysical);
    alu_binary_imm_reg(OpndSize_32, shr_opc, GC_CARD_SHIFT, tgtAddrReg, isPhysical);
    move_reg_to_mem_disp_scale(OpndSize_8, scratchReg, isPhysical, scratchReg, isPhysical, 0, tgtAddrReg, isPhysical, 1);
}

void markCard_filled(int tgtAddrReg, bool isTgtPhysical, int scratchReg, bool isScratchPhysical) {
    get_self_pointer(PhysicalReg_SCRATCH_2, false/*isPhysical*/);
    move_mem_to_reg(OpndSize_32, offsetof(Thread, cardTable), PhysicalReg_SCRATCH_2, isScratchPhysical, scratchReg, isScratchPhysical);
    alu_binary_imm_reg(OpndSize_32, shr_opc, GC_CARD_SHIFT, tgtAddrReg, isTgtPhysical);
    move_reg_to_mem_disp_scale(OpndSize_8, scratchReg, isScratchPhysical, scratchReg, isScratchPhysical, 0, tgtAddrReg, isTgtPhysical, 1);
}
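/* Summary (added for clarity): the three markCard variants implement the same GC
 * write barrier: load the card-table base from Thread.cardTable, shift the target
 * (object) address right by GC_CARD_SHIFT to index its card, and store the low
 * byte of the card-table base register into that card to mark it dirty. markCard
 * first tests valReg and skips the barrier when a null reference is stored;
 * markCard_notNull and markCard_filled omit that test. In all cases tgtAddrReg is
 * clobbered by the shift. */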
//! LOWER bytecode IGET,IPUT without usage of helper function

//! It has null check and calls assembly function inst_field_resolve
int iget_iput_common_nohelper(int tmp, int flag, u2 vA, u2 vB, int isObj, bool isVolatile) {
#ifdef WITH_JIT_INLINING
    const Method *method = (traceCurrentMIR->OptimizationFlags & MIR_CALLEE) ?
        traceCurrentMIR->meta.calleeMethod : currentMethod;
    InstField *pInstField = (InstField *)
        method->clazz->pDvmDex->pResFields[tmp];
#else
    InstField *pInstField = (InstField *)
        currentMethod->clazz->pDvmDex->pResFields[tmp];
#endif
    int fieldOffset;

    assert(pInstField != NULL);
    fieldOffset = pInstField->byteOffset;
    move_imm_to_reg(OpndSize_32, fieldOffset, 8, false);
    // Request VR delay before transfer to temporary. Only vB needs delay.
    // vA will have a non-zero reference count since its transfer to a temporary
    // happens after the null check, thus no delay is needed.
    requestVRFreeDelay(vB,VRDELAY_NULLCHECK);
    get_virtual_reg(vB, OpndSize_32, 7, false);
    nullCheck(7, false, 2, vB); //maybe optimized away, if not, call
    cancelVRFreeDelayRequest(vB,VRDELAY_NULLCHECK);
    if(flag == IGET) {
        move_mem_scale_to_reg(OpndSize_32, 7, false, 8, false, 1, 9, false);
        set_virtual_reg(vA, OpndSize_32, 9, false);
#ifdef DEBUG_IGET_OBJ
        if(isObj > 0) {
            pushAllRegs();
            load_effective_addr(-16, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            move_reg_to_mem(OpndSize_32, 9, false, 12, PhysicalReg_ESP, true); //field
            move_reg_to_mem(OpndSize_32, 7, false, 8, PhysicalReg_ESP, true); //object
            move_imm_to_mem(OpndSize_32, tmp, 4, PhysicalReg_ESP, true); //field
            move_imm_to_mem(OpndSize_32, 0, 0, PhysicalReg_ESP, true); //iget
            call_dvmDebugIgetIput();
            load_effective_addr(16, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            popAllRegs();
        }
#endif
    } else if(flag == IGET_WIDE) {
        if(isVolatile) {
            /* call dvmQuasiAtomicRead64(addr) */
            load_effective_addr(fieldOffset, 7, false, 9, false);
            move_reg_to_mem(OpndSize_32, 9, false, -4, PhysicalReg_ESP, true); //1st argument
            load_effective_addr(-4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            nextVersionOfHardReg(PhysicalReg_EAX, 2);
            nextVersionOfHardReg(PhysicalReg_EDX, 2);
            scratchRegs[0] = PhysicalReg_SCRATCH_3;
            call_dvmQuasiAtomicRead64();
            load_effective_addr(4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            //memory content in %edx, %eax
            set_virtual_reg(vA, OpndSize_32, PhysicalReg_EAX, true);
            set_virtual_reg(vA+1, OpndSize_32, PhysicalReg_EDX, true);
        } else {
            move_mem_scale_to_reg(OpndSize_64, 7, false, 8, false, 1, 1, false); //access field
            set_virtual_reg(vA, OpndSize_64, 1, false);
        }
    } else if(flag == IPUT) {
        get_virtual_reg(vA, OpndSize_32, 9, false);
        move_reg_to_mem_scale(OpndSize_32, 9, false, 7, false, 8, false, 1); //access field
        if(isObj) {
            markCard(9, 7, false, 11, false);
        }
    } else if(flag == IPUT_WIDE) {
        get_virtual_reg(vA, OpndSize_64, 1, false);
        if(isVolatile) {
            /* call dvmQuasiAtomicSwap64(val, addr) */
            load_effective_addr(fieldOffset, 7, false, 9, false);
            move_reg_to_mem(OpndSize_32, 9, false, -4, PhysicalReg_ESP, true); //2nd argument
            move_reg_to_mem(OpndSize_64, 1, false, -12, PhysicalReg_ESP, true); //1st argument
            load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            scratchRegs[0] = PhysicalReg_SCRATCH_3;
            call_dvmQuasiAtomicSwap64();
            load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
        }
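        /* Added note: only the volatile case above needs dvmQuasiAtomicSwap64,
         * since an ordinary 64-bit memory access is not guaranteed to be atomic
         * on 32-bit x86; the non-volatile path below can use a plain 64-bit
         * store. */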
        else {
            move_reg_to_mem_scale(OpndSize_64, 1, false, 7, false, 8, false, 1);
        }
    }
    ///////////////////////////
    return 0;
}
//! wrapper to call either iget_iput_common_helper or iget_iput_common_nohelper

//!
int iget_iput_common(int tmp, int flag, u2 vA, u2 vB, int isObj, bool isVolatile) {
    return iget_iput_common_nohelper(tmp, flag, vA, vB, isObj, isVolatile);
}
#undef P_GPR_1
#undef P_GPR_2
#undef P_GPR_3
#undef P_SCRATCH_1
//! lower bytecode IGET by calling iget_iput_common

//!
int op_iget() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IGET, vA, vB, 0, false);
    rPC += 2;
    return retval;
}
//! lower bytecode IGET_WIDE by calling iget_iput_common

//!
int op_iget_wide(bool isVolatile) {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IGET_WIDE, vA, vB, 0, isVolatile);
    rPC += 2;
    return retval;
}
//! lower bytecode IGET_OBJECT by calling iget_iput_common

//!
int op_iget_object() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IGET, vA, vB, 1, false);
    rPC += 2;
    return retval;
}
//! lower bytecode IGET_BOOLEAN by calling iget_iput_common

//!
int op_iget_boolean() {
    return op_iget();
}
//! lower bytecode IGET_BYTE by calling iget_iput_common

//!
int op_iget_byte() {
    return op_iget();
}
//! lower bytecode IGET_CHAR by calling iget_iput_common

//!
int op_iget_char() {
    return op_iget();
}
//! lower bytecode IGET_SHORT by calling iget_iput_common

//!
int op_iget_short() {
    return op_iget();
}
//! lower bytecode IPUT by calling iget_iput_common

//!
int op_iput() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IPUT, vA, vB, 0, false);
    rPC += 2;
    return retval;
}
//! lower bytecode IPUT_WIDE by calling iget_iput_common

//!
int op_iput_wide(bool isVolatile) {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IPUT_WIDE, vA, vB, 0, isVolatile);
    rPC += 2;
    return retval;
}
//! lower bytecode IPUT_OBJECT by calling iget_iput_common

//!
int op_iput_object() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IPUT, vA, vB, 1, false);
    rPC += 2;
    return retval;
}
//! lower bytecode IPUT_BOOLEAN by calling iget_iput_common

//!
int op_iput_boolean() {
    return op_iput();
}
//! lower bytecode IPUT_BYTE by calling iget_iput_common

//!
int op_iput_byte() {
    return op_iput();
}
//! lower bytecode IPUT_CHAR by calling iget_iput_common

//!
int op_iput_char() {
    return op_iput();
}
//! lower bytecode IPUT_SHORT by calling iget_iput_common

//!
int op_iput_short() {
    return op_iput();
}
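/* Note (added for clarity): the boolean/byte/char/short IGET/IPUT variants simply
 * reuse the 32-bit lowering because Dalvik gives every non-wide instance field its
 * own 32-bit slot in the object, so a full-word load/store of that slot is safe
 * for the narrower types as well. */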

#define P_GPR_1 PhysicalReg_EBX
#define P_GPR_2 PhysicalReg_ECX
#define P_GPR_3 PhysicalReg_EDX //used by helper only

//! common section to lower SGET & SPUT

//! It will use helper function sget_helper if the switch is on
int sget_sput_common(int flag, u2 vA, u2 tmp, bool isObj, bool isVolatile) {
    //call assembly static_field_resolve
    //no exception
    //glue: get_res_fields
    //hard-coded: eax (one version?)
    //////////////////////////////////////////
#ifdef WITH_JIT_INLINING
    const Method *method = (traceCurrentMIR->OptimizationFlags & MIR_CALLEE) ? traceCurrentMIR->meta.calleeMethod : currentMethod;
    void *fieldPtr = (void*)
        (method->clazz->pDvmDex->pResFields[tmp]);
#else
    void *fieldPtr = (void*)
        (currentMethod->clazz->pDvmDex->pResFields[tmp]);
#endif

    /* Usually, fieldPtr should not be null. The interpreter should resolve
     * it before we come here, or not allow this opcode in a trace. However,
     * we can be in a loop trace and this opcode might have been picked up
     * by exhaustTrace. Returning -1 here will terminate the loop formation
     * and fall back to normal trace, which will not have this opcode.
     */
    if (!fieldPtr) {
        return -1;
    }

    move_imm_to_reg(OpndSize_32, (int)fieldPtr, PhysicalReg_EAX, true);
    if(flag == SGET) {
        move_mem_to_reg(OpndSize_32, offStaticField_value, PhysicalReg_EAX, true, 7, false); //access field
        set_virtual_reg(vA, OpndSize_32, 7, false);
    } else if(flag == SGET_WIDE) {
        if(isVolatile) {
            /* call dvmQuasiAtomicRead64(addr) */
            load_effective_addr(offStaticField_value, PhysicalReg_EAX, true, 9, false);
            move_reg_to_mem(OpndSize_32, 9, false, -4, PhysicalReg_ESP, true); //1st argument
            load_effective_addr(-4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            nextVersionOfHardReg(PhysicalReg_EAX, 2);
            nextVersionOfHardReg(PhysicalReg_EDX, 2);
            scratchRegs[0] = PhysicalReg_SCRATCH_3;
            call_dvmQuasiAtomicRead64();
            load_effective_addr(4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            //memory content in %edx, %eax
            set_virtual_reg(vA, OpndSize_32, PhysicalReg_EAX, true);
            set_virtual_reg(vA+1, OpndSize_32, PhysicalReg_EDX, true);
        }
        else {
            move_mem_to_reg(OpndSize_64, offStaticField_value, PhysicalReg_EAX, true, 1, false); //access field
            set_virtual_reg(vA, OpndSize_64, 1, false);
        }
    } else if(flag == SPUT) {
        get_virtual_reg(vA, OpndSize_32, 7, false);
        move_reg_to_mem(OpndSize_32, 7, false, offStaticField_value, PhysicalReg_EAX, true); //access field
        if(isObj) {
            /* get clazz object, then use clazz object to mark card */
            move_mem_to_reg(OpndSize_32, offField_clazz, PhysicalReg_EAX, true, 12, false);
            markCard(7/*valReg*/, 12, false, 11, false);
        }
    } else if(flag == SPUT_WIDE) {
        get_virtual_reg(vA, OpndSize_64, 1, false);
        if(isVolatile) {
            /* call dvmQuasiAtomicSwap64(val, addr) */
            load_effective_addr(offStaticField_value, PhysicalReg_EAX, true, 9, false);
            move_reg_to_mem(OpndSize_32, 9, false, -4, PhysicalReg_ESP, true); //2nd argument
            move_reg_to_mem(OpndSize_64, 1, false, -12, PhysicalReg_ESP, true); //1st argument
            load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            scratchRegs[0] = PhysicalReg_SCRATCH_3;
            call_dvmQuasiAtomicSwap64();
            load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
        }
        else {
            move_reg_to_mem(OpndSize_64, 1, false, offStaticField_value, PhysicalReg_EAX, true); //access field
        }
    }
    //////////////////////////////////////////////
    return 0;
}
#undef P_GPR_1
#undef P_GPR_2
#undef P_GPR_3
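/* Note (added for clarity): since the StaticField was resolved when the trace was
 * built, its address is embedded in the generated code as a 32-bit immediate and
 * the value is read or written directly at offStaticField_value; no field lookup
 * happens at execution time. Volatile wide statics go through the dvmQuasiAtomic*
 * helpers above for atomicity. */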
//! lower bytecode SGET by calling sget_sput_common

//!
int op_sget() {
    u2 vA = INST_AA(inst);
    u2 tmp = FETCH(1);
    int retval = sget_sput_common(SGET, vA, tmp, false, false);
    rPC += 2;
    return retval;
}
//! lower bytecode SGET_WIDE by calling sget_sput_common

//!
int op_sget_wide(bool isVolatile) {
    u2 vA = INST_AA(inst);
    u2 tmp = FETCH(1);
    int retval = sget_sput_common(SGET_WIDE, vA, tmp, false, isVolatile);
    rPC += 2;
    return retval;
}
//! lower bytecode SGET_OBJECT by calling sget_sput_common

//!
int op_sget_object() {
    return op_sget();
}
//! lower bytecode SGET_BOOLEAN by calling sget_sput_common

//!
int op_sget_boolean() {
    return op_sget();
}
//! lower bytecode SGET_BYTE by calling sget_sput_common

//!
int op_sget_byte() {
    return op_sget();
}
//! lower bytecode SGET_CHAR by calling sget_sput_common

//!
int op_sget_char() {
    return op_sget();
}
//! lower bytecode SGET_SHORT by calling sget_sput_common

//!
int op_sget_short() {
    return op_sget();
}
//! lower bytecode SPUT by calling sget_sput_common

//!
int op_sput(bool isObj) {
    u2 vA = INST_AA(inst);
    u2 tmp = FETCH(1);
    int retval = sget_sput_common(SPUT, vA, tmp, isObj, false);
    rPC += 2;
    return retval;
}
//! lower bytecode SPUT_WIDE by calling sget_sput_common

//!
int op_sput_wide(bool isVolatile) {
    u2 vA = INST_AA(inst);
    u2 tmp = FETCH(1);
    int retval = sget_sput_common(SPUT_WIDE, vA, tmp, false, isVolatile);
    rPC += 2;
    return retval;
}
//! lower bytecode SPUT_OBJECT by calling sget_sput_common

//!
int op_sput_object() {
    return op_sput(true);
}
//! lower bytecode SPUT_BOOLEAN by calling sget_sput_common

//!
int op_sput_boolean() {
    return op_sput(false);
}
//! lower bytecode SPUT_BYTE by calling sget_sput_common

//!
int op_sput_byte() {
    return op_sput(false);
}
//! lower bytecode SPUT_CHAR by calling sget_sput_common

//!
int op_sput_char() {
    return op_sput(false);
}
//! lower bytecode SPUT_SHORT by calling sget_sput_common

//!
int op_sput_short() {
    return op_sput(false);
}
#define P_GPR_1 PhysicalReg_EBX
#define P_GPR_2 PhysicalReg_ECX
//! lower bytecode IGET_QUICK

//!
int op_iget_quick() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst); //object
    u2 tmp = FETCH(1);

    requestVRFreeDelay(vB,VRDELAY_NULLCHECK); // Request VR delay before transfer to temporary
    get_virtual_reg(vB, OpndSize_32, 1, false);
    nullCheck(1, false, 1, vB); //maybe optimized away, if not, call
    cancelVRFreeDelayRequest(vB,VRDELAY_NULLCHECK);

    move_mem_to_reg(OpndSize_32, tmp, 1, false, 2, false);
    set_virtual_reg(vA, OpndSize_32, 2, false);
    rPC += 2;
    return 0;
}
#undef P_GPR_1
#undef P_GPR_2
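/* Note (added for clarity): for the *_QUICK opcodes the field index has already
 * been rewritten to the field's byte offset by the time the trace is compiled, so
 * FETCH(1) is used directly as a displacement off the object pointer and no field
 * resolution is performed here. */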
#define P_GPR_1 PhysicalReg_EBX
//! lower bytecode IGET_WIDE_QUICK

//!
int op_iget_wide_quick() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst); //object
    u2 tmp = FETCH(1);

    requestVRFreeDelay(vB,VRDELAY_NULLCHECK); // Request VR delay before transfer to temporary
    get_virtual_reg(vB, OpndSize_32, 1, false);
    nullCheck(1, false, 1, vB); //maybe optimized away, if not, call
    cancelVRFreeDelayRequest(vB,VRDELAY_NULLCHECK);

    move_mem_to_reg(OpndSize_64, tmp, 1, false, 1, false);
    set_virtual_reg(vA, OpndSize_64, 1, false);
    rPC += 2;
    return 0;
}
#undef P_GPR_1
//! lower bytecode IGET_OBJECT_QUICK

//!
int op_iget_object_quick() {
    return op_iget_quick();
}
#define P_GPR_1 PhysicalReg_EBX
#define P_GPR_2 PhysicalReg_ECX
//! lower bytecode IPUT_QUICK

//!
int iput_quick_common(bool isObj) {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst); //object
    u2 tmp = FETCH(1);

    // Request VR delay before transfer to temporary. Only vB needs delay.
    // vA will have a non-zero reference count since its transfer to a temporary
    // happens after the null check, thus no delay is needed.
    requestVRFreeDelay(vB,VRDELAY_NULLCHECK);
    get_virtual_reg(vB, OpndSize_32, 1, false);
    nullCheck(1, false, 1, vB); //maybe optimized away, if not, call
    cancelVRFreeDelayRequest(vB,VRDELAY_NULLCHECK);

    get_virtual_reg(vA, OpndSize_32, 2, false);
    move_reg_to_mem(OpndSize_32, 2, false, tmp, 1, false);
    if(isObj) {
        markCard(2/*valReg*/, 1, false, 11, false);
    }
    rPC += 2;
    return 0;
}
int op_iput_quick() {
    return iput_quick_common(false);
}
#undef P_GPR_1
#undef P_GPR_2
#define P_GPR_1 PhysicalReg_EBX
//! lower bytecode IPUT_WIDE_QUICK

//!
int op_iput_wide_quick() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst); //object
    u2 tmp = FETCH(1); //byte offset

    // Request VR delay before transfer to temporary. Only vB needs delay.
    // vA will have a non-zero reference count since its transfer to a temporary
    // happens after the null check, thus no delay is needed.
    requestVRFreeDelay(vB,VRDELAY_NULLCHECK);
    get_virtual_reg(vB, OpndSize_32, 1, false);
    nullCheck(1, false, 1, vB); //maybe optimized away, if not, call
    cancelVRFreeDelayRequest(vB,VRDELAY_NULLCHECK);

    get_virtual_reg(vA, OpndSize_64, 1, false);
    move_reg_to_mem(OpndSize_64, 1, false, tmp, 1, false);
    rPC += 2;
    return 0;
}
#undef P_GPR_1
//! lower bytecode IPUT_OBJECT_QUICK

//!
int op_iput_object_quick() {
    return iput_quick_common(true);
}