1 /* 2 * Copyright (C) 2012 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 18 /*! \file LowerGetPut.cpp 19 \brief This file lowers the following bytecodes: XGET|PUT_XXX 20 */ 21 #include "libdex/DexOpcodes.h" 22 #include "libdex/DexFile.h" 23 #include "Lower.h" 24 #include "NcgAot.h" 25 #include "enc_wrapper.h" 26 27 #define P_GPR_1 PhysicalReg_EBX 28 #define P_GPR_2 PhysicalReg_ECX 29 #define P_GPR_3 PhysicalReg_ESI 30 #define P_GPR_4 PhysicalReg_EDX 31 //! LOWER bytecode AGET without usage of helper function 32 33 //! 
//! It has null check and length check
//!
//! \brief Emits native code for the AGET family (AGET, AGET_WIDE, AGET_CHAR,
//!        AGET_SHORT, AGET_BOOLEAN, AGET_BYTE) without calling a helper.
//! \param flag which AGET variant to lower
//! \param vA destination virtual register
//! \param vref virtual register holding the array reference
//! \param vindex virtual register holding the index
//! \return 0 (the lowering functions in this file always return 0 on this path)
int aget_common_nohelper(int flag, u2 vA, u2 vref, u2 vindex) {
    ////////////////////////////
    // Request VR free delays before register allocation for the temporaries.
    // Delays are only requested for checks that will actually be emitted; a
    // matching cancel is issued after each check below.
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK))
        requestVRFreeDelay(vref,VRDELAY_NULLCHECK);
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        requestVRFreeDelay(vref,VRDELAY_BOUNDCHECK);
        requestVRFreeDelay(vindex,VRDELAY_BOUNDCHECK);
    }

    get_virtual_reg(vref, OpndSize_32, 1, false); //array -> temp 1
    get_virtual_reg(vindex, OpndSize_32, 2, false); //index -> temp 2

    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
        //last argument is the exception number for this bytecode
        nullCheck(1, false, 1, vref); //maybe optimized away, if not, call
        cancelVRFreeDelayRequest(vref,VRDELAY_NULLCHECK);
    } else {
        // Check elided: still consume one use of temp 1 so ref counts balance.
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
    }

    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        boundCheck(vref, 1, false,
                   vindex, 2, false,
                   2);
        cancelVRFreeDelayRequest(vref,VRDELAY_BOUNDCHECK);
        cancelVRFreeDelayRequest(vindex,VRDELAY_BOUNDCHECK);
    } else {
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
        updateRefCount2(2, LowOpndRegType_gp, false); //update reference count for tmp2
    }

    // Load the element from contents[index]; element size selects the scale
    // (4/8/2/1) and sign/zero extension (moves_/movez_) per variant.
    if(flag == AGET) {
        move_mem_disp_scale_to_reg(OpndSize_32, 1, false, offArrayObject_contents, 2, false, 4, 4, false);
    }
    else if(flag == AGET_WIDE) {
        move_mem_disp_scale_to_reg(OpndSize_64, 1, false, offArrayObject_contents, 2, false, 8, 1, false);
    }
    else if(flag == AGET_CHAR) {
        movez_mem_disp_scale_to_reg(OpndSize_16, 1, false, offArrayObject_contents, 2, false, 2, 4, false);
    }
    else if(flag == AGET_SHORT) {
        moves_mem_disp_scale_to_reg(OpndSize_16, 1, false, offArrayObject_contents, 2, false, 2, 4, false);
    }
    else if(flag == AGET_BOOLEAN) {
        movez_mem_disp_scale_to_reg(OpndSize_8, 1, false, offArrayObject_contents, 2, false, 1, 4, false);
    }
    else if(flag == AGET_BYTE) {
        moves_mem_disp_scale_to_reg(OpndSize_8, 1, false, offArrayObject_contents, 2, false, 1, 4, false);
    }
    // Wide results land in temp 1 (64-bit); everything else in temp 4 (32-bit).
    if(flag == AGET_WIDE) {
        set_virtual_reg(vA, OpndSize_64, 1, false);
    }
    else {
        set_virtual_reg(vA, OpndSize_32, 4, false);
    }
    //////////////////////////////////
    return 0;
}
//! wrapper to call either aget_common_helper or aget_common_nohelper
//!
//! Currently always routes to the no-helper version.
int aget_common(int flag, u2 vA, u2 vref, u2 vindex) {
    return aget_common_nohelper(flag, vA, vref, vindex);
}
#undef P_GPR_1
#undef P_GPR_2
#undef P_GPR_3
#undef P_GPR_4
//! lower bytecode AGET by calling aget_common
//!
//! Decodes vAA, vBB (array), vCC (index) from the 22x-format instruction.
int op_aget() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode AGET_WIDE by calling aget_common
//!
int op_aget_wide() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET_WIDE, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode AGET_OBJECT by calling aget_common
//!
//! Object references are 32-bit here, so this is identical to AGET.
int op_aget_object() {
    return op_aget();
}
//! lower bytecode AGET_BOOLEAN by calling aget_common
//!
int op_aget_boolean() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET_BOOLEAN, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode AGET_BYTE by calling aget_common
//!
int op_aget_byte() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET_BYTE, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode AGET_CHAR by calling aget_common
//!
int op_aget_char() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET_CHAR, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode AGET_SHORT by calling aget_common
//!
int op_aget_short() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aget_common(AGET_SHORT, vA, vref, vindex);
    rPC += 2;
    return retval;
}

#define P_GPR_1 PhysicalReg_EBX
#define P_GPR_2 PhysicalReg_ECX
#define P_GPR_3 PhysicalReg_ESI
#define P_GPR_4 PhysicalReg_EDX
//! LOWER bytecode APUT without usage of helper function
//!
//! It has null check and length check
//!
//! \brief Emits native code for the APUT family (APUT, APUT_WIDE, APUT_CHAR,
//!        APUT_SHORT, APUT_BOOLEAN, APUT_BYTE). APUT_OBJECT is handled
//!        separately (it needs a type check and card marking).
//! \param flag which APUT variant to lower
//! \param vA source virtual register (value to store)
//! \param vref virtual register holding the array reference
//! \param vindex virtual register holding the index
//! \return 0
int aput_common_nohelper(int flag, u2 vA, u2 vref, u2 vindex) {
    //////////////////////////////////////
    // Request VR free delays before register allocation for the temporaries.
    // No need to request delay for vA since it will be transferred to temporary
    // after the null check and bound check.
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK))
        requestVRFreeDelay(vref,VRDELAY_NULLCHECK);
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        requestVRFreeDelay(vref,VRDELAY_BOUNDCHECK);
        requestVRFreeDelay(vindex,VRDELAY_BOUNDCHECK);
    }

    get_virtual_reg(vref, OpndSize_32, 1, false); //array -> temp 1
    get_virtual_reg(vindex, OpndSize_32, 2, false); //index -> temp 2

    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
        //last argument is the exception number for this bytecode
        nullCheck(1, false, 1, vref); //maybe optimized away, if not, call
        cancelVRFreeDelayRequest(vref,VRDELAY_NULLCHECK);
    } else {
        // Check elided: still consume one use of temp 1 so ref counts balance.
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
    }

    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        boundCheck(vref, 1, false,
                   vindex, 2, false,
                   2);
        cancelVRFreeDelayRequest(vref,VRDELAY_BOUNDCHECK);
        cancelVRFreeDelayRequest(vindex,VRDELAY_BOUNDCHECK);
    } else {
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
        updateRefCount2(2, LowOpndRegType_gp, false); //update reference count for tmp2
    }

    // Transfer the value to store only after the checks have passed:
    // wide values use 64-bit temp 1, everything else 32-bit temp 4.
    if(flag == APUT_WIDE) {
        get_virtual_reg(vA, OpndSize_64, 1, false);
    }
    else {
        get_virtual_reg(vA, OpndSize_32, 4, false);
    }
    // Store to contents[index]; element size selects operand size and scale.
    if(flag == APUT)
        move_reg_to_mem_disp_scale(OpndSize_32, 4, false, 1, false, offArrayObject_contents, 2, false, 4);
    else if(flag == APUT_WIDE)
        move_reg_to_mem_disp_scale(OpndSize_64, 1, false, 1, false, offArrayObject_contents, 2, false, 8);
    else if(flag == APUT_CHAR || flag == APUT_SHORT)
        move_reg_to_mem_disp_scale(OpndSize_16, 4, false, 1, false, offArrayObject_contents, 2, false, 2);
    else if(flag == APUT_BOOLEAN || flag == APUT_BYTE)
        move_reg_to_mem_disp_scale(OpndSize_8, 4, false, 1, false, offArrayObject_contents, 2, false, 1);
    //////////////////////////////////
    return 0;
}
//! wrapper to call either aput_common_helper or aput_common_nohelper
//!
//! Currently always routes to the no-helper version.
int aput_common(int flag, u2 vA, u2 vref, u2 vindex) {
    return aput_common_nohelper(flag, vA, vref, vindex);
}
#undef P_GPR_1
#undef P_GPR_2
#undef P_GPR_3
#undef P_GPR_4
//! lower bytecode APUT by calling aput_common
//!
int op_aput() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode APUT_WIDE by calling aput_common
//!
int op_aput_wide() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT_WIDE, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode APUT_BOOLEAN by calling aput_common
//!
int op_aput_boolean() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT_BOOLEAN, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode APUT_BYTE by calling aput_common
//!
int op_aput_byte() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT_BYTE, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode APUT_CHAR by calling aput_common
//!
int op_aput_char() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT_CHAR, vA, vref, vindex);
    rPC += 2;
    return retval;
}
//! lower bytecode APUT_SHORT by calling aput_common
//!
int op_aput_short() {
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;
    int retval = aput_common(APUT_SHORT, vA, vref, vindex);
    rPC += 2;
    return retval;
}

#define P_GPR_1 PhysicalReg_EBX //callee-saved valid after CanPutArray
#define P_GPR_2 PhysicalReg_ECX
#define P_GPR_3 PhysicalReg_ESI //callee-saved
#define P_SCRATCH_1 PhysicalReg_EDX
#define P_SCRATCH_2 PhysicalReg_EAX
#define P_SCRATCH_3 PhysicalReg_EDX

void markCard_notNull(int tgtAddrReg, int scratchReg, bool isPhysical);

//! lower bytecode APUT_OBJECT
//!
//! Lower the bytecode using helper function ".aput_obj_helper" if helper switch is on
//!
//! Unlike the other APUT variants this performs an assignability check via
//! dvmCanPutArrayElement (unless the stored value is null) and marks the GC
//! card table after a successful store.
int op_aput_object() { //type checking
    u2 vA = INST_AA(inst);
    u2 vref = FETCH(1) & 0xff;
    u2 vindex = FETCH(1) >> 8;

    ///////////////////////////
    // Request VR free delays before register allocation for the temporaries
    // No need to request delay for vA since it will be transferred to temporary
    // after the null check and bound check.
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK))
        requestVRFreeDelay(vref,VRDELAY_NULLCHECK);
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        requestVRFreeDelay(vref,VRDELAY_BOUNDCHECK);
        requestVRFreeDelay(vindex,VRDELAY_BOUNDCHECK);
    }

    get_virtual_reg(vref, OpndSize_32, 1, false); //array
    export_pc(); //use %edx

    // Inline null check (jump to the common null-object handler on zero).
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_NULL_CHECK)) {
        compare_imm_reg(OpndSize_32, 0, 1, false);
        conditional_jump_global_API(Condition_E, "common_errNullObject", false);
        cancelVRFreeDelayRequest(vref,VRDELAY_NULLCHECK);
    } else {
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
    }

    get_virtual_reg(vindex, OpndSize_32, 2, false); //index
    // Inline bound check: unsigned compare of index against array length.
    if(!(traceCurrentMIR->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        compare_mem_reg(OpndSize_32, offArrayObject_length, 1, false, 2, false);
        conditional_jump_global_API(Condition_NC, "common_errArrayIndex", false);
        cancelVRFreeDelayRequest(vref,VRDELAY_BOUNDCHECK);
        cancelVRFreeDelayRequest(vindex,VRDELAY_BOUNDCHECK);
    } else {
        updateRefCount2(1, LowOpndRegType_gp, false); //update reference count for tmp1
        updateRefCount2(2, LowOpndRegType_gp, false); //update reference count for tmp2
    }

    // Storing null needs no type check or card mark of the value's class;
    // branch around the dvmCanPutArrayElement call.
    get_virtual_reg(vA, OpndSize_32, 4, false);
    compare_imm_reg(OpndSize_32, 0, 4, false);
    conditional_jump(Condition_E, ".aput_object_skip_check", true);
    rememberState(1);
    // Push (elemClass, arrayClass) as the two stack arguments.
    move_mem_to_reg(OpndSize_32, offObject_clazz, 4, false, 5, false);
    load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    move_reg_to_mem(OpndSize_32, 5, false, 0, PhysicalReg_ESP, true);
    move_mem_to_reg(OpndSize_32, offObject_clazz, 1, false, 6, false);
    move_reg_to_mem(OpndSize_32, 6, false, 4, PhysicalReg_ESP, true);

    scratchRegs[0] = PhysicalReg_SCRATCH_1;
    call_dvmCanPutArrayElement(); //scratch??
    load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
    compare_imm_reg(OpndSize_32, 0, PhysicalReg_EAX, true);
    conditional_jump_global_API(Condition_E, "common_errArrayStore", false);

    //NOTE: "2, false" is live through function call
    move_reg_to_mem_disp_scale(OpndSize_32, 4, false, 1, false, offArrayObject_contents, 2, false, 4);
    markCard_notNull(1, 11, false);
    rememberState(2);
    ////TODO NCG O1 + code cache
    unconditional_jump(".aput_object_after_check", true);

    insertLabel(".aput_object_skip_check", true);
    goToState(1);
    //NOTE: "2, false" is live through function call
    move_reg_to_mem_disp_scale(OpndSize_32, 4, false, 1, false, offArrayObject_contents, 2, false, 4);

    transferToState(2);
    insertLabel(".aput_object_after_check", true);
    ///////////////////////////////
    rPC += 2;
    return 0;
}
#undef P_GPR_1
#undef P_GPR_2
#undef P_GPR_3
#undef P_SCRATCH_1
#undef P_SCRATCH_2
#undef P_SCRATCH_3

//////////////////////////////////////////
#define P_GPR_1 PhysicalReg_ECX
#define P_GPR_2 PhysicalReg_EBX //should be callee-saved to avoid overwritten by inst_field_resolve
#define P_GPR_3 PhysicalReg_ESI
#define P_SCRATCH_1 PhysicalReg_EDX

/*
   movl offThread_cardTable(self), scratchReg
   compare_imm_reg 0, valReg (testl valReg, valReg)
   je .markCard_skip
   shrl $GC_CARD_SHIFT, tgtAddrReg
   movb %, (scratchReg, tgtAddrReg)
   NOTE: scratchReg can be accessed with the corresponding byte
         tgtAddrReg will be updated
   for O1, update the corresponding reference count
*/
//! \brief Mark the GC card for tgtAddrReg unless valReg is null.
//!
//! Loads the card table base from the current Thread, skips the write when the
//! stored value is null, then writes one byte at cardTable + (addr >> GC_CARD_SHIFT).
//! tgtAddrReg is clobbered (shifted in place).
//! NOTE(review): the null test on valReg uses 'isPhysical' (the scratch
//! register's flag) rather than a dedicated flag for valReg; all current
//! callers pass matching values — confirm if adding new call sites.
void markCard(int valReg, int tgtAddrReg, bool targetPhysical, int scratchReg, bool isPhysical) {
    get_self_pointer(PhysicalReg_SCRATCH_6, isScratchPhysical);
    move_mem_to_reg(OpndSize_32, offsetof(Thread, cardTable), PhysicalReg_SCRATCH_6, isScratchPhysical, scratchReg, isPhysical);
    compare_imm_reg(OpndSize_32, 0, valReg, isPhysical);
    conditional_jump(Condition_E, ".markCard_skip", true);
    alu_binary_imm_reg(OpndSize_32, shr_opc, GC_CARD_SHIFT, tgtAddrReg, targetPhysical);
    move_reg_to_mem_disp_scale(OpndSize_8, scratchReg, isPhysical, scratchReg, isPhysical, 0, tgtAddrReg, targetPhysical, 1);
    insertLabel(".markCard_skip", true);
}

//! \brief Same as markCard but the caller guarantees the value is non-null,
//!        so the null test and skip label are omitted. tgtAddrReg is clobbered.
void markCard_notNull(int tgtAddrReg, int scratchReg, bool isPhysical) {
    get_self_pointer(PhysicalReg_SCRATCH_2, isScratchPhysical);
    move_mem_to_reg(OpndSize_32, offsetof(Thread, cardTable), PhysicalReg_SCRATCH_2, isScratchPhysical, scratchReg, isPhysical);
    alu_binary_imm_reg(OpndSize_32, shr_opc, GC_CARD_SHIFT, tgtAddrReg, isPhysical);
    move_reg_to_mem_disp_scale(OpndSize_8, scratchReg, isPhysical, scratchReg, isPhysical, 0, tgtAddrReg, isPhysical, 1);
}

//! \brief Unconditional card mark with explicit physical-register flags for
//!        both the target address and the scratch register.
void markCard_filled(int tgtAddrReg, bool isTgtPhysical, int scratchReg, bool isScratchPhysical) {
    get_self_pointer(PhysicalReg_SCRATCH_2, false/*isPhysical*/);
    move_mem_to_reg(OpndSize_32, offsetof(Thread, cardTable), PhysicalReg_SCRATCH_2, isScratchPhysical, scratchReg, isScratchPhysical);
    alu_binary_imm_reg(OpndSize_32, shr_opc, GC_CARD_SHIFT, tgtAddrReg, isTgtPhysical);
    move_reg_to_mem_disp_scale(OpndSize_8, scratchReg, isScratchPhysical, scratchReg, isScratchPhysical, 0, tgtAddrReg, isTgtPhysical, 1);
}
//! LOWER bytecode IGET,IPUT without usage of helper function
//!
//! It has null check and calls assembly function inst_field_resolve
//!
//! \brief Shared lowering for IGET/IGET_WIDE/IPUT/IPUT_WIDE, including the
//!        volatile wide accesses which go through dvmQuasiAtomic{Read,Swap}64.
//! \param tmp resolved-field index into pResFields
//! \param flag which IGET/IPUT variant to lower
//! \param vA value virtual register (dest for IGET, source for IPUT)
//! \param vB object virtual register
//! \param isObj non-zero when the field holds an object reference (IPUT marks the card)
//! \param isVolatile true for volatile wide accesses
//! \return 0
int iget_iput_common_nohelper(int tmp, int flag, u2 vA, u2 vB, int isObj, bool isVolatile) {
#ifdef WITH_JIT_INLINING
    // When this MIR was inlined from a callee, resolve the field against the
    // callee's class, not the current (caller) method's.
    const Method *method = (traceCurrentMIR->OptimizationFlags & MIR_CALLEE) ?
        traceCurrentMIR->meta.calleeMethod : currentMethod;
    InstField *pInstField = (InstField *)
        method->clazz->pDvmDex->pResFields[tmp];
#else
    InstField *pInstField = (InstField *)
        currentMethod->clazz->pDvmDex->pResFields[tmp];
#endif
    int fieldOffset;

    assert(pInstField != NULL);
    fieldOffset = pInstField->byteOffset;
    move_imm_to_reg(OpndSize_32, fieldOffset, 8, false);
    // Request VR delay before transfer to temporary. Only vB needs delay.
    // vA will have non-zero reference count since transfer to temporary for
    // it happens after null check, thus no delay is needed.
    requestVRFreeDelay(vB,VRDELAY_NULLCHECK);
    get_virtual_reg(vB, OpndSize_32, 7, false);
    nullCheck(7, false, 2, vB); //maybe optimized away, if not, call
    cancelVRFreeDelayRequest(vB,VRDELAY_NULLCHECK);
    if(flag == IGET) {
        move_mem_scale_to_reg(OpndSize_32, 7, false, 8, false, 1, 9, false);
        set_virtual_reg(vA, OpndSize_32, 9, false);
#ifdef DEBUG_IGET_OBJ
        if(isObj > 0) {
            pushAllRegs();
            load_effective_addr(-16, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            move_reg_to_mem(OpndSize_32, 9, false, 12, PhysicalReg_ESP, true); //field
            move_reg_to_mem(OpndSize_32, 7, false, 8, PhysicalReg_ESP, true); //object
            move_imm_to_mem(OpndSize_32, tmp, 4, PhysicalReg_ESP, true); //field
            move_imm_to_mem(OpndSize_32, 0, 0, PhysicalReg_ESP, true); //iget
            call_dvmDebugIgetIput();
            load_effective_addr(16, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            popAllRegs();
        }
#endif
    } else if(flag == IGET_WIDE) {
        if(isVolatile) {
            /* call dvmQuasiAtomicRead64(addr) */
            load_effective_addr(fieldOffset, 7, false, 9, false);
            move_reg_to_mem(OpndSize_32, 9, false, -4, PhysicalReg_ESP, true); //1st argument
            load_effective_addr(-4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            nextVersionOfHardReg(PhysicalReg_EAX, 2);
            nextVersionOfHardReg(PhysicalReg_EDX, 2);
            scratchRegs[0] = PhysicalReg_SCRATCH_3;
            call_dvmQuasiAtomicRead64();
            load_effective_addr(4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            //memory content in %edx, %eax
            set_virtual_reg(vA, OpndSize_32, PhysicalReg_EAX, true);
            set_virtual_reg(vA+1, OpndSize_32, PhysicalReg_EDX, true);
        } else {
            move_mem_scale_to_reg(OpndSize_64, 7, false, 8, false, 1, 1, false); //access field
            set_virtual_reg(vA, OpndSize_64, 1, false);
        }
    } else if(flag == IPUT) {
        get_virtual_reg(vA, OpndSize_32, 9, false);
        move_reg_to_mem_scale(OpndSize_32, 9, false, 7, false, 8, false, 1); //access field
        if(isObj) {
            markCard(9, 7, false, 11, false);
        }
    } else if(flag == IPUT_WIDE) {
        get_virtual_reg(vA, OpndSize_64, 1, false);
        if(isVolatile) {
            /* call dvmQuasiAtomicSwap64(val, addr) */
            load_effective_addr(fieldOffset, 7, false, 9, false);
            move_reg_to_mem(OpndSize_32, 9, false, -4, PhysicalReg_ESP, true); //2nd argument
            move_reg_to_mem(OpndSize_64, 1, false, -12, PhysicalReg_ESP, true); //1st argument
            load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            scratchRegs[0] = PhysicalReg_SCRATCH_3;
            call_dvmQuasiAtomicSwap64();
            load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
        }
        else {
            move_reg_to_mem_scale(OpndSize_64, 1, false, 7, false, 8, false, 1);
        }
    }
    ///////////////////////////
    return 0;
}
//! wrapper to call either iget_iput_common_helper or iget_iput_common_nohelper
//!
//! Currently always routes to the no-helper version.
int iget_iput_common(int tmp, int flag, u2 vA, u2 vB, int isObj, bool isVolatile) {
    return iget_iput_common_nohelper(tmp, flag, vA, vB, isObj, isVolatile);
}
#undef P_GPR_1
#undef P_GPR_2
#undef P_GPR_3
#undef P_SCRATCH_1
//! lower bytecode IGET by calling iget_iput_common
//!
int op_iget() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IGET, vA, vB, 0, false);
    rPC += 2;
    return retval;
}
//! lower bytecode IGET_WIDE by calling iget_iput_common
//!
//! \param isVolatile true for the volatile-wide variant (uses dvmQuasiAtomicRead64)
int op_iget_wide(bool isVolatile) {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IGET_WIDE, vA, vB, 0, isVolatile);
    rPC += 2;
    return retval;
}
//! lower bytecode IGET_OBJECT by calling iget_iput_common
//!
//! Same as IGET except isObj is set (enables DEBUG_IGET_OBJ instrumentation).
int op_iget_object() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IGET, vA, vB, 1, false);
    rPC += 2;
    return retval;
}
//! lower bytecode IGET_BOOLEAN by calling iget_iput_common
//!
//! Sub-word instance fields occupy a full 32-bit slot, so these alias IGET.
int op_iget_boolean() {
    return op_iget();
}
//! lower bytecode IGET_BYTE by calling iget_iput_common
//!
int op_iget_byte() {
    return op_iget();
}
//! lower bytecode IGET_CHAR by calling iget_iput_common
//!
int op_iget_char() {
    return op_iget();
}
//! lower bytecode IGET_SHORT by calling iget_iput_common
//!
int op_iget_short() {
    return op_iget();
}
//! lower bytecode IPUT by calling iget_iput_common
//!
int op_iput() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IPUT, vA, vB, 0, false);
    rPC += 2;
    return retval;
}
//! lower bytecode IPUT_WIDE by calling iget_iput_common
//!
//! \param isVolatile true for the volatile-wide variant (uses dvmQuasiAtomicSwap64)
int op_iput_wide(bool isVolatile) {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IPUT_WIDE, vA, vB, 0, isVolatile);
    rPC += 2;
    return retval;
}
//! lower bytecode IPUT_OBJECT by calling iget_iput_common
//!
int op_iput_object() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst);
    u2 tmp = FETCH(1);
    int retval = iget_iput_common(tmp, IPUT, vA, vB, 1, false);
    rPC += 2;
    return retval;
}
//! lower bytecode IPUT_BOOLEAN by calling iget_iput_common
//!
//! Sub-word instance fields occupy a full 32-bit slot, so these alias IPUT.
int op_iput_boolean() {
    return op_iput();
}
//! lower bytecode IPUT_BYTE by calling iget_iput_common
//!
int op_iput_byte() {
    return op_iput();
}
//! lower bytecode IPUT_CHAR by calling iget_iput_common
//!
int op_iput_char() {
    return op_iput();
}
//! lower bytecode IPUT_SHORT by calling iget_iput_common
//!
int op_iput_short() {
    return op_iput();
}

#define P_GPR_1 PhysicalReg_EBX
#define P_GPR_2 PhysicalReg_ECX
#define P_GPR_3 PhysicalReg_EDX //used by helper only

//! common section to lower IGET & IPUT
//!
//! It will use helper function sget_helper if the switch is on
//!
//! \brief Shared lowering for SGET/SGET_WIDE/SPUT/SPUT_WIDE on a statically
//!        resolved field. The resolved StaticField pointer is baked into the
//!        generated code as an immediate in %eax.
//! \param flag which SGET/SPUT variant to lower
//! \param vA value virtual register (dest for SGET, source for SPUT)
//! \param tmp resolved-field index into pResFields
//! \param isObj true when SPUT stores an object reference (marks the card)
//! \param isVolatile true for volatile wide accesses
//! \return 0
int sget_sput_common(int flag, u2 vA, u2 tmp, bool isObj, bool isVolatile) {
    //call assembly static_field_resolve
    //no exception
    //glue: get_res_fields
    //hard-coded: eax (one version?)
    //////////////////////////////////////////
#ifdef WITH_JIT_INLINING
    // When this MIR was inlined from a callee, resolve the field against the
    // callee's class, not the current (caller) method's.
    const Method *method = (traceCurrentMIR->OptimizationFlags & MIR_CALLEE) ?
        traceCurrentMIR->meta.calleeMethod : currentMethod;
    void *fieldPtr = (void*)
        (method->clazz->pDvmDex->pResFields[tmp]);
#else
    void *fieldPtr = (void*)
        (currentMethod->clazz->pDvmDex->pResFields[tmp]);
#endif
    assert(fieldPtr != NULL);
    move_imm_to_reg(OpndSize_32, (int)fieldPtr, PhysicalReg_EAX, true);
    if(flag == SGET) {
        move_mem_to_reg(OpndSize_32, offStaticField_value, PhysicalReg_EAX, true, 7, false); //access field
        set_virtual_reg(vA, OpndSize_32, 7, false);
    } else if(flag == SGET_WIDE) {
        if(isVolatile) {
            /* call dvmQuasiAtomicRead64(addr) */
            load_effective_addr(offStaticField_value, PhysicalReg_EAX, true, 9, false);
            move_reg_to_mem(OpndSize_32, 9, false, -4, PhysicalReg_ESP, true); //1st argument
            load_effective_addr(-4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            nextVersionOfHardReg(PhysicalReg_EAX, 2);
            nextVersionOfHardReg(PhysicalReg_EDX, 2);
            scratchRegs[0] = PhysicalReg_SCRATCH_3;
            call_dvmQuasiAtomicRead64();
            load_effective_addr(4, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            //memory content in %edx, %eax
            set_virtual_reg(vA, OpndSize_32, PhysicalReg_EAX, true);
            set_virtual_reg(vA+1, OpndSize_32, PhysicalReg_EDX, true);
        }
        else {
            move_mem_to_reg(OpndSize_64, offStaticField_value, PhysicalReg_EAX, true, 1, false); //access field
            set_virtual_reg(vA, OpndSize_64, 1, false);
        }
    } else if(flag == SPUT) {
        get_virtual_reg(vA, OpndSize_32, 7, false);
        move_reg_to_mem(OpndSize_32, 7, false, offStaticField_value, PhysicalReg_EAX, true); //access field
        if(isObj) {
            /* get clazz object, then use clazz object to mark card */
            move_mem_to_reg(OpndSize_32, offField_clazz, PhysicalReg_EAX, true, 12, false);
            markCard(7/*valReg*/, 12, false, 11, false);
        }
    } else if(flag == SPUT_WIDE) {
        get_virtual_reg(vA, OpndSize_64, 1, false);
        if(isVolatile) {
            /* call dvmQuasiAtomicSwap64(val, addr) */
            load_effective_addr(offStaticField_value, PhysicalReg_EAX, true, 9, false);
            move_reg_to_mem(OpndSize_32, 9, false, -4, PhysicalReg_ESP, true); //2nd argument
            move_reg_to_mem(OpndSize_64, 1, false, -12, PhysicalReg_ESP, true); //1st argument
            load_effective_addr(-12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
            scratchRegs[0] = PhysicalReg_SCRATCH_3;
            call_dvmQuasiAtomicSwap64();
            load_effective_addr(12, PhysicalReg_ESP, true, PhysicalReg_ESP, true);
        }
        else {
            move_reg_to_mem(OpndSize_64, 1, false, offStaticField_value, PhysicalReg_EAX, true); //access field
        }
    }
    //////////////////////////////////////////////
    return 0;
}
#undef P_GPR_1
#undef P_GPR_2
#undef P_GPR_3
//! lower bytecode SGET by calling sget_sput_common
//!
int op_sget() {
    u2 vA = INST_AA(inst);
    u2 tmp = FETCH(1);
    int retval = sget_sput_common(SGET, vA, tmp, false, false);
    rPC += 2;
    return retval;
}
//! lower bytecode SGET_WIDE by calling sget_sput_common
//!
//! \param isVolatile true for the volatile-wide variant
int op_sget_wide(bool isVolatile) {
    u2 vA = INST_AA(inst);
    u2 tmp = FETCH(1);
    int retval = sget_sput_common(SGET_WIDE, vA, tmp, false, isVolatile);
    rPC += 2;
    return retval;
}
//! lower bytecode SGET_OBJECT by calling sget_sput_common
//!
//! Sub-word and object static reads all alias SGET (full 32-bit slot).
int op_sget_object() {
    return op_sget();
}
//! lower bytecode SGET_BOOLEAN by calling sget_sput_common
//!
int op_sget_boolean() {
    return op_sget();
}
//! lower bytecode SGET_BYTE by calling sget_sput_common
//!
int op_sget_byte() {
    return op_sget();
}
//! lower bytecode SGET_CHAR by calling sget_sput_common
//!
int op_sget_char() {
    return op_sget();
}
//! lower bytecode SGET_SHORT by calling sget_sput_common
//!
int op_sget_short() {
    return op_sget();
}
//! lower bytecode SPUT by calling sget_sput_common
//!
//! \param isObj true when the stored value is an object reference (card mark)
int op_sput(bool isObj) {
    u2 vA = INST_AA(inst);
    u2 tmp = FETCH(1);
    int retval = sget_sput_common(SPUT, vA, tmp, isObj, false);
    rPC += 2;
    return retval;
}
//! lower bytecode SPUT_WIDE by calling sget_sput_common
//!
//! \param isVolatile true for the volatile-wide variant
int op_sput_wide(bool isVolatile) {
    u2 vA = INST_AA(inst);
    u2 tmp = FETCH(1);
    int retval = sget_sput_common(SPUT_WIDE, vA, tmp, false, isVolatile);
    rPC += 2;
    return retval;
}
//! lower bytecode SPUT_OBJECT by calling sget_sput_common
//!
int op_sput_object() {
    return op_sput(true);
}
//! lower bytecode SPUT_BOOLEAN by calling sget_sput_common
//!
int op_sput_boolean() {
    return op_sput(false);
}
//! lower bytecode SPUT_BYTE by calling sget_sput_common
//!
int op_sput_byte() {
    return op_sput(false);
}
//! lower bytecode SPUT_CHAR by calling sget_sput_common
//!
int op_sput_char() {
    return op_sput(false);
}
//! lower bytecode SPUT_SHORT by calling sget_sput_common
//!
int op_sput_short() {
    return op_sput(false);
}
#define P_GPR_1 PhysicalReg_EBX
#define P_GPR_2 PhysicalReg_ECX
//! lower bytecode IGET_QUICK
//!
//! The quickened form carries the field byte offset directly in the
//! instruction stream (tmp), so no field resolution is needed — just a
//! null check and a load.
int op_iget_quick() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst); //object
    u2 tmp = FETCH(1);

    requestVRFreeDelay(vB,VRDELAY_NULLCHECK); // Request VR delay before transfer to temporary
    get_virtual_reg(vB, OpndSize_32, 1, false);
    nullCheck(1, false, 1, vB); //maybe optimized away, if not, call
    cancelVRFreeDelayRequest(vB,VRDELAY_NULLCHECK);

    move_mem_to_reg(OpndSize_32, tmp, 1, false, 2, false); //load field at byte offset tmp
    set_virtual_reg(vA, OpndSize_32, 2, false);
    rPC += 2;
    return 0;
}
#undef P_GPR_1
#undef P_GPR_2
#define P_GPR_1 PhysicalReg_EBX
//! lower bytecode IGET_WIDE_QUICK
//!
int op_iget_wide_quick() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst); //object
    u2 tmp = FETCH(1);

    requestVRFreeDelay(vB,VRDELAY_NULLCHECK); // Request VR delay before transfer to temporary
    get_virtual_reg(vB, OpndSize_32, 1, false);
    nullCheck(1, false, 1, vB); //maybe optimized away, if not, call
    cancelVRFreeDelayRequest(vB,VRDELAY_NULLCHECK);

    // 64-bit load from byte offset tmp; temp id 1 is reused here with a
    // 64-bit operand size (distinct from the 32-bit gp temp 1 above).
    move_mem_to_reg(OpndSize_64, tmp, 1, false, 1, false);
    set_virtual_reg(vA, OpndSize_64, 1, false);
    rPC += 2;
    return 0;
}
#undef P_GPR_1
//! lower bytecode IGET_OBJECT_QUICK
//!
//! Object references are 32-bit, so this is identical to IGET_QUICK.
int op_iget_object_quick() {
    return op_iget_quick();
}
#define P_GPR_1 PhysicalReg_EBX
#define P_GPR_2 PhysicalReg_ECX
//! lower bytecode IPUT_QUICK
//!
//! \brief Shared lowering for IPUT_QUICK and IPUT_OBJECT_QUICK.
//! \param isObj true for the object variant (adds a GC card mark)
int iput_quick_common(bool isObj) {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst); //object
    u2 tmp = FETCH(1);

    // Request VR delay before transfer to temporary. Only vB needs delay.
    // vA will have non-zero reference count since transfer to temporary for
    // it happens after null check, thus no delay is needed.
    requestVRFreeDelay(vB,VRDELAY_NULLCHECK);
    get_virtual_reg(vB, OpndSize_32, 1, false);
    nullCheck(1, false, 1, vB); //maybe optimized away, if not, call
    cancelVRFreeDelayRequest(vB,VRDELAY_NULLCHECK);

    get_virtual_reg(vA, OpndSize_32, 2, false);
    move_reg_to_mem(OpndSize_32, 2, false, tmp, 1, false); //store at byte offset tmp
    if(isObj) {
        markCard(2/*valReg*/, 1, false, 11, false);
    }
    rPC += 2;
    return 0;
}
//! lower bytecode IPUT_QUICK by calling iput_quick_common
int op_iput_quick() {
    return iput_quick_common(false);
}
#undef P_GPR_1
#undef P_GPR_2
#define P_GPR_1 PhysicalReg_EBX
//! lower bytecode IPUT_WIDE_QUICK
//!
int op_iput_wide_quick() {
    u2 vA = INST_A(inst);
    u2 vB = INST_B(inst); //object
    u2 tmp = FETCH(1); //byte offset

    // Request VR delay before transfer to temporary. Only vB needs delay.
    // vA will have non-zero reference count since transfer to temporary for
    // it happens after null check, thus no delay is needed.
    requestVRFreeDelay(vB,VRDELAY_NULLCHECK);
    get_virtual_reg(vB, OpndSize_32, 1, false);
    nullCheck(1, false, 1, vB); //maybe optimized away, if not, call
    cancelVRFreeDelayRequest(vB,VRDELAY_NULLCHECK);

    // 64-bit temp id 1 holds the value; distinct from the 32-bit gp temp 1
    // that holds the object pointer above.
    get_virtual_reg(vA, OpndSize_64, 1, false);
    move_reg_to_mem(OpndSize_64, 1, false, tmp, 1, false);
    rPC += 2;
    return 0;
}
#undef P_GPR_1
//! lower bytecode IPUT_OBJECT_QUICK
//!
//! Same as IPUT_QUICK plus a GC card mark for the stored reference.
int op_iput_object_quick() {
    return iput_quick_common(true);
}