1 /* 2 * Tiny Code Generator for QEMU 3 * 4 * Copyright (c) 2008 Fabrice Bellard 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a copy 7 * of this software and associated documentation files (the "Software"), to deal 8 * in the Software without restriction, including without limitation the rights 9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 10 * copies of the Software, and to permit persons to whom the Software is 11 * furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 22 * THE SOFTWARE. 
 */

#ifndef NDEBUG
/* Human-readable register names, indexed by TCG register number
   (used only for debug output). */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%eax",
    "%ecx",
    "%edx",
    "%ebx",
    "%esp",
    "%ebp",
    "%esi",
    "%edi",
};
#endif

/* Preferred allocation order: call-clobbered registers first so that
   callee-saved ones stay free for values living across calls. */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_EAX,
    TCG_REG_EDX,
    TCG_REG_ECX,
    TCG_REG_EBX,
    TCG_REG_ESI,
    TCG_REG_EDI,
    TCG_REG_EBP,
};

/* Registers carrying integer call arguments / results under the gcc
   "regparm" calling conventions. */
static const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX };
static const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };

/* Address of the common TB epilogue; INDEX_op_exit_tb jumps here.
   Set by tcg_target_qemu_prologue(). */
static uint8_t *tb_ret_addr;

/* Patch one relocation of ELF type 'type' at 'code_ptr'. */
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    value += addend;
    switch(type) {
    case R_386_32:
        /* 32-bit absolute */
        *(uint32_t *)code_ptr = value;
        break;
    case R_386_PC32:
        /* 32-bit PC-relative, relative to the patched location */
        *(uint32_t *)code_ptr = value - (long)code_ptr;
        break;
    default:
        tcg_abort();
    }
}

/* maximum number of register used for input function arguments */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    flags &= TCG_CALL_TYPE_MASK;
    switch(flags) {
    case TCG_CALL_TYPE_STD:
        /* standard cdecl: all arguments on the stack */
        return 0;
    case TCG_CALL_TYPE_REGPARM_1:
    case TCG_CALL_TYPE_REGPARM_2:
    case TCG_CALL_TYPE_REGPARM:
        /* the three REGPARM types are consecutive, mapping to 1..3 */
        return flags - TCG_CALL_TYPE_REGPARM_1 + 1;
    default:
        tcg_abort();
    }
}

/* parse target specific constraints */
static int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch(ct_str[0]) {
    case 'a':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EAX);
        break;
    case 'b':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EBX);
        break;
    case 'c':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ECX);
        break;
    case 'd':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDX);
        break;
    case 'S':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_ESI);
        break;
    case 'D':
        ct->ct |= TCG_CT_REG;
        tcg_regset_set_reg(ct->u.regs, TCG_REG_EDI);
        break;
    case 'q':
        /* the four registers with byte-addressable low parts (mask 0xf) */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xf);
        break;
    case 'r':
        /* any of the eight registers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        break;

        /* qemu_ld/st address constraint */
    case 'L':
        /* any register except %eax/%edx, which the slow path clobbers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EAX);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_EDX);
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}

/* test if a constant matches the constraint */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    int ct;
    ct = arg_ct->ct;
    /* any 32-bit constant is representable on i386 */
    if (ct & TCG_CT_CONST)
        return 1;
    else
        return 0;
}

/* /r field selecting the operation in the 0x81/0x83 ALU opcode group */
#define ARITH_ADD 0
#define ARITH_OR  1
#define ARITH_ADC 2
#define ARITH_SBB 3
#define ARITH_AND 4
#define ARITH_SUB 5
#define ARITH_XOR 6
#define ARITH_CMP 7

/* /r field selecting the operation in the 0xc1/0xd1/0xd3 shift group */
#define SHIFT_ROL 0
#define SHIFT_ROR 1
#define SHIFT_SHL 4
#define SHIFT_SHR 5
#define SHIFT_SAR 7

/* x86 condition codes for Jcc; JCC_JMP means "unconditional" */
#define JCC_JMP (-1)
#define JCC_JO  0x0
#define JCC_JNO 0x1
#define JCC_JB  0x2
#define JCC_JAE 0x3
#define JCC_JE  0x4
#define JCC_JNE 0x5
#define JCC_JBE 0x6
#define JCC_JA  0x7
#define JCC_JS  0x8
#define JCC_JNS 0x9
#define JCC_JP  0xa
#define JCC_JNP 0xb
#define JCC_JL  0xc
#define JCC_JGE 0xd
#define JCC_JLE 0xe
#define JCC_JG  0xf

#define P_EXT 0x100 /* 0x0f opcode prefix */

/* Map TCG comparison conditions to x86 condition codes. */
static const uint8_t tcg_cond_to_jcc[10] = {
    [TCG_COND_EQ] = JCC_JE,
    [TCG_COND_NE] = JCC_JNE,
    [TCG_COND_LT] = JCC_JL,
    [TCG_COND_GE] = JCC_JGE,
    [TCG_COND_LE] = JCC_JLE,
    [TCG_COND_GT] = JCC_JG,
    [TCG_COND_LTU] = JCC_JB,
    [TCG_COND_GEU] = JCC_JAE,
    [TCG_COND_LEU] = JCC_JBE,
    [TCG_COND_GTU] = JCC_JA,
};

/* Emit an opcode byte, preceded by the 0x0f escape if P_EXT is set. */
static inline void tcg_out_opc(TCGContext *s, int opc)
{
    if (opc & P_EXT)
        tcg_out8(s, 0x0f);
    tcg_out8(s, opc);
}

/* Emit opcode + ModRM byte for a register-to-register form (mod=11). */
static inline void tcg_out_modrm(TCGContext *s, int opc, int r, int rm)
{
    tcg_out_opc(s, opc);
    tcg_out8(s, 0xc0 | (r << 3) | rm);
}

/* rm == -1 means no register index */
static inline void tcg_out_modrm_offset(TCGContext *s, int opc, int r, int rm,
                                        int32_t offset)
{
    tcg_out_opc(s, opc);
    if (rm == -1) {
        /* absolute 32-bit address: mod=00, rm=101 */
        tcg_out8(s, 0x05 | (r << 3));
        tcg_out32(s, offset);
    } else if (offset == 0 && rm != TCG_REG_EBP) {
        /* no displacement; %ebp cannot use mod=00, %esp needs a SIB byte */
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x04 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x00 | (r << 3) | rm);
        }
    } else if ((int8_t)offset == offset) {
        /* 8-bit displacement (mod=01) */
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x44 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x40 | (r << 3) | rm);
        }
        tcg_out8(s, offset);
    } else {
        /* 32-bit displacement (mod=10) */
        if (rm == TCG_REG_ESP) {
            tcg_out8(s, 0x84 | (r << 3));
            tcg_out8(s, 0x24);
        } else {
            tcg_out8(s, 0x80 | (r << 3) | rm);
        }
        tcg_out32(s, offset);
    }
}

/* Register-to-register move, elided when source == destination. */
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    if (arg != ret)
        tcg_out_modrm(s, 0x8b, ret, arg);
}

/* Load a 32-bit immediate into 'ret'. */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, int32_t arg)
{
    if (arg == 0) {
        /* xor r0,r0 -- shorter than mov $0; note it clobbers the flags */
        tcg_out_modrm(s, 0x01 | (ARITH_XOR << 3), ret, ret);
    } else {
        /* mov $imm32, reg */
        tcg_out8(s, 0xb8 + ret);
        tcg_out32(s, arg);
    }
}

/* Load a 32-bit value from arg1+arg2 into 'ret'. */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    /* movl */
    tcg_out_modrm_offset(s, 0x8b, ret, arg1, arg2);
}

/* Store the 32-bit value in 'arg' at arg1+arg2. */
static inline void tcg_out_st(TCGContext *s, TCGType type, int arg,
                              int arg1, tcg_target_long arg2)
{
    /* movl */
tcg_out_modrm_offset(s, 0x89, arg, arg1, arg2);
}

/* Emit "op $val, r0" for ALU group operation 'c'.  When 'cf' is
   nonzero the emitted code must preserve the carry flag, which rules
   out the inc/dec shortcuts (they leave CF untouched but are avoided
   here for the immediate forms used by add2/sub2). */
static inline void tgen_arithi(TCGContext *s, int c, int r0, int32_t val, int cf)
{
    if (!cf && ((c == ARITH_ADD && val == 1) || (c == ARITH_SUB && val == -1))) {
        /* inc */
        tcg_out_opc(s, 0x40 + r0);
    } else if (!cf && ((c == ARITH_ADD && val == -1) || (c == ARITH_SUB && val == 1))) {
        /* dec */
        tcg_out_opc(s, 0x48 + r0);
    } else if (val == (int8_t)val) {
        /* sign-extended 8-bit immediate form */
        tcg_out_modrm(s, 0x83, c, r0);
        tcg_out8(s, val);
    } else if (c == ARITH_AND && val == 0xffu && r0 < 4) {
        /* movzbl -- needs a byte-addressable register (%eax..%ebx) */
        tcg_out_modrm(s, 0xb6 | P_EXT, r0, r0);
    } else if (c == ARITH_AND && val == 0xffffu) {
        /* movzwl */
        tcg_out_modrm(s, 0xb7 | P_EXT, r0, r0);
    } else {
        /* full 32-bit immediate form */
        tcg_out_modrm(s, 0x81, c, r0);
        tcg_out32(s, val);
    }
}

/* Add a constant to a register; no code emitted when val == 0. */
static void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    if (val != 0)
        tgen_arithi(s, ARITH_ADD, reg, val, 0);
}

/* Emit a jump to a TCG label.  'opc' is a JCC_* condition code, or -1
   for an unconditional jump.  Uses the 2-byte short form when the
   label is already resolved and in 8-bit range; forward references
   always use the long form plus a relocation. */
static void tcg_out_jxx(TCGContext *s, int opc, int label_index)
{
    int32_t val, val1;
    TCGLabel *l = &s->labels[label_index];

    if (l->has_value) {
        val = l->u.value - (tcg_target_long)s->code_ptr;
        /* displacement is relative to the end of the 2-byte short jump */
        val1 = val - 2;
        if ((int8_t)val1 == val1) {
            if (opc == -1)
                tcg_out8(s, 0xeb);
            else
                tcg_out8(s, 0x70 + opc);
            tcg_out8(s, val1);
        } else {
            /* long form: 5 bytes unconditional, 6 bytes conditional */
            if (opc == -1) {
                tcg_out8(s, 0xe9);
                tcg_out32(s, val - 5);
            } else {
                tcg_out8(s, 0x0f);
                tcg_out8(s, 0x80 + opc);
                tcg_out32(s, val - 6);
            }
        }
    } else {
        /* forward reference: long form + PC32 reloc on the displacement */
        if (opc == -1) {
            tcg_out8(s, 0xe9);
        } else {
            tcg_out8(s, 0x0f);
            tcg_out8(s, 0x80 + opc);
        }
        tcg_out_reloc(s, s->code_ptr, R_386_PC32, label_index, -4);
        s->code_ptr += 4;
    }
}

/* Compare arg1 with arg2 (register or constant) and branch on 'cond'. */
static void tcg_out_brcond(TCGContext *s, int cond,
                           TCGArg arg1, TCGArg arg2, int const_arg2,
                           int label_index)
{
    if (const_arg2) {
        if (arg2 == 0) {
            /* test r, r */
            tcg_out_modrm(s, 0x85, arg1, arg1);
        } else {
            tgen_arithi(s, ARITH_CMP, arg1, arg2, 0);
        }
    } else {
        /* cmp arg2, arg1 */
        tcg_out_modrm(s, 0x01 | (ARITH_CMP << 3), arg2, arg1);
    }
    tcg_out_jxx(s, tcg_cond_to_jcc[cond], label_index);
}

/* XXX: we implement it at the target level to avoid having to
   handle cross basic blocks temporaries */
/* 64-bit compare-and-branch from two 32-bit halves:
   args[0]/args[1] = low/high of operand 1, args[2]/args[3] = low/high
   of operand 2, args[4] = condition, args[5] = target label.
   For the ordered conditions: branch on the signed/unsigned compare of
   the high words, skip to label_next unless they are equal, then
   decide on an unsigned compare of the low words. */
static void tcg_out_brcond2(TCGContext *s,
                            const TCGArg *args, const int *const_args)
{
    int label_next;
    label_next = gen_new_label();
    switch(args[4]) {
    case TCG_COND_EQ:
        /* equal only if both halves are equal */
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], label_next);
        tcg_out_brcond(s, TCG_COND_EQ, args[1], args[3], const_args[3], args[5]);
        break;
    case TCG_COND_NE:
        /* not equal if either half differs */
        tcg_out_brcond(s, TCG_COND_NE, args[0], args[2], const_args[2], args[5]);
        tcg_out_brcond(s, TCG_COND_NE, args[1], args[3], const_args[3], args[5]);
        break;
    case TCG_COND_LT:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LE:
        tcg_out_brcond(s, TCG_COND_LT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GT:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GE:
        tcg_out_brcond(s, TCG_COND_GT, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LTU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_LEU:
        tcg_out_brcond(s, TCG_COND_LTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_LEU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GTU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GTU, args[0], args[2], const_args[2], args[5]);
        break;
    case TCG_COND_GEU:
        tcg_out_brcond(s, TCG_COND_GTU, args[1], args[3], const_args[3], args[5]);
        tcg_out_jxx(s, JCC_JNE, label_next);
        tcg_out_brcond(s, TCG_COND_GEU, args[0], args[2], const_args[2], args[5]);
        break;
    default:
        tcg_abort();
    }
    tcg_out_label(s, label_next, (tcg_target_long)s->code_ptr);
}

#if defined(CONFIG_SOFTMMU)

#include "../../softmmu_defs.h"

/* Slow-path load helpers, indexed by log2 of the access size. */
static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

/* Slow-path store helpers, indexed by log2 of the access size. */
static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif

/* XXX: qemu_ld and qemu_st could be modified to clobber only EDX and
   EAX. It will be useful once fixed registers globals are less
   common.
 */

/* Generate code for a guest load.  opc bits 0..1 are log2 of the
   access size, bit 2 requests sign extension.  args = data reg(s)
   (low then high for 64-bit), address reg(s), mem_index. */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++; /* high half of a 64-bit load */
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc & 3;

    /* %eax/%edx are reserved as scratch here; the 'L' constraint keeps
       the operands out of them */
    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    /* TLB lookup: r1 = index into the TLB, r0 = page-aligned address */
    tcg_out_mov(s, r1, addr_reg);

    tcg_out_mov(s, r0, addr_reg);

    tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* %ebp holds the CPUState pointer (env) */
    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_read));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++; /* 8-bit displacement patched later */
#else
    /* jne label3 */
    tcg_out8(s, 0x70 + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* TLB miss: call the slow-path helper.
       XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EDX, mem_index);
#else
    tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
#endif
    tcg_out8(s, 0xe8); /* call im */
    tcg_out32(s, (tcg_target_long)qemu_ld_helpers[s_bits] -
              (tcg_target_long)s->code_ptr - 4);

    /* move/extend the helper result (%eax, or %edx:%eax) to data_reg */
    switch(opc) {
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm(s, 0xbe | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm(s, 0xbf | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 0:
        /* movzbl */
        tcg_out_modrm(s, 0xb6 | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm(s, 0xb7 | P_EXT, data_reg, TCG_REG_EAX);
        break;
    case 2:
    default:
        tcg_out_mov(s, data_reg, TCG_REG_EAX);
        break;
    case 3:
        if (data_reg == TCG_REG_EDX) {
            /* moving %eax first would clobber the high half */
            tcg_out_opc(s, 0x90 + TCG_REG_EDX); /* xchg %edx, %eax */
            tcg_out_mov(s, data_reg2, TCG_REG_EAX);
        } else {
            tcg_out_mov(s, data_reg, TCG_REG_EAX);
            tcg_out_mov(s, data_reg2, TCG_REG_EDX);
        }
        break;
    }

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: TLB hit -- patch the forward branch displacement */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_read));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* fast path: direct load through r0 (host address) */
    switch(opc) {
    case 0:
        /* movzbl */
        tcg_out_modrm_offset(s, 0xb6 | P_EXT, data_reg, r0, 0);
        break;
    case 0 | 4:
        /* movsbl */
        tcg_out_modrm_offset(s, 0xbe | P_EXT, data_reg, r0, 0);
        break;
    case 1:
        /* movzwl */
        tcg_out_modrm_offset(s, 0xb7 | P_EXT, data_reg, r0, 0);
        if (bswap) {
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);
        }
        break;
    case 1 | 4:
        /* movswl */
        tcg_out_modrm_offset(s, 0xbf | P_EXT, data_reg, r0, 0);
        if (bswap) {
            /* rolw $8, data_reg */
            tcg_out8(s, 0x66);
            tcg_out_modrm(s, 0xc1, 0, data_reg);
            tcg_out8(s, 8);

            /* movswl data_reg, data_reg */
            tcg_out_modrm(s, 0xbf | P_EXT, data_reg, data_reg);
        }
        break;
    case 2:
        /* movl (r0), data_reg */
        tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
        if (bswap) {
            /* bswap */
            tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);
        }
        break;
    case 3:
        /* XXX: could be nicer */
        if (r0 == data_reg) {
            /* the first 32-bit load would clobber the address */
            r1 = TCG_REG_EDX;
            if (r1 == data_reg)
                r1 = TCG_REG_EAX;
            tcg_out_mov(s, r1, r0);
            r0 = r1;
        }
        if (!bswap) {
            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 0);
            tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 4);
        } else {
            /* big-endian guest: swap both word order and byte order */
            tcg_out_modrm_offset(s, 0x8b, data_reg, r0, 4);
            tcg_out_opc(s, (0xc8 + data_reg) | P_EXT);

            tcg_out_modrm_offset(s, 0x8b, data_reg2, r0, 0);
            /* bswap */
            tcg_out_opc(s, (0xc8 + data_reg2) | P_EXT);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: join point of slow and fast paths */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}


/* Generate code for a guest store.  opc is log2 of the access size.
   args = data reg(s) (low then high for 64-bit), address reg(s),
   mem_index. */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args,
                            int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint8_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint8_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++; /* high half of a 64-bit store */
    else
        data_reg2 = 0;
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;

    s_bits = opc;

    /* %eax/%edx are reserved as scratch here */
    r0 = TCG_REG_EAX;
    r1 = TCG_REG_EDX;

#if defined(CONFIG_SOFTMMU)
    /* TLB lookup, same scheme as tcg_out_qemu_ld but on addr_write */
    tcg_out_mov(s, r1, addr_reg);
tcg_out_mov(s, r0, addr_reg);

    tcg_out_modrm(s, 0xc1, 5, r1); /* shr $x, r1 */
    tcg_out8(s, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);

    tcg_out_modrm(s, 0x81, 4, r0); /* andl $x, r0 */
    tcg_out32(s, TARGET_PAGE_MASK | ((1 << s_bits) - 1));

    tcg_out_modrm(s, 0x81, 4, r1); /* andl $x, r1 */
    tcg_out32(s, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS);

    /* %ebp holds the CPUState pointer (env) */
    tcg_out_opc(s, 0x8d); /* lea offset(r1, %ebp), r1 */
    tcg_out8(s, 0x80 | (r1 << 3) | 0x04);
    tcg_out8(s, (5 << 3) | r1);
    tcg_out32(s, offsetof(CPUState, tlb_table[mem_index][0].addr_write));

    /* cmp 0(r1), r0 */
    tcg_out_modrm_offset(s, 0x3b, r0, r1, 0);

    tcg_out_mov(s, r0, addr_reg);

#if TARGET_LONG_BITS == 32
    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++; /* 8-bit displacement patched later */
#else
    /* jne label3 */
    tcg_out8(s, 0x70 + JCC_JNE);
    label3_ptr = s->code_ptr;
    s->code_ptr++;

    /* cmp 4(r1), addr_reg2 */
    tcg_out_modrm_offset(s, 0x3b, addr_reg2, r1, 4);

    /* je label1 */
    tcg_out8(s, 0x70 + JCC_JE);
    label1_ptr = s->code_ptr;
    s->code_ptr++;

    /* label3: */
    *label3_ptr = s->code_ptr - label3_ptr - 1;
#endif

    /* TLB miss: call the slow-path store helper.
       XXX: move that code at the end of the TB */
#if TARGET_LONG_BITS == 32
    if (opc == 3) {
        /* 64-bit value in %edx/%ecx, mem_index pushed on the stack */
        tcg_out_mov(s, TCG_REG_EDX, data_reg);
        tcg_out_mov(s, TCG_REG_ECX, data_reg2);
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out8(s, 0xe8); /* call im */
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 4);
    } else {
        /* zero-extend the value into %edx for the helper */
        switch(opc) {
        case 0:
            /* movzbl */
            tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_EDX, data_reg);
            break;
        case 1:
            /* movzwl */
            tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_EDX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_EDX, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_ECX, mem_index);
        tcg_out8(s, 0xe8); /* call im */
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
    }
#else
    if (opc == 3) {
        /* 64-bit guest address: high half in %edx, data and mem_index
           passed on the stack */
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out_opc(s, 0x50 + data_reg2); /* push */
        tcg_out_opc(s, 0x50 + data_reg); /* push */
        tcg_out8(s, 0xe8); /* call im */
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 12);
    } else {
        tcg_out_mov(s, TCG_REG_EDX, addr_reg2);
        /* zero-extend the value into %ecx for the helper */
        switch(opc) {
        case 0:
            /* movzbl */
            tcg_out_modrm(s, 0xb6 | P_EXT, TCG_REG_ECX, data_reg);
            break;
        case 1:
            /* movzwl */
            tcg_out_modrm(s, 0xb7 | P_EXT, TCG_REG_ECX, data_reg);
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_ECX, data_reg);
            break;
        }
        tcg_out8(s, 0x6a); /* push Ib */
        tcg_out8(s, mem_index);
        tcg_out8(s, 0xe8); /* call im */
        tcg_out32(s, (tcg_target_long)qemu_st_helpers[s_bits] -
                  (tcg_target_long)s->code_ptr - 4);
        tcg_out_addi(s, TCG_REG_ESP, 4);
    }
#endif

    /* jmp label2 */
    tcg_out8(s, 0xeb);
    label2_ptr = s->code_ptr;
    s->code_ptr++;

    /* label1: TLB hit -- patch the forward branch displacement */
    *label1_ptr = s->code_ptr - label1_ptr - 1;

    /* add x(r1), r0 */
    tcg_out_modrm_offset(s, 0x03, r0, r1, offsetof(CPUTLBEntry, addend) -
                         offsetof(CPUTLBEntry, addr_write));
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 1;
#else
    bswap = 0;
#endif
    /* fast path: direct store through r0 (host address) */
    switch(opc) {
    case 0:
        /* movb */
        tcg_out_modrm_offset(s, 0x88, data_reg, r0, 0);
        break;
    case 1:
        if (bswap) {
            /* byte-swap the value in the scratch register first */
            tcg_out_mov(s, r1, data_reg);
            tcg_out8(s, 0x66); /* rolw $8, %ecx */
            tcg_out_modrm(s, 0xc1, 0, r1);
            tcg_out8(s, 8);
            data_reg = r1;
        }
        /* movw */
        tcg_out8(s, 0x66);
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 2:
        if (bswap) {
            tcg_out_mov(s, r1, data_reg);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            data_reg = r1;
        }
        /* movl */
        tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
        break;
    case 3:
        if (bswap) {
            /* big-endian guest: store swapped halves in swapped order */
            tcg_out_mov(s, r1, data_reg2);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            tcg_out_modrm_offset(s, 0x89, r1, r0, 0);
            tcg_out_mov(s, r1, data_reg);
            /* bswap data_reg */
            tcg_out_opc(s, (0xc8 + r1) | P_EXT);
            tcg_out_modrm_offset(s, 0x89, r1, r0, 4);
        } else {
            tcg_out_modrm_offset(s, 0x89, data_reg, r0, 0);
            tcg_out_modrm_offset(s, 0x89, data_reg2, r0, 4);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: join point of slow and fast paths */
    *label2_ptr = s->code_ptr - label2_ptr - 1;
#endif
}

/* Emit host code for one TCG opcode. */
static inline void tcg_out_op(TCGContext *s, int opc,
                              const TCGArg *args, const int *const_args)
{
    int c;

    switch(opc) {
    case INDEX_op_exit_tb:
        /* return value in %eax, then jump to the common epilogue */
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_EAX, args[0]);
        tcg_out8(s, 0xe9); /* jmp tb_ret_addr */
        tcg_out32(s, tb_ret_addr - s->code_ptr - 4);
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method */
            tcg_out8(s, 0xe9); /* jmp im */
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
            tcg_out32(s, 0);
        } else {
            /* indirect jump method */
            /* jmp Ev */
            tcg_out_modrm_offset(s, 0xff, 4, -1,
                                 (tcg_target_long)(s->tb_next + args[0]));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        if (const_args[0]) {
            /* call im */
            tcg_out8(s, 0xe8);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            /* call *reg */
            tcg_out_modrm(s, 0xff, 2, args[0]);
        }
        break;
    case INDEX_op_jmp:
        if (const_args[0]) {
            /* jmp im */
            tcg_out8(s, 0xe9);
            tcg_out32(s, args[0] - (tcg_target_long)s->code_ptr - 4);
        } else {
            /* jmp *reg */
            tcg_out_modrm(s, 0xff, 4, args[0]);
        }
        break;
    case INDEX_op_br:
tcg_out_jxx(s, JCC_JMP, args[0]); 913 break; 914 case INDEX_op_movi_i32: 915 tcg_out_movi(s, TCG_TYPE_I32, args[0], args[1]); 916 break; 917 case INDEX_op_ld8u_i32: 918 /* movzbl */ 919 tcg_out_modrm_offset(s, 0xb6 | P_EXT, args[0], args[1], args[2]); 920 break; 921 case INDEX_op_ld8s_i32: 922 /* movsbl */ 923 tcg_out_modrm_offset(s, 0xbe | P_EXT, args[0], args[1], args[2]); 924 break; 925 case INDEX_op_ld16u_i32: 926 /* movzwl */ 927 tcg_out_modrm_offset(s, 0xb7 | P_EXT, args[0], args[1], args[2]); 928 break; 929 case INDEX_op_ld16s_i32: 930 /* movswl */ 931 tcg_out_modrm_offset(s, 0xbf | P_EXT, args[0], args[1], args[2]); 932 break; 933 case INDEX_op_ld_i32: 934 /* movl */ 935 tcg_out_modrm_offset(s, 0x8b, args[0], args[1], args[2]); 936 break; 937 case INDEX_op_st8_i32: 938 /* movb */ 939 tcg_out_modrm_offset(s, 0x88, args[0], args[1], args[2]); 940 break; 941 case INDEX_op_st16_i32: 942 /* movw */ 943 tcg_out8(s, 0x66); 944 tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]); 945 break; 946 case INDEX_op_st_i32: 947 /* movl */ 948 tcg_out_modrm_offset(s, 0x89, args[0], args[1], args[2]); 949 break; 950 case INDEX_op_sub_i32: 951 c = ARITH_SUB; 952 goto gen_arith; 953 case INDEX_op_and_i32: 954 c = ARITH_AND; 955 goto gen_arith; 956 case INDEX_op_or_i32: 957 c = ARITH_OR; 958 goto gen_arith; 959 case INDEX_op_xor_i32: 960 c = ARITH_XOR; 961 goto gen_arith; 962 case INDEX_op_add_i32: 963 c = ARITH_ADD; 964 gen_arith: 965 if (const_args[2]) { 966 tgen_arithi(s, c, args[0], args[2], 0); 967 } else { 968 tcg_out_modrm(s, 0x01 | (c << 3), args[2], args[0]); 969 } 970 break; 971 case INDEX_op_mul_i32: 972 if (const_args[2]) { 973 int32_t val; 974 val = args[2]; 975 if (val == (int8_t)val) { 976 tcg_out_modrm(s, 0x6b, args[0], args[0]); 977 tcg_out8(s, val); 978 } else { 979 tcg_out_modrm(s, 0x69, args[0], args[0]); 980 tcg_out32(s, val); 981 } 982 } else { 983 tcg_out_modrm(s, 0xaf | P_EXT, args[0], args[2]); 984 } 985 break; 986 case INDEX_op_mulu2_i32: 987 
tcg_out_modrm(s, 0xf7, 4, args[3]); 988 break; 989 case INDEX_op_div2_i32: 990 tcg_out_modrm(s, 0xf7, 7, args[4]); 991 break; 992 case INDEX_op_divu2_i32: 993 tcg_out_modrm(s, 0xf7, 6, args[4]); 994 break; 995 case INDEX_op_shl_i32: 996 c = SHIFT_SHL; 997 gen_shift32: 998 if (const_args[2]) { 999 if (args[2] == 1) { 1000 tcg_out_modrm(s, 0xd1, c, args[0]); 1001 } else { 1002 tcg_out_modrm(s, 0xc1, c, args[0]); 1003 tcg_out8(s, args[2]); 1004 } 1005 } else { 1006 tcg_out_modrm(s, 0xd3, c, args[0]); 1007 } 1008 break; 1009 case INDEX_op_shr_i32: 1010 c = SHIFT_SHR; 1011 goto gen_shift32; 1012 case INDEX_op_sar_i32: 1013 c = SHIFT_SAR; 1014 goto gen_shift32; 1015 case INDEX_op_rotl_i32: 1016 c = SHIFT_ROL; 1017 goto gen_shift32; 1018 case INDEX_op_rotr_i32: 1019 c = SHIFT_ROR; 1020 goto gen_shift32; 1021 1022 case INDEX_op_add2_i32: 1023 if (const_args[4]) 1024 tgen_arithi(s, ARITH_ADD, args[0], args[4], 1); 1025 else 1026 tcg_out_modrm(s, 0x01 | (ARITH_ADD << 3), args[4], args[0]); 1027 if (const_args[5]) 1028 tgen_arithi(s, ARITH_ADC, args[1], args[5], 1); 1029 else 1030 tcg_out_modrm(s, 0x01 | (ARITH_ADC << 3), args[5], args[1]); 1031 break; 1032 case INDEX_op_sub2_i32: 1033 if (const_args[4]) 1034 tgen_arithi(s, ARITH_SUB, args[0], args[4], 1); 1035 else 1036 tcg_out_modrm(s, 0x01 | (ARITH_SUB << 3), args[4], args[0]); 1037 if (const_args[5]) 1038 tgen_arithi(s, ARITH_SBB, args[1], args[5], 1); 1039 else 1040 tcg_out_modrm(s, 0x01 | (ARITH_SBB << 3), args[5], args[1]); 1041 break; 1042 case INDEX_op_brcond_i32: 1043 tcg_out_brcond(s, args[2], args[0], args[1], const_args[1], args[3]); 1044 break; 1045 case INDEX_op_brcond2_i32: 1046 tcg_out_brcond2(s, args, const_args); 1047 break; 1048 1049 case INDEX_op_bswap16_i32: 1050 tcg_out8(s, 0x66); 1051 tcg_out_modrm(s, 0xc1, SHIFT_ROL, args[0]); 1052 tcg_out8(s, 8); 1053 break; 1054 case INDEX_op_bswap32_i32: 1055 tcg_out_opc(s, (0xc8 + args[0]) | P_EXT); 1056 break; 1057 1058 case INDEX_op_neg_i32: 1059 
tcg_out_modrm(s, 0xf7, 3, args[0]); 1060 break; 1061 1062 case INDEX_op_not_i32: 1063 tcg_out_modrm(s, 0xf7, 2, args[0]); 1064 break; 1065 1066 case INDEX_op_ext8s_i32: 1067 tcg_out_modrm(s, 0xbe | P_EXT, args[0], args[1]); 1068 break; 1069 case INDEX_op_ext16s_i32: 1070 tcg_out_modrm(s, 0xbf | P_EXT, args[0], args[1]); 1071 break; 1072 1073 case INDEX_op_qemu_ld8u: 1074 tcg_out_qemu_ld(s, args, 0); 1075 break; 1076 case INDEX_op_qemu_ld8s: 1077 tcg_out_qemu_ld(s, args, 0 | 4); 1078 break; 1079 case INDEX_op_qemu_ld16u: 1080 tcg_out_qemu_ld(s, args, 1); 1081 break; 1082 case INDEX_op_qemu_ld16s: 1083 tcg_out_qemu_ld(s, args, 1 | 4); 1084 break; 1085 case INDEX_op_qemu_ld32u: 1086 tcg_out_qemu_ld(s, args, 2); 1087 break; 1088 case INDEX_op_qemu_ld64: 1089 tcg_out_qemu_ld(s, args, 3); 1090 break; 1091 1092 case INDEX_op_qemu_st8: 1093 tcg_out_qemu_st(s, args, 0); 1094 break; 1095 case INDEX_op_qemu_st16: 1096 tcg_out_qemu_st(s, args, 1); 1097 break; 1098 case INDEX_op_qemu_st32: 1099 tcg_out_qemu_st(s, args, 2); 1100 break; 1101 case INDEX_op_qemu_st64: 1102 tcg_out_qemu_st(s, args, 3); 1103 break; 1104 1105 default: 1106 tcg_abort(); 1107 } 1108 } 1109 1110 static const TCGTargetOpDef x86_op_defs[] = { 1111 { INDEX_op_exit_tb, { } }, 1112 { INDEX_op_goto_tb, { } }, 1113 { INDEX_op_call, { "ri" } }, 1114 { INDEX_op_jmp, { "ri" } }, 1115 { INDEX_op_br, { } }, 1116 { INDEX_op_mov_i32, { "r", "r" } }, 1117 { INDEX_op_movi_i32, { "r" } }, 1118 { INDEX_op_ld8u_i32, { "r", "r" } }, 1119 { INDEX_op_ld8s_i32, { "r", "r" } }, 1120 { INDEX_op_ld16u_i32, { "r", "r" } }, 1121 { INDEX_op_ld16s_i32, { "r", "r" } }, 1122 { INDEX_op_ld_i32, { "r", "r" } }, 1123 { INDEX_op_st8_i32, { "q", "r" } }, 1124 { INDEX_op_st16_i32, { "r", "r" } }, 1125 { INDEX_op_st_i32, { "r", "r" } }, 1126 1127 { INDEX_op_add_i32, { "r", "0", "ri" } }, 1128 { INDEX_op_sub_i32, { "r", "0", "ri" } }, 1129 { INDEX_op_mul_i32, { "r", "0", "ri" } }, 1130 { INDEX_op_mulu2_i32, { "a", "d", "a", "r" } }, 1131 { 
INDEX_op_div2_i32, { "a", "d", "0", "1", "r" } }, 1132 { INDEX_op_divu2_i32, { "a", "d", "0", "1", "r" } }, 1133 { INDEX_op_and_i32, { "r", "0", "ri" } }, 1134 { INDEX_op_or_i32, { "r", "0", "ri" } }, 1135 { INDEX_op_xor_i32, { "r", "0", "ri" } }, 1136 1137 { INDEX_op_shl_i32, { "r", "0", "ci" } }, 1138 { INDEX_op_shr_i32, { "r", "0", "ci" } }, 1139 { INDEX_op_sar_i32, { "r", "0", "ci" } }, 1140 { INDEX_op_sar_i32, { "r", "0", "ci" } }, 1141 { INDEX_op_rotl_i32, { "r", "0", "ci" } }, 1142 { INDEX_op_rotr_i32, { "r", "0", "ci" } }, 1143 1144 { INDEX_op_brcond_i32, { "r", "ri" } }, 1145 1146 { INDEX_op_add2_i32, { "r", "r", "0", "1", "ri", "ri" } }, 1147 { INDEX_op_sub2_i32, { "r", "r", "0", "1", "ri", "ri" } }, 1148 { INDEX_op_brcond2_i32, { "r", "r", "ri", "ri" } }, 1149 1150 { INDEX_op_bswap16_i32, { "r", "0" } }, 1151 { INDEX_op_bswap32_i32, { "r", "0" } }, 1152 1153 { INDEX_op_neg_i32, { "r", "0" } }, 1154 1155 { INDEX_op_not_i32, { "r", "0" } }, 1156 1157 { INDEX_op_ext8s_i32, { "r", "q" } }, 1158 { INDEX_op_ext16s_i32, { "r", "r" } }, 1159 1160 #if TARGET_LONG_BITS == 32 1161 { INDEX_op_qemu_ld8u, { "r", "L" } }, 1162 { INDEX_op_qemu_ld8s, { "r", "L" } }, 1163 { INDEX_op_qemu_ld16u, { "r", "L" } }, 1164 { INDEX_op_qemu_ld16s, { "r", "L" } }, 1165 { INDEX_op_qemu_ld32u, { "r", "L" } }, 1166 { INDEX_op_qemu_ld64, { "r", "r", "L" } }, 1167 1168 { INDEX_op_qemu_st8, { "cb", "L" } }, 1169 { INDEX_op_qemu_st16, { "L", "L" } }, 1170 { INDEX_op_qemu_st32, { "L", "L" } }, 1171 { INDEX_op_qemu_st64, { "L", "L", "L" } }, 1172 #else 1173 { INDEX_op_qemu_ld8u, { "r", "L", "L" } }, 1174 { INDEX_op_qemu_ld8s, { "r", "L", "L" } }, 1175 { INDEX_op_qemu_ld16u, { "r", "L", "L" } }, 1176 { INDEX_op_qemu_ld16s, { "r", "L", "L" } }, 1177 { INDEX_op_qemu_ld32u, { "r", "L", "L" } }, 1178 { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } }, 1179 1180 { INDEX_op_qemu_st8, { "cb", "L", "L" } }, 1181 { INDEX_op_qemu_st16, { "L", "L", "L" } }, 1182 { INDEX_op_qemu_st32, { "L", "L", "L" } }, 
1183 { INDEX_op_qemu_st64, { "L", "L", "L", "L" } }, 1184 #endif 1185 { -1 }, 1186 }; 1187 1188 static int tcg_target_callee_save_regs[] = { 1189 /* TCG_REG_EBP, */ /* currently used for the global env, so no 1190 need to save */ 1191 TCG_REG_EBX, 1192 TCG_REG_ESI, 1193 TCG_REG_EDI, 1194 }; 1195 1196 static inline void tcg_out_push(TCGContext *s, int reg) 1197 { 1198 tcg_out_opc(s, 0x50 + reg); 1199 } 1200 1201 static inline void tcg_out_pop(TCGContext *s, int reg) 1202 { 1203 tcg_out_opc(s, 0x58 + reg); 1204 } 1205 1206 /* Generate global QEMU prologue and epilogue code */ 1207 void tcg_target_qemu_prologue(TCGContext *s) 1208 { 1209 int i, frame_size, push_size, stack_addend; 1210 1211 /* TB prologue */ 1212 /* save all callee saved registers */ 1213 for(i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { 1214 tcg_out_push(s, tcg_target_callee_save_regs[i]); 1215 } 1216 /* reserve some stack space */ 1217 push_size = 4 + ARRAY_SIZE(tcg_target_callee_save_regs) * 4; 1218 frame_size = push_size + TCG_STATIC_CALL_ARGS_SIZE; 1219 frame_size = (frame_size + TCG_TARGET_STACK_ALIGN - 1) & 1220 ~(TCG_TARGET_STACK_ALIGN - 1); 1221 stack_addend = frame_size - push_size; 1222 tcg_out_addi(s, TCG_REG_ESP, -stack_addend); 1223 1224 tcg_out_modrm(s, 0xff, 4, TCG_REG_EAX); /* jmp *%eax */ 1225 1226 /* TB epilogue */ 1227 tb_ret_addr = s->code_ptr; 1228 tcg_out_addi(s, TCG_REG_ESP, stack_addend); 1229 for(i = ARRAY_SIZE(tcg_target_callee_save_regs) - 1; i >= 0; i--) { 1230 tcg_out_pop(s, tcg_target_callee_save_regs[i]); 1231 } 1232 tcg_out8(s, 0xc3); /* ret */ 1233 } 1234 1235 void tcg_target_init(TCGContext *s) 1236 { 1237 /* fail safe */ 1238 if ((1 << CPU_TLB_ENTRY_BITS) != sizeof(CPUTLBEntry)) 1239 tcg_abort(); 1240 1241 tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xff); 1242 tcg_regset_set32(tcg_target_call_clobber_regs, 0, 1243 (1 << TCG_REG_EAX) | 1244 (1 << TCG_REG_EDX) | 1245 (1 << TCG_REG_ECX)); 1246 1247 tcg_regset_clear(s->reserved_regs); 1248 
tcg_regset_set_reg(s->reserved_regs, TCG_REG_ESP); 1249 1250 tcg_add_target_add_op_defs(x86_op_defs); 1251 } 1252