/*
 *  i386 emulator main execution loop
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#include "exec.h"
#include "disas.h"
#include "tcg.h"
#include "kvm.h"
#include "hax.h"
#include "qemu-barrier.h"

#if !defined(CONFIG_SOFTMMU)
#undef EAX
#undef ECX
#undef EDX
#undef EBX
#undef ESP
#undef EBP
#undef ESI
#undef EDI
#undef EIP
#include <signal.h>
#ifdef __linux__
#include <sys/ucontext.h>
#endif
#endif

#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
// Work around ugly bugs in glibc that mangle global register contents
#undef env
#define env cpu_single_env
#endif

int tb_invalidated_flag;

//#define CONFIG_DEBUG_EXEC
//#define DEBUG_SIGNAL
     53 
     54 int qemu_cpu_has_work(CPUState *env)
     55 {
     56     return cpu_has_work(env);
     57 }
     58 
     59 void cpu_loop_exit(void)
     60 {
     61     env->current_tb = NULL;
     62     longjmp(env->jmp_env, 1);
     63 }
     64 
     65 /* exit the current TB from a signal handler. The host registers are
     66    restored in a state compatible with the CPU emulator
     67  */
     68 void cpu_resume_from_signal(CPUState *env1, void *puc)
     69 {
     70 #if !defined(CONFIG_SOFTMMU)
     71 #ifdef __linux__
     72     struct ucontext *uc = puc;
     73 #elif defined(__OpenBSD__)
     74     struct sigcontext *uc = puc;
     75 #endif
     76 #endif
     77 
     78     env = env1;
     79 
     80     /* XXX: restore cpu registers saved in host registers */
     81 
     82 #if !defined(CONFIG_SOFTMMU)
     83     if (puc) {
     84         /* XXX: use siglongjmp ? */
     85 #ifdef __linux__
     86 #ifdef __ia64
     87         sigprocmask(SIG_SETMASK, (sigset_t *)&uc->uc_sigmask, NULL);
     88 #else
     89         sigprocmask(SIG_SETMASK, &uc->uc_sigmask, NULL);
     90 #endif
     91 #elif defined(__OpenBSD__)
     92         sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
     93 #endif
     94     }
     95 #endif
     96     env->exception_index = -1;
     97     longjmp(env->jmp_env, 1);
     98 }
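
/* Illustrative sketch (standard C, not QEMU-specific, added note) of the
   setjmp/longjmp contract that cpu_loop_exit() and cpu_resume_from_signal()
   rely on:

       jmp_buf jb;
       if (setjmp(jb) == 0) {
           ...                 // run translated code
           longjmp(jb, 1);     // a deep exception unwinds back here...
       } else {
           ...                 // ...and execution resumes at setjmp(),
                               // which now returns the value 1
       }
*/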

/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(int max_cycles, TranslationBlock *orig_tb)
{
    unsigned long next_tb;
    TranslationBlock *tb;

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    if (max_cycles > CF_COUNT_MASK)
        max_cycles = CF_COUNT_MASK;

    tb = tb_gen_code(env, orig_tb->pc, orig_tb->cs_base, orig_tb->flags,
                     max_cycles);
    env->current_tb = tb;
    /* execute the generated code */
    next_tb = tcg_qemu_tb_exec(tb->tc_ptr);
    env->current_tb = NULL;

    if ((next_tb & 3) == 2) {
        /* Restore PC.  This may happen if an async event occurs before
           the TB starts executing.  */
        cpu_pc_from_tb(env, tb);
    }
    tb_phys_invalidate(tb, -1);
    tb_free(tb);
}

static TranslationBlock *tb_find_slow(target_ulong pc,
                                      target_ulong cs_base,
                                      uint64_t flags)
{
    TranslationBlock *tb, **ptb1;
    unsigned int h;
    target_ulong phys_pc, phys_page1, phys_page2, virt_page2;

    tb_invalidated_flag = 0;

    /* find translated block using physical mappings */
    phys_pc = get_phys_addr_code(env, pc);
    phys_page1 = phys_pc & TARGET_PAGE_MASK;
    phys_page2 = -1;
    h = tb_phys_hash_func(phys_pc);
    ptb1 = &tb_phys_hash[h];
    for(;;) {
        tb = *ptb1;
        if (!tb)
            goto not_found;
        if (tb->pc == pc &&
            tb->page_addr[0] == phys_page1 &&
            tb->cs_base == cs_base &&
            tb->flags == flags) {
            /* check next page if needed */
            if (tb->page_addr[1] != -1) {
                virt_page2 = (pc & TARGET_PAGE_MASK) +
                    TARGET_PAGE_SIZE;
                phys_page2 = get_phys_addr_code(env, virt_page2);
                if (tb->page_addr[1] == phys_page2)
                    goto found;
            } else {
                goto found;
            }
        }
        ptb1 = &tb->phys_hash_next;
    }
 not_found:
    /* if no translated code available, then translate it now */
    tb = tb_gen_code(env, pc, cs_base, flags, 0);

 found:
    /* Move the last found TB to the head of the list */
    if (likely(*ptb1)) {
        *ptb1 = tb->phys_hash_next;
        tb->phys_hash_next = tb_phys_hash[h];
        tb_phys_hash[h] = tb;
    }
    /* we add the TB in the virtual pc hash table */
    env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)] = tb;
    return tb;
}
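
/* Illustrative sketch (hypothetical sizes, not QEMU's exact code, added
   note) of the direct-mapped "jump cache" that tb_find_fast() below
   consults before falling back to tb_find_slow():

       #define JMP_CACHE_BITS 12
       #define JMP_CACHE_SIZE (1 << JMP_CACHE_BITS)

       static inline unsigned int jmp_cache_hash(target_ulong pc)
       {
           return (pc >> 2) & (JMP_CACHE_SIZE - 1);  // low PC bits index the table
       }

   A hit still has to be validated against pc/cs_base/flags, since two
   different guest PCs can hash to the same slot. */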

static inline TranslationBlock *tb_find_fast(void)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    int flags;

    /* we record a subset of the CPU state. It will
       always be the same before a given translated block
       is executed. */
    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
    tb = env->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                 tb->flags != flags)) {
        tb = tb_find_slow(pc, cs_base, flags);
    }
    return tb;
}

static CPUDebugExcpHandler *debug_excp_handler;

CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
{
    CPUDebugExcpHandler *old_handler = debug_excp_handler;

    debug_excp_handler = handler;
    return old_handler;
}

static void cpu_handle_debug_exception(CPUState *env)
{
    CPUWatchpoint *wp;

    if (!env->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
    if (debug_excp_handler) {
        debug_excp_handler(env);
    }
}
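
/* Illustrative use of the hook above (my_debug_hook is a hypothetical
   name, added note):

       static void my_debug_hook(CPUState *env)
       {
           // inspect env->watchpoint_hit, report to a debugger stub, ...
       }

       CPUDebugExcpHandler *prev = cpu_set_debug_excp_handler(my_debug_hook);

   The previous handler is returned so a caller can chain to or restore it. */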

/* main execution loop */

volatile sig_atomic_t exit_request;

/*
 * QEMU-side emulation is used either for MMIO accesses or when the vCPU
 * is in an emulation-only mode (e.g. non-paged mode). In the MMIO case,
 * a pending interrupt must not be injected by QEMU, because only a single
 * instruction is emulated before control returns to the HAX kernel module.
 */
int need_handle_intr_request(CPUState *env)
{
#ifdef CONFIG_HAX
    if (!hax_enabled() || hax_vcpu_emulation_mode(env))
        return env->interrupt_request;
    return 0;
#else
    return env->interrupt_request;
#endif
}

int cpu_exec(CPUState *env1)
{
    volatile host_reg_t saved_env_reg;
    int ret, interrupt_request;
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    unsigned long next_tb;

    if (env1->halted) {
        if (!cpu_has_work(env1)) {
            return EXCP_HALTED;
        }

        env1->halted = 0;
    }

    cpu_single_env = env1;

    /* the access to env below is actually saving the global register's
       value, so that files not including target-xyz/exec.h are free to
       use it.  */
    QEMU_BUILD_BUG_ON (sizeof (saved_env_reg) != sizeof (env));
    saved_env_reg = (host_reg_t) env;
    barrier();
    env = env1;

    if (unlikely(exit_request)) {
        env->exit_request = 1;
    }

#if defined(TARGET_I386)
    if (!kvm_enabled()) {
        /* put eflags in CPU temporary format */
        CC_SRC = env->eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
        DF = 1 - (2 * ((env->eflags >> 10) & 1));
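        /* Worked example (added note): eflags bit 10 is DF, so the
           expression above maps DF=0 to +1 and DF=1 to -1, e.g.
           eflags = 0x400 -> ((0x400 >> 10) & 1) = 1 -> DF = 1 - 2 = -1,
           i.e. string instructions will decrement their index registers. */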
        CC_OP = CC_OP_EFLAGS;
        env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    }
#elif defined(TARGET_SPARC)
#elif defined(TARGET_M68K)
    env->cc_op = CC_OP_FLAGS;
    env->cc_dest = env->sr & 0xf;
    env->cc_x = (env->sr >> 4) & 1;
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_ARM)
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif
    env->exception_index = -1;

    /* prepare setjmp context for exception handling */
    for(;;) {
        if (setjmp(env->jmp_env) == 0) {
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
            env = cpu_single_env;
#define env cpu_single_env
#endif
            /* if an exception is pending, we execute it here */
            if (env->exception_index >= 0) {
                if (env->exception_index >= EXCP_INTERRUPT) {
                    /* exit request from the cpu execution loop */
                    ret = env->exception_index;
                    if (ret == EXCP_DEBUG) {
                        cpu_handle_debug_exception(env);
                    }
                    break;
                } else {
#if defined(CONFIG_USER_ONLY)
                    /* if user mode only, we simulate a fake exception
                       which will be handled outside the cpu execution
                       loop */
#if defined(TARGET_I386)
                    do_interrupt_user(env->exception_index,
                                      env->exception_is_int,
                                      env->error_code,
                                      env->exception_next_eip);
                    /* successfully delivered */
                    env->old_exception = -1;
#endif
                    ret = env->exception_index;
                    break;
#else
#if defined(TARGET_I386)
                    /* simulate a real cpu exception. On i386, it can
                       trigger new exceptions, but we do not handle
                       double or triple faults yet. */
                    do_interrupt(env->exception_index,
                                 env->exception_is_int,
                                 env->error_code,
                                 env->exception_next_eip, 0);
                    /* successfully delivered */
                    env->old_exception = -1;
#elif defined(TARGET_PPC)
                    do_interrupt(env);
#elif defined(TARGET_LM32)
                    do_interrupt(env);
#elif defined(TARGET_MICROBLAZE)
                    do_interrupt(env);
#elif defined(TARGET_MIPS)
                    do_interrupt(env);
#elif defined(TARGET_SPARC)
                    do_interrupt(env);
#elif defined(TARGET_ARM)
                    do_interrupt(env);
#elif defined(TARGET_UNICORE32)
                    do_interrupt(env);
#elif defined(TARGET_SH4)
                    do_interrupt(env);
#elif defined(TARGET_ALPHA)
                    do_interrupt(env);
#elif defined(TARGET_CRIS)
                    do_interrupt(env);
#elif defined(TARGET_M68K)
                    do_interrupt(0);
#elif defined(TARGET_S390X)
                    do_interrupt(env);
#endif
                    env->exception_index = -1;
#endif
                }
            }

#ifdef CONFIG_HAX
            if (hax_enabled() && !hax_vcpu_exec(env))
                longjmp(env->jmp_env, 1);
#endif

            if (kvm_enabled()) {
                kvm_cpu_exec(env);
                longjmp(env->jmp_env, 1);
            }

            next_tb = 0; /* force lookup of first TB */
            for(;;) {
                interrupt_request = env->interrupt_request;
                if (unlikely(need_handle_intr_request(env))) {
                    if (unlikely(env->singlestep_enabled & SSTEP_NOIRQ)) {
                        /* Mask out external interrupts for this step. */
                        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
                    }
                    if (interrupt_request & CPU_INTERRUPT_DEBUG) {
                        env->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
                        env->exception_index = EXCP_DEBUG;
                        cpu_loop_exit();
                    }
#if defined(TARGET_ARM) || defined(TARGET_SPARC) || defined(TARGET_MIPS) || \
    defined(TARGET_PPC) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) || \
    defined(TARGET_MICROBLAZE) || defined(TARGET_LM32) || defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HALT) {
                        env->interrupt_request &= ~CPU_INTERRUPT_HALT;
                        env->halted = 1;
                        env->exception_index = EXCP_HLT;
                        cpu_loop_exit();
                    }
#endif
#if defined(TARGET_I386)
                    if (interrupt_request & CPU_INTERRUPT_INIT) {
                        svm_check_intercept(SVM_EXIT_INIT);
                        do_cpu_init(env);
                        env->exception_index = EXCP_HALTED;
                        cpu_loop_exit();
                    } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
                        do_cpu_sipi(env);
                    } else if (env->hflags2 & HF2_GIF_MASK) {
                        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                            !(env->hflags & HF_SMM_MASK)) {
                            svm_check_intercept(SVM_EXIT_SMI);
                            env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                            do_smm_enter();
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                                   !(env->hflags2 & HF2_NMI_MASK)) {
                            env->interrupt_request &= ~CPU_INTERRUPT_NMI;
                            env->hflags2 |= HF2_NMI_MASK;
                            do_interrupt(EXCP02_NMI, 0, 0, 0, 1);
                            next_tb = 0;
                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                            env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                            do_interrupt(EXCP12_MCHK, 0, 0, 0, 0);
                            next_tb = 0;
                        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                                   (((env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->hflags2 & HF2_HIF_MASK)) ||
                                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                                     (env->eflags & IF_MASK &&
                                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                            int intno;
                            svm_check_intercept(SVM_EXIT_INTR);
                            env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                            intno = cpu_get_pic_interrupt(env);
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                            env = cpu_single_env;
#define env cpu_single_env
#endif
                            do_interrupt(intno, 0, 0, 0, 1);
                            /* ensure that no TB jump will be modified as
                               the program flow was changed */
                            next_tb = 0;
#if !defined(CONFIG_USER_ONLY)
                        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                                   (env->eflags & IF_MASK) &&
                                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                            int intno;
                            /* FIXME: this should respect TPR */
                            svm_check_intercept(SVM_EXIT_VINTR);
                            intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                            qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                            do_interrupt(intno, 0, 0, 0, 1);
                            env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
                            next_tb = 0;
#endif
                        }
                    }
#elif defined(TARGET_PPC)
#if 0
                    if ((interrupt_request & CPU_INTERRUPT_RESET)) {
                        cpu_reset(env);
                    }
#endif
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        ppc_hw_interrupt(env);
                        if (env->pending_interrupts == 0)
                            env->interrupt_request &= ~CPU_INTERRUPT_HARD;
                        next_tb = 0;
                    }
#elif defined(TARGET_LM32)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->ie & IE_IE)) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MICROBLAZE)
                    if ((interrupt_request & CPU_INTERRUPT_HARD)
                        && (env->sregs[SR_MSR] & MSR_IE)
                        && !(env->sregs[SR_MSR] & (MSR_EIP | MSR_BIP))
                        && !(env->iflags & (D_FLAG | IMM_FLAG))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_MIPS)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        cpu_mips_hw_interrupts_pending(env)) {
                        /* Raise it */
                        env->exception_index = EXCP_EXT_INTERRUPT;
                        env->error_code = 0;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SPARC)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        if (cpu_interrupts_enabled(env) &&
                            env->interrupt_index > 0) {
                            int pil = env->interrupt_index & 0xf;
                            int type = env->interrupt_index & 0xf0;

                            if (((type == TT_EXTINT) &&
                                  cpu_pil_allowed(env, pil)) ||
                                  type != TT_EXTINT) {
                                env->exception_index = env->interrupt_index;
                                do_interrupt(env);
                                next_tb = 0;
                            }
                        }
                    }
#elif defined(TARGET_ARM)
                    if (interrupt_request & CPU_INTERRUPT_FIQ
                        && !(env->uncached_cpsr & CPSR_F)) {
                        env->exception_index = EXCP_FIQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    /* ARMv7-M interrupt return works by loading a magic value
                       into the PC.  On real hardware the load causes the
                       return to occur.  The qemu implementation performs the
                       jump normally, then does the exception return when the
                       CPU tries to execute code at the magic address.
                       This will cause the magic PC value to be pushed to
                       the stack if an interrupt occurred at the wrong time.
                       We avoid this by disabling interrupts when
                       pc contains a magic address.  */
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((IS_M(env) && env->regs[15] < 0xfffffff0)
                            || !(env->uncached_cpsr & CPSR_I))) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
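                    /* Added note: on ARMv7-M the magic EXC_RETURN values
                       live in 0xfffffff0..0xffffffff (e.g. 0xfffffff9 means
                       "return to Thread mode using the main stack"), which
                       is why the test above only delivers IRQs while
                       regs[15] < 0xfffffff0. */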
#elif defined(TARGET_UNICORE32)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && !(env->uncached_asr & ASR_I)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_SH4)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_ALPHA)
                    if (interrupt_request & CPU_INTERRUPT_HARD) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_CRIS)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && (env->pregs[PR_CCS] & I_FLAG)
                        && !env->locked_irq) {
                        env->exception_index = EXCP_IRQ;
                        do_interrupt(env);
                        next_tb = 0;
                    }
                    if (interrupt_request & CPU_INTERRUPT_NMI
                        && (env->pregs[PR_CCS] & M_FLAG)) {
                        env->exception_index = EXCP_NMI;
                        do_interrupt(env);
                        next_tb = 0;
                    }
#elif defined(TARGET_M68K)
                    if (interrupt_request & CPU_INTERRUPT_HARD
                        && ((env->sr & SR_I) >> SR_I_SHIFT)
                            < env->pending_level) {
                        /* Real hardware gets the interrupt vector via an
                           IACK cycle at this point.  Current emulated
                           hardware doesn't rely on this, so we
                           provide/save the vector when the interrupt is
                           first signalled.  */
                        env->exception_index = env->pending_vector;
                        do_interrupt(1);
                        next_tb = 0;
                    }
#elif defined(TARGET_S390X) && !defined(CONFIG_USER_ONLY)
                    if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                        (env->psw.mask & PSW_MASK_EXT)) {
                        do_interrupt(env);
                        next_tb = 0;
                    }
#endif
                    /* Don't use the cached interrupt_request value,
                       as do_interrupt may have updated the EXITTB flag. */
                    if (env->interrupt_request & CPU_INTERRUPT_EXITTB) {
                        env->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
                        /* ensure that no TB jump will be modified as
                           the program flow was changed */
                        next_tb = 0;
                    }
                }
                if (unlikely(env->exit_request)) {
                    env->exit_request = 0;
                    env->exception_index = EXCP_INTERRUPT;
                    cpu_loop_exit();
                }
#if defined(DEBUG_DISAS) || defined(CONFIG_DEBUG_EXEC)
                if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
                    /* restore flags in standard format */
#if defined(TARGET_I386)
                    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
                    log_cpu_state(env, X86_DUMP_CCOP);
                    env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
#elif defined(TARGET_M68K)
                    cpu_m68k_flush_flags(env, env->cc_op);
                    env->cc_op = CC_OP_FLAGS;
                    env->sr = (env->sr & 0xffe0)
                              | env->cc_dest | (env->cc_x << 4);
                    log_cpu_state(env, 0);
#else
                    log_cpu_state(env, 0);
#endif
                }
#endif /* DEBUG_DISAS || CONFIG_DEBUG_EXEC */
                spin_lock(&tb_lock);
                tb = tb_find_fast();
                /* Note: we do it here to avoid a gcc bug on Mac OS X when
                   doing it in tb_find_slow */
                if (tb_invalidated_flag) {
                    /* as some TB could have been invalidated because
                       of memory exceptions while generating the code, we
                       must recompute the hash index here */
                    next_tb = 0;
                    tb_invalidated_flag = 0;
                }
#ifdef CONFIG_DEBUG_EXEC
                qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
                             (long)tb->tc_ptr, tb->pc,
                             lookup_symbol(tb->pc));
#endif
                /* see if we can patch the calling TB. When the TB
                   spans two pages, we cannot safely do a direct
                   jump. */
                if (next_tb != 0 && tb->page_addr[1] == -1) {
                    tb_add_jump((TranslationBlock *)(next_tb & ~3), next_tb & 3, tb);
                }
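                /* Added note: since a TranslationBlock is at least
                   word-aligned, the low two bits of next_tb are free and
                   encode extra state; an illustrative decomposition of the
                   packing used above:

                       TranslationBlock *prev = (TranslationBlock *)(next_tb & ~3);
                       int slot = next_tb & 3;  // 0 or 1: jump slot to chain
                                                // 2: instruction count expired
                */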
                spin_unlock(&tb_lock);

                /* cpu_interrupt might be called while translating the
                   TB, but before it is linked into a potentially
                   infinite loop and becomes env->current_tb. Avoid
                   starting execution if there is a pending interrupt. */
                env->current_tb = tb;
                barrier();
                if (likely(!env->exit_request)) {
                    tc_ptr = tb->tc_ptr;
                    /* execute the generated code */
#if defined(__sparc__) && !defined(CONFIG_SOLARIS)
#undef env
                    env = cpu_single_env;
#define env cpu_single_env
#endif
                    next_tb = tcg_qemu_tb_exec(tc_ptr);
                    if ((next_tb & 3) == 2) {
                        /* Instruction counter expired.  */
                        int insns_left;
                        tb = (TranslationBlock *)(long)(next_tb & ~3);
                        /* Restore PC.  */
                        cpu_pc_from_tb(env, tb);
                        insns_left = env->icount_decr.u32;
                        if (env->icount_extra && insns_left >= 0) {
                            /* Refill decrementer and continue execution.  */
                            env->icount_extra += insns_left;
                            if (env->icount_extra > 0xffff) {
                                insns_left = 0xffff;
                            } else {
                                insns_left = env->icount_extra;
                            }
                            env->icount_extra -= insns_left;
                            env->icount_decr.u16.low = insns_left;
                        } else {
                            if (insns_left > 0) {
                                /* Execute remaining instructions.  */
                                cpu_exec_nocache(insns_left, tb);
                            }
                            env->exception_index = EXCP_INTERRUPT;
                            next_tb = 0;
                            cpu_loop_exit();
                        }
                    }
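                    /* Worked example (added note): with icount_extra = 0x12345
                       and insns_left = 0, the refill above runs at most
                       0xffff guest instructions per TB execution: insns_left
                       becomes 0xffff, icount_extra drops to 0x2346, and the
                       16-bit decrementer u16.low is reloaded with 0xffff. */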
                }
                env->current_tb = NULL;
#ifdef CONFIG_HAX
                if (hax_enabled() && hax_stop_emulation(env))
                    cpu_loop_exit();
#endif
                /* reset soft MMU for next block (it can currently
                   only be set by a memory fault) */
            } /* for(;;) */
        }
    } /* for(;;) */

#if defined(TARGET_I386)
    /* restore flags in standard format */
    env->eflags = env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
#elif defined(TARGET_ARM)
    /* XXX: Save/restore host fpu exception state?  */
#elif defined(TARGET_UNICORE32)
#elif defined(TARGET_SPARC)
#elif defined(TARGET_PPC)
#elif defined(TARGET_LM32)
#elif defined(TARGET_M68K)
    cpu_m68k_flush_flags(env, env->cc_op);
    env->cc_op = CC_OP_FLAGS;
    env->sr = (env->sr & 0xffe0)
              | env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
    /* XXXXX */
#else
#error unsupported target CPU
#endif

    /* restore global registers */
    barrier();
    env = (void *) saved_env_reg;

    /* fail-safe: never use cpu_single_env outside cpu_exec() */
    cpu_single_env = NULL;
    return ret;
}

/* must only be called from the generated code as an exception can be
   generated */
void tb_invalidate_page_range(target_ulong start, target_ulong end)
{
    /* XXX: cannot enable it yet, because it can raise an MMU exception
       in which NIP != read address on PowerPC */
#if 0
    target_ulong phys_addr;
    phys_addr = get_phys_addr_code(env, start);
    tb_invalidate_phys_page_range(phys_addr, phys_addr + end - start, 0);
#endif
}

#if defined(TARGET_I386) && defined(CONFIG_USER_ONLY)

void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(seg_reg, selector);
    }
    env = saved_env;
}
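
/* Worked example (added note): in real mode or vm86 mode the branch above
   derives the segment base directly from the selector, e.g. loading
   selector 0x1234 yields base 0x1234 << 4 = 0x12340 with a 64 KiB limit. */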

void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_fsave(ptr, data32);

    env = saved_env;
}

void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = s;

    helper_frstor(ptr, data32);

    env = saved_env;
}

#endif /* TARGET_I386 */

#if !defined(CONFIG_SOFTMMU)

#if defined(TARGET_I386)
#define EXCEPTION_ACTION raise_exception_err(env->exception_index, env->error_code)
#else
#define EXCEPTION_ACTION cpu_loop_exit()
#endif

/* 'pc' is the host PC at which the exception was raised. 'address' is
   the effective address of the memory exception. 'is_write' is 1 if a
   write caused the exception, 0 otherwise. 'old_set' is the signal set
   that should be restored. */
static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
                                    int is_write, sigset_t *old_set,
                                    void *puc)
{
    TranslationBlock *tb;
    int ret;

    if (cpu_single_env)
        env = cpu_single_env; /* XXX: find a correct solution for multithread */
#if defined(DEBUG_SIGNAL)
    qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
                pc, address, is_write, *(unsigned long *)old_set);
#endif
    /* XXX: locking issue */
    if (is_write && page_unprotect(h2g(address), pc, puc)) {
        return 1;
    }

    /* see if it is an MMU fault */
    ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX, 0);
    if (ret < 0)
        return 0; /* not an MMU fault */
    if (ret == 0)
        return 1; /* the MMU fault was handled without causing real CPU fault */
    /* now we have a real cpu fault */
    tb = tb_find_pc(pc);
    if (tb) {
        /* the PC is inside the translated code. It means that we have
           a virtual CPU fault */
        cpu_restore_state(tb, env, pc);
    }

    /* we restore the process signal mask as the sigreturn should
       do it (XXX: use sigsetjmp) */
    sigprocmask(SIG_SETMASK, old_set, NULL);
    EXCEPTION_ACTION;

    /* never comes here */
    return 1;
}
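
/* Illustrative sketch (standard POSIX, not part of this file, added note)
   of how a host fault handler that ends up in handle_cpu_signal() is
   typically installed; host_segv_handler is a hypothetical wrapper name:

       #include <signal.h>
       #include <string.h>

       static void host_segv_handler(int sig, siginfo_t *info, void *puc)
       {
           cpu_signal_handler(sig, info, puc);
       }

       static void install_handler(void)
       {
           struct sigaction act;
           memset(&act, 0, sizeof(act));
           act.sa_sigaction = host_segv_handler;
           act.sa_flags = SA_SIGINFO;      // deliver siginfo_t and ucontext
           sigaction(SIGSEGV, &act, NULL);
       }
*/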

#if defined(__i386__)

#if defined(__APPLE__)
# include <sys/ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext->ss.eip))
# define TRAP_sig(context)    ((context)->uc_mcontext->es.trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext->es.err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__NetBSD__)
# include <ucontext.h>

# define EIP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.__gregs[_REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
# include <ucontext.h>

# define EIP_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_eip))
# define TRAP_sig(context)    ((context)->uc_mcontext.mc_trapno)
# define ERROR_sig(context)   ((context)->uc_mcontext.mc_err)
# define MASK_sig(context)    ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
# define EIP_sig(context)     ((context)->sc_eip)
# define TRAP_sig(context)    ((context)->sc_trapno)
# define ERROR_sig(context)   ((context)->sc_err)
# define MASK_sig(context)    ((context)->sc_mask)
#else
# define EIP_sig(context)     ((context)->uc_mcontext.gregs[REG_EIP])
# define TRAP_sig(context)    ((context)->uc_mcontext.gregs[REG_TRAPNO])
# define ERROR_sig(context)   ((context)->uc_mcontext.gregs[REG_ERR])
# define MASK_sig(context)    ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int trapno;

#ifndef REG_EIP
/* for glibc 2.1 */
#define REG_EIP    EIP
#define REG_ERR    ERR
#define REG_TRAPNO TRAPNO
#endif
    pc = EIP_sig(uc);
    trapno = TRAP_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             trapno == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}
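
/* Added note: trap number 0xe is the x86 page-fault vector; bit 1 of the
   page-fault error code is the W flag, so (ERROR_sig(uc) >> 1) & 1 above
   extracts "was this a write access" for handle_cpu_signal(). */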

#elif defined(__x86_64__)

#ifdef __NetBSD__
#define PC_sig(context)       _UC_MACHINE_PC(context)
#define TRAP_sig(context)     ((context)->uc_mcontext.__gregs[_REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.__gregs[_REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#elif defined(__OpenBSD__)
#define PC_sig(context)       ((context)->sc_rip)
#define TRAP_sig(context)     ((context)->sc_trapno)
#define ERROR_sig(context)    ((context)->sc_err)
#define MASK_sig(context)     ((context)->sc_mask)
#elif defined (__FreeBSD__) || defined(__DragonFly__)
#include <ucontext.h>

#define PC_sig(context)  (*((unsigned long*)&(context)->uc_mcontext.mc_rip))
#define TRAP_sig(context)     ((context)->uc_mcontext.mc_trapno)
#define ERROR_sig(context)    ((context)->uc_mcontext.mc_err)
#define MASK_sig(context)     ((context)->uc_sigmask)
#else
#define PC_sig(context)       ((context)->uc_mcontext.gregs[REG_RIP])
#define TRAP_sig(context)     ((context)->uc_mcontext.gregs[REG_TRAPNO])
#define ERROR_sig(context)    ((context)->uc_mcontext.gregs[REG_ERR])
#define MASK_sig(context)     ((context)->uc_sigmask)
#endif

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    unsigned long pc;
#if defined(__NetBSD__) || defined (__FreeBSD__) || defined(__DragonFly__)
    ucontext_t *uc = puc;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
#else
    struct ucontext *uc = puc;
#endif

    pc = PC_sig(uc);
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             TRAP_sig(uc) == 0xe ?
                             (ERROR_sig(uc) >> 1) & 1 : 0,
                             &MASK_sig(uc), puc);
}

#elif defined(_ARCH_PPC)

/***********************************************************************
 * signal context platform-specific definitions
 * From Wine
 */
#ifdef linux
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)     ((context)->uc_mcontext.regs->reg_name)
/* Gpr Registers access  */
# define GPR_sig(reg_num, context)      REG_sig(gpr[reg_num], context)
# define IAR_sig(context)               REG_sig(nip, context)   /* Program counter */
# define MSR_sig(context)               REG_sig(msr, context)   /* Machine State Register (Supervisor) */
# define CTR_sig(context)               REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)               REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                REG_sig(link, context)  /* Link register */
# define CR_sig(context)                REG_sig(ccr, context)   /* Condition register */
/* Float Registers access  */
# define FLOAT_sig(reg_num, context)    (((double*)((char*)((context)->uc_mcontext.regs+48*4)))[reg_num])
# define FPSCR_sig(context)             (*(int*)((char*)((context)->uc_mcontext.regs+(48+32*2)*4)))
/* Exception Registers access */
# define DAR_sig(context)               REG_sig(dar, context)
# define DSISR_sig(context)             REG_sig(dsisr, context)
# define TRAP_sig(context)              REG_sig(trap, context)
#endif /* linux */

#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <ucontext.h>
# define IAR_sig(context)               ((context)->uc_mcontext.mc_srr0)
# define MSR_sig(context)               ((context)->uc_mcontext.mc_srr1)
# define CTR_sig(context)               ((context)->uc_mcontext.mc_ctr)
# define XER_sig(context)               ((context)->uc_mcontext.mc_xer)
# define LR_sig(context)                ((context)->uc_mcontext.mc_lr)
# define CR_sig(context)                ((context)->uc_mcontext.mc_cr)
/* Exception Registers access */
# define DAR_sig(context)               ((context)->uc_mcontext.mc_dar)
# define DSISR_sig(context)             ((context)->uc_mcontext.mc_dsisr)
# define TRAP_sig(context)              ((context)->uc_mcontext.mc_exc)
#endif /* __FreeBSD__|| __FreeBSD_kernel__ */

#ifdef __APPLE__
# include <sys/ucontext.h>
typedef struct ucontext SIGCONTEXT;
/* All Registers access - only for local access */
# define REG_sig(reg_name, context)      ((context)->uc_mcontext->ss.reg_name)
# define FLOATREG_sig(reg_name, context) ((context)->uc_mcontext->fs.reg_name)
# define EXCEPREG_sig(reg_name, context) ((context)->uc_mcontext->es.reg_name)
# define VECREG_sig(reg_name, context)   ((context)->uc_mcontext->vs.reg_name)
/* Gpr Registers access */
# define GPR_sig(reg_num, context)       REG_sig(r##reg_num, context)
# define IAR_sig(context)                REG_sig(srr0, context)  /* Program counter */
# define MSR_sig(context)                REG_sig(srr1, context)  /* Machine State Register (Supervisor) */
# define CTR_sig(context)                REG_sig(ctr, context)   /* Count register */
# define XER_sig(context)                REG_sig(xer, context)   /* User's integer exception register */
# define LR_sig(context)                 REG_sig(lr, context)    /* Link register */
# define CR_sig(context)                 REG_sig(cr, context)    /* Condition register */
/* Float Registers access */
# define FLOAT_sig(reg_num, context)     FLOATREG_sig(fpregs[reg_num], context)
# define FPSCR_sig(context)              ((double)FLOATREG_sig(fpscr, context))
/* Exception Registers access */
# define DAR_sig(context)                EXCEPREG_sig(dar, context)     /* Fault registers for coredump */
# define DSISR_sig(context)              EXCEPREG_sig(dsisr, context)
# define TRAP_sig(context)               EXCEPREG_sig(exception, context) /* number of powerpc exception taken */
#endif /* __APPLE__ */

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
    ucontext_t *uc = puc;
#else
    struct ucontext *uc = puc;
#endif
    unsigned long pc;
    int is_write;

    pc = IAR_sig(uc);
    is_write = 0;
#if 0
    /* ppc 4xx case */
    if (DSISR_sig(uc) & 0x00800000)
        is_write = 1;
#else
    if (TRAP_sig(uc) != 0x400 && (DSISR_sig(uc) & 0x02000000))
        is_write = 1;
#endif
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__alpha__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    uint32_t *pc = (uint32_t *)uc->uc_mcontext.sc_pc;
    uint32_t insn = *pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster */
    switch (insn >> 26) {
    case 0x0d: // stw
    case 0x0e: // stb
    case 0x0f: // stq_u
    case 0x24: // stf
    case 0x25: // stg
    case 0x26: // sts
    case 0x27: // stt
    case 0x2c: // stl
    case 0x2d: // stq
    case 0x2e: // stl_c
    case 0x2f: // stq_c
        is_write = 1;
    }

    return handle_cpu_signal((unsigned long)pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}
#elif defined(__sparc__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    int is_write;
    uint32_t insn;
#if !defined(__arch64__) || defined(CONFIG_SOLARIS)
    uint32_t *regs = (uint32_t *)(info + 1);
    void *sigmask = (regs + 20);
    /* XXX: is there a standard glibc define? */
    unsigned long pc = regs[1];
#else
#ifdef __linux__
    struct sigcontext *sc = puc;
    unsigned long pc = sc->sigc_regs.tpc;
    void *sigmask = (void *)sc->sigc_mask;
#elif defined(__OpenBSD__)
    struct sigcontext *uc = puc;
    unsigned long pc = uc->sc_pc;
    void *sigmask = (void *)(long)uc->sc_mask;
#endif
#endif

    /* XXX: need kernel patch to get write flag faster */
    is_write = 0;
    insn = *(uint32_t *)pc;
    if ((insn >> 30) == 3) {
        switch((insn >> 19) & 0x3f) {
        case 0x05: // stb
        case 0x15: // stba
        case 0x06: // sth
        case 0x16: // stha
        case 0x04: // st
        case 0x14: // sta
        case 0x07: // std
        case 0x17: // stda
        case 0x0e: // stx
        case 0x1e: // stxa
        case 0x24: // stf
        case 0x34: // stfa
        case 0x27: // stdf
        case 0x37: // stdfa
        case 0x26: // stqf
        case 0x36: // stqfa
        case 0x25: // stfsr
        case 0x3c: // casa
        case 0x3e: // casxa
            is_write = 1;
            break;
        }
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, sigmask, NULL);
}

#elif defined(__arm__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

#if (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
    pc = uc->uc_mcontext.gregs[R15];
#else
    pc = uc->uc_mcontext.arm_pc;
#endif
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__mc68000)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    pc = uc->uc_mcontext.gregs[16];
    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}

#elif defined(__ia64)

#ifndef __ISR_VALID
  /* This ought to be in <bits/siginfo.h>... */
# define __ISR_VALID    1
#endif

int cpu_signal_handler(int host_signum, void *pinfo, void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long ip;
    int is_write = 0;

    ip = uc->uc_mcontext.sc_ip;
    switch (host_signum) {
    case SIGILL:
    case SIGFPE:
    case SIGSEGV:
    case SIGBUS:
    case SIGTRAP:
        if (info->si_code && (info->si_segvflags & __ISR_VALID))
            /* ISR.W (write-access) is bit 33:  */
            is_write = (info->si_isr >> 33) & 1;
        break;

    default:
        break;
    }
    return handle_cpu_signal(ip, (unsigned long)info->si_addr,
                             is_write,
                             (sigset_t *)&uc->uc_sigmask, puc);
}

#elif defined(__s390__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    uint16_t *pinsn;
    int is_write = 0;

    pc = uc->uc_mcontext.psw.addr;

    /* ??? On linux, the non-rt signal handler has 4 (!) arguments instead
       of the normal 2 arguments.  The 3rd argument contains the "int_code"
       from the hardware which does in fact contain the is_write value.
       The rt signal handler, as far as I can tell, does not give this value
       at all.  Not that we could get to it from here even if it were.  */
    /* ??? This is not even close to complete, since it ignores all
       of the read-modify-write instructions.  */
    pinsn = (uint16_t *)pc;
    switch (pinsn[0] >> 8) {
    case 0x50: /* ST */
    case 0x42: /* STC */
    case 0x40: /* STH */
        is_write = 1;
        break;
    case 0xc4: /* RIL format insns */
        switch (pinsn[0] & 0xf) {
        case 0xf: /* STRL */
        case 0xb: /* STGRL */
        case 0x7: /* STHRL */
            is_write = 1;
        }
        break;
    case 0xe3: /* RXY format insns */
        switch (pinsn[2] & 0xff) {
        case 0x50: /* STY */
        case 0x24: /* STG */
        case 0x72: /* STCY */
        case 0x70: /* STHY */
        case 0x8e: /* STPQ */
        case 0x3f: /* STRVH */
        case 0x3e: /* STRV */
        case 0x2f: /* STRVG */
            is_write = 1;
        }
        break;
    }
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__mips__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    siginfo_t *info = pinfo;
    struct ucontext *uc = puc;
    greg_t pc = uc->uc_mcontext.pc;
    int is_write;

    /* XXX: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#elif defined(__hppa__)

int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
    uint32_t insn = *(uint32_t *)pc;
    int is_write = 0;

    /* XXX: need kernel patch to get write flag faster.  */
    switch (insn >> 26) {
    case 0x1a: /* STW */
    case 0x19: /* STH */
    case 0x18: /* STB */
    case 0x1b: /* STWM */
        is_write = 1;
        break;

    case 0x09: /* CSTWX, FSTWX, FSTWS */
    case 0x0b: /* CSTDX, FSTDX, FSTDS */
        /* Distinguish from coprocessor load ... */
        is_write = (insn >> 9) & 1;
        break;

    case 0x03:
        switch ((insn >> 6) & 15) {
        case 0xa: /* STWS */
        case 0x9: /* STHS */
        case 0x8: /* STBS */
        case 0xe: /* STWAS */
        case 0xc: /* STBYS */
            is_write = 1;
        }
        break;
    }

    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write, &uc->uc_sigmask, puc);
}

#else

#error host CPU specific signal handler needed

#endif

#endif /* !defined(CONFIG_SOFTMMU) */