      1 /*
      2  *  MIPS emulation helpers for qemu.
      3  *
      4  *  Copyright (c) 2004-2005 Jocelyn Mayer
      5  *
      6  * This library is free software; you can redistribute it and/or
      7  * modify it under the terms of the GNU Lesser General Public
      8  * License as published by the Free Software Foundation; either
      9  * version 2 of the License, or (at your option) any later version.
     10  *
     11  * This library is distributed in the hope that it will be useful,
     12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
     13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
     14  * Lesser General Public License for more details.
     15  *
     16  * You should have received a copy of the GNU Lesser General Public
     17  * License along with this library; if not, see <http://www.gnu.org/licenses/>.
     18  */
     19 #include <stdlib.h>
     20 #include "exec.h"
     21 
     22 #include "host-utils.h"
     23 
     24 #include "helper.h"
     25 /*****************************************************************************/
     26 /* Exceptions processing helpers */
     27 
     28 void helper_raise_exception_err (uint32_t exception, int error_code)
     29 {
     30 #if 1
     31     if (exception < 0x100)
     32         qemu_log("%s: %d %d\n", __func__, exception, error_code);
     33 #endif
     34     env->exception_index = exception;
     35     env->error_code = error_code;
     36     cpu_loop_exit();
     37 }
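/* Note that raising an exception does not return to the caller:
   cpu_loop_exit() longjmps back to the main execution loop, which then
   dispatches on the exception_index/error_code values stored above. */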
     38 
     39 void helper_raise_exception (uint32_t exception)
     40 {
     41     helper_raise_exception_err(exception, 0);
     42 }
     43 
     44 void helper_interrupt_restart (void)
     45 {
     46     if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
     47         !(env->CP0_Status & (1 << CP0St_ERL)) &&
     48         !(env->hflags & MIPS_HFLAG_DM) &&
     49         (env->CP0_Status & (1 << CP0St_IE)) &&
     50         (env->CP0_Status & env->CP0_Cause & CP0Ca_IP_mask)) {
     51         env->CP0_Cause &= ~(0x1f << CP0Ca_EC);
     52         helper_raise_exception(EXCP_EXT_INTERRUPT);
     53     }
     54 }
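/* Re-checks for a deliverable interrupt (typically after a CP0 write that
   may have unmasked one): if the CPU is not in exception level (EXL),
   error level (ERL) or debug mode, interrupts are enabled (Status.IE) and
   at least one pending Cause.IP bit is unmasked by Status.IM, the ExcCode
   field is cleared and an external interrupt exception is raised. */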
     55 
     56 #if !defined(CONFIG_USER_ONLY)
     57 static void do_restore_state (void *pc_ptr)
     58 {
     59     TranslationBlock *tb;
     60     unsigned long pc = (unsigned long) pc_ptr;
     61 
     62     tb = tb_find_pc (pc);
     63     if (tb) {
     64         cpu_restore_state (tb, env, pc);
     65     }
     66 }
     67 #endif
     68 
     69 #if defined(CONFIG_USER_ONLY)
     70 #define HELPER_LD(name, insn, type)                                     \
     71 static inline type do_##name(target_ulong addr, int mem_idx)            \
     72 {                                                                       \
     73     return (type) insn##_raw(addr);                                     \
     74 }
     75 #else
     76 #define HELPER_LD(name, insn, type)                                     \
     77 static inline type do_##name(target_ulong addr, int mem_idx)            \
     78 {                                                                       \
     79     switch (mem_idx)                                                    \
     80     {                                                                   \
     81     case 0: return (type) insn##_kernel(addr); break;                   \
     82     case 1: return (type) insn##_super(addr); break;                    \
     83     default:                                                            \
     84     case 2: return (type) insn##_user(addr); break;                     \
     85     }                                                                   \
     86 }
     87 #endif
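/* mem_idx selects the privilege level the access is performed at:
   0 = kernel, 1 = supervisor, 2 = user, matching cpu_mmu_index().  In
   user-mode emulation (CONFIG_USER_ONLY) there is a single flat address
   space, so the _raw accessors are used regardless of mem_idx.  The store
   macros below follow the same pattern. */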
     88 HELPER_LD(lbu, ldub, uint8_t)
     89 HELPER_LD(lw, ldl, int32_t)
     90 #ifdef TARGET_MIPS64
     91 HELPER_LD(ld, ldq, int64_t)
     92 #endif
     93 #undef HELPER_LD
     94 
     95 #if defined(CONFIG_USER_ONLY)
     96 #define HELPER_ST(name, insn, type)                                     \
     97 static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
     98 {                                                                       \
     99     insn##_raw(addr, val);                                              \
    100 }
    101 #else
    102 #define HELPER_ST(name, insn, type)                                     \
    103 static inline void do_##name(target_ulong addr, type val, int mem_idx)  \
    104 {                                                                       \
    105     switch (mem_idx)                                                    \
    106     {                                                                   \
    107     case 0: insn##_kernel(addr, val); break;                            \
    108     case 1: insn##_super(addr, val); break;                             \
    109     default:                                                            \
    110     case 2: insn##_user(addr, val); break;                              \
    111     }                                                                   \
    112 }
    113 #endif
    114 HELPER_ST(sb, stb, uint8_t)
    115 HELPER_ST(sw, stl, uint32_t)
    116 #ifdef TARGET_MIPS64
    117 HELPER_ST(sd, stq, uint64_t)
    118 #endif
    119 #undef HELPER_ST
    120 
    121 target_ulong helper_clo (target_ulong arg1)
    122 {
    123     return clo32(arg1);
    124 }
    125 
    126 target_ulong helper_clz (target_ulong arg1)
    127 {
    128     return clz32(arg1);
    129 }
    130 
    131 #if defined(TARGET_MIPS64)
    132 target_ulong helper_dclo (target_ulong arg1)
    133 {
    134     return clo64(arg1);
    135 }
    136 
    137 target_ulong helper_dclz (target_ulong arg1)
    138 {
    139     return clz64(arg1);
    140 }
    141 #endif /* TARGET_MIPS64 */
    142 
/* 64-bit arithmetic for 32-bit hosts */
    144 static inline uint64_t get_HILO (void)
    145 {
    146     return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
    147 }
    148 
    149 static inline void set_HILO (uint64_t HILO)
    150 {
    151     env->active_tc.LO[0] = (int32_t)HILO;
    152     env->active_tc.HI[0] = (int32_t)(HILO >> 32);
    153 }
    154 
/* These must be macros rather than functions: the vr54xx multiply helpers
   below pass their own arg1 so that the selected half of the new HI/LO
   value is written back into it and then returned to the guest register.
   With a by-value function parameter that write-back would be lost. */
#define set_HIT0_LO(arg1, HILO)                                         \
    do {                                                                \
        uint64_t hilo_ = (HILO);                                        \
        env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);           \
        (arg1) = env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);         \
    } while (0)

#define set_HI_LOT0(arg1, HILO)                                         \
    do {                                                                \
        uint64_t hilo_ = (HILO);                                        \
        (arg1) = env->active_tc.LO[0] = (int32_t)(hilo_ & 0xFFFFFFFF);  \
        env->active_tc.HI[0] = (int32_t)(hilo_ >> 32);                  \
    } while (0)
    166 
    167 /* Multiplication variants of the vr54xx. */
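/* These helpers update the HI/LO accumulator and also produce a GPR
   result: helpers built on set_HI_LOT0() return the new LO half, while
   the *hi variants built on set_HIT0_LO() return the new HI half.  The
   macc/msac families first add to or subtract from the previous 64-bit
   HI/LO value obtained with get_HILO(). */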
    168 target_ulong helper_muls (target_ulong arg1, target_ulong arg2)
    169 {
    170     set_HI_LOT0(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
    171 
    172     return arg1;
    173 }
    174 
    175 target_ulong helper_mulsu (target_ulong arg1, target_ulong arg2)
    176 {
    177     set_HI_LOT0(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
    178 
    179     return arg1;
    180 }
    181 
    182 target_ulong helper_macc (target_ulong arg1, target_ulong arg2)
    183 {
    184     set_HI_LOT0(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
    185 
    186     return arg1;
    187 }
    188 
    189 target_ulong helper_macchi (target_ulong arg1, target_ulong arg2)
    190 {
    191     set_HIT0_LO(arg1, ((int64_t)get_HILO()) + ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
    192 
    193     return arg1;
    194 }
    195 
    196 target_ulong helper_maccu (target_ulong arg1, target_ulong arg2)
    197 {
    198     set_HI_LOT0(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
    199 
    200     return arg1;
    201 }
    202 
    203 target_ulong helper_macchiu (target_ulong arg1, target_ulong arg2)
    204 {
    205     set_HIT0_LO(arg1, ((uint64_t)get_HILO()) + ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
    206 
    207     return arg1;
    208 }
    209 
    210 target_ulong helper_msac (target_ulong arg1, target_ulong arg2)
    211 {
    212     set_HI_LOT0(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
    213 
    214     return arg1;
    215 }
    216 
    217 target_ulong helper_msachi (target_ulong arg1, target_ulong arg2)
    218 {
    219     set_HIT0_LO(arg1, ((int64_t)get_HILO()) - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
    220 
    221     return arg1;
    222 }
    223 
    224 target_ulong helper_msacu (target_ulong arg1, target_ulong arg2)
    225 {
    226     set_HI_LOT0(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
    227 
    228     return arg1;
    229 }
    230 
    231 target_ulong helper_msachiu (target_ulong arg1, target_ulong arg2)
    232 {
    233     set_HIT0_LO(arg1, ((uint64_t)get_HILO()) - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
    234 
    235     return arg1;
    236 }
    237 
    238 target_ulong helper_mulhi (target_ulong arg1, target_ulong arg2)
    239 {
    240     set_HIT0_LO(arg1, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
    241 
    242     return arg1;
    243 }
    244 
    245 target_ulong helper_mulhiu (target_ulong arg1, target_ulong arg2)
    246 {
    247     set_HIT0_LO(arg1, (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
    248 
    249     return arg1;
    250 }
    251 
    252 target_ulong helper_mulshi (target_ulong arg1, target_ulong arg2)
    253 {
    254     set_HIT0_LO(arg1, 0 - ((int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2));
    255 
    256     return arg1;
    257 }
    258 
    259 target_ulong helper_mulshiu (target_ulong arg1, target_ulong arg2)
    260 {
    261     set_HIT0_LO(arg1, 0 - ((uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2));
    262 
    263     return arg1;
    264 }
    265 
    266 #ifdef TARGET_MIPS64
    267 void helper_dmult (target_ulong arg1, target_ulong arg2)
    268 {
    269     muls64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
    270 }
    271 
    272 void helper_dmultu (target_ulong arg1, target_ulong arg2)
    273 {
    274     mulu64(&(env->active_tc.LO[0]), &(env->active_tc.HI[0]), arg1, arg2);
    275 }
    276 #endif
    277 
    278 #ifndef CONFIG_USER_ONLY
    279 
    280 static inline target_phys_addr_t do_translate_address(target_ulong address, int rw)
    281 {
    282     target_phys_addr_t lladdr;
    283 
    284     lladdr = cpu_mips_translate_address(env, address, rw);
    285 
    286     if (lladdr == -1LL) {
    287         cpu_loop_exit();
    288     } else {
    289         return lladdr;
    290     }
    291 }
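/* LL/SC emulation: LL records the physical address of the access in
   env->lladdr and the loaded value in env->llval.  SC performs the store
   and returns 1 only if its address translates to the same physical
   address and memory still contains llval; otherwise it returns 0.  A
   misaligned SC address raises AdES with CP0_BadVAddr set first. */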
    292 
    293 #define HELPER_LD_ATOMIC(name, insn)                                          \
    294 target_ulong helper_##name(target_ulong arg, int mem_idx)                     \
    295 {                                                                             \
    296     env->lladdr = do_translate_address(arg, 0);                               \
    297     env->llval = do_##insn(arg, mem_idx);                                     \
    298     return env->llval;                                                        \
    299 }
    300 HELPER_LD_ATOMIC(ll, lw)
    301 #ifdef TARGET_MIPS64
    302 HELPER_LD_ATOMIC(lld, ld)
    303 #endif
    304 #undef HELPER_LD_ATOMIC
    305 
    306 #define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
    307 target_ulong helper_##name(target_ulong arg1, target_ulong arg2, int mem_idx) \
    308 {                                                                             \
    309     target_long tmp;                                                          \
    310                                                                               \
    311     if (arg2 & almask) {                                                      \
    312         env->CP0_BadVAddr = arg2;                                             \
    313         helper_raise_exception(EXCP_AdES);                                    \
    314     }                                                                         \
    315     if (do_translate_address(arg2, 1) == env->lladdr) {                       \
    316         tmp = do_##ld_insn(arg2, mem_idx);                                    \
    317         if (tmp == env->llval) {                                              \
    318             do_##st_insn(arg2, arg1, mem_idx);                                \
    319             return 1;                                                         \
    320         }                                                                     \
    321     }                                                                         \
    322     return 0;                                                                 \
    323 }
    324 HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
    325 #ifdef TARGET_MIPS64
    326 HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
    327 #endif
    328 #undef HELPER_ST_ATOMIC
    329 #endif
    330 
    331 #ifdef TARGET_WORDS_BIGENDIAN
    332 #define GET_LMASK(v) ((v) & 3)
    333 #define GET_OFFSET(addr, offset) (addr + (offset))
    334 #else
    335 #define GET_LMASK(v) (((v) & 3) ^ 3)
    336 #define GET_OFFSET(addr, offset) (addr - (offset))
    337 #endif
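/* LWL/LWR load the "left" (most significant) and "right" (least
   significant) bytes of an unaligned word and merge them into the
   destination register; SWL/SWR are the matching partial stores.  Worked
   example, derived from the code below, for a big-endian target with
   memory 0x1000: AA BB CC DD and rt = 0x11223344:
     LWL rt, 0x1001  ->  rt = 0xBBCCDD44  (three bytes into the high part)
     LWR rt, 0x1000  ->  rt = 0x112233AA  (one byte into the low part)
   On little-endian targets GET_LMASK and GET_OFFSET invert the byte
   selection. */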
    338 
    339 target_ulong helper_lwl(target_ulong arg1, target_ulong arg2, int mem_idx)
    340 {
    341     target_ulong tmp;
    342 
    343     tmp = do_lbu(arg2, mem_idx);
    344     arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    345 
    346     if (GET_LMASK(arg2) <= 2) {
    347         tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
    348         arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    349     }
    350 
    351     if (GET_LMASK(arg2) <= 1) {
    352         tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
    353         arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    354     }
    355 
    356     if (GET_LMASK(arg2) == 0) {
    357         tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
    358         arg1 = (arg1 & 0xFFFFFF00) | tmp;
    359     }
    360     return (int32_t)arg1;
    361 }
    362 
    363 target_ulong helper_lwr(target_ulong arg1, target_ulong arg2, int mem_idx)
    364 {
    365     target_ulong tmp;
    366 
    367     tmp = do_lbu(arg2, mem_idx);
    368     arg1 = (arg1 & 0xFFFFFF00) | tmp;
    369 
    370     if (GET_LMASK(arg2) >= 1) {
    371         tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
    372         arg1 = (arg1 & 0xFFFF00FF) | (tmp << 8);
    373     }
    374 
    375     if (GET_LMASK(arg2) >= 2) {
    376         tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
    377         arg1 = (arg1 & 0xFF00FFFF) | (tmp << 16);
    378     }
    379 
    380     if (GET_LMASK(arg2) == 3) {
    381         tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
    382         arg1 = (arg1 & 0x00FFFFFF) | (tmp << 24);
    383     }
    384     return (int32_t)arg1;
    385 }
    386 
    387 void helper_swl(target_ulong arg1, target_ulong arg2, int mem_idx)
    388 {
    389     do_sb(arg2, (uint8_t)(arg1 >> 24), mem_idx);
    390 
    391     if (GET_LMASK(arg2) <= 2)
    392         do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
    393 
    394     if (GET_LMASK(arg2) <= 1)
    395         do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
    396 
    397     if (GET_LMASK(arg2) == 0)
    398         do_sb(GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
    399 }
    400 
    401 void helper_swr(target_ulong arg1, target_ulong arg2, int mem_idx)
    402 {
    403     do_sb(arg2, (uint8_t)arg1, mem_idx);
    404 
    405     if (GET_LMASK(arg2) >= 1)
    406         do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
    407 
    408     if (GET_LMASK(arg2) >= 2)
    409         do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
    410 
    411     if (GET_LMASK(arg2) == 3)
    412         do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
    413 }
    414 
    415 #if defined(TARGET_MIPS64)
    416 /* "half" load and stores.  We must do the memory access inline,
    417    or fault handling won't work.  */
    418 
    419 #ifdef TARGET_WORDS_BIGENDIAN
    420 #define GET_LMASK64(v) ((v) & 7)
    421 #else
    422 #define GET_LMASK64(v) (((v) & 7) ^ 7)
    423 #endif
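/* LDL/LDR and SDL/SDR are the 64-bit analogues of LWL/LWR and SWL/SWR
   above: the same byte-merging scheme is applied across the eight bytes
   of a doubleword, with GET_LMASK64 selecting how many bytes take part. */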
    424 
    425 target_ulong helper_ldl(target_ulong arg1, target_ulong arg2, int mem_idx)
    426 {
    427     uint64_t tmp;
    428 
    429     tmp = do_lbu(arg2, mem_idx);
    430     arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    431 
    432     if (GET_LMASK64(arg2) <= 6) {
    433         tmp = do_lbu(GET_OFFSET(arg2, 1), mem_idx);
    434         arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    435     }
    436 
    437     if (GET_LMASK64(arg2) <= 5) {
    438         tmp = do_lbu(GET_OFFSET(arg2, 2), mem_idx);
    439         arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    440     }
    441 
    442     if (GET_LMASK64(arg2) <= 4) {
    443         tmp = do_lbu(GET_OFFSET(arg2, 3), mem_idx);
    444         arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    445     }
    446 
    447     if (GET_LMASK64(arg2) <= 3) {
    448         tmp = do_lbu(GET_OFFSET(arg2, 4), mem_idx);
    449         arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    450     }
    451 
    452     if (GET_LMASK64(arg2) <= 2) {
    453         tmp = do_lbu(GET_OFFSET(arg2, 5), mem_idx);
    454         arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    455     }
    456 
    457     if (GET_LMASK64(arg2) <= 1) {
    458         tmp = do_lbu(GET_OFFSET(arg2, 6), mem_idx);
    459         arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp << 8);
    460     }
    461 
    462     if (GET_LMASK64(arg2) == 0) {
    463         tmp = do_lbu(GET_OFFSET(arg2, 7), mem_idx);
    464         arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    465     }
    466 
    467     return arg1;
    468 }
    469 
    470 target_ulong helper_ldr(target_ulong arg1, target_ulong arg2, int mem_idx)
    471 {
    472     uint64_t tmp;
    473 
    474     tmp = do_lbu(arg2, mem_idx);
    475     arg1 = (arg1 & 0xFFFFFFFFFFFFFF00ULL) | tmp;
    476 
    477     if (GET_LMASK64(arg2) >= 1) {
    478         tmp = do_lbu(GET_OFFSET(arg2, -1), mem_idx);
    479         arg1 = (arg1 & 0xFFFFFFFFFFFF00FFULL) | (tmp  << 8);
    480     }
    481 
    482     if (GET_LMASK64(arg2) >= 2) {
    483         tmp = do_lbu(GET_OFFSET(arg2, -2), mem_idx);
    484         arg1 = (arg1 & 0xFFFFFFFFFF00FFFFULL) | (tmp << 16);
    485     }
    486 
    487     if (GET_LMASK64(arg2) >= 3) {
    488         tmp = do_lbu(GET_OFFSET(arg2, -3), mem_idx);
    489         arg1 = (arg1 & 0xFFFFFFFF00FFFFFFULL) | (tmp << 24);
    490     }
    491 
    492     if (GET_LMASK64(arg2) >= 4) {
    493         tmp = do_lbu(GET_OFFSET(arg2, -4), mem_idx);
    494         arg1 = (arg1 & 0xFFFFFF00FFFFFFFFULL) | (tmp << 32);
    495     }
    496 
    497     if (GET_LMASK64(arg2) >= 5) {
    498         tmp = do_lbu(GET_OFFSET(arg2, -5), mem_idx);
    499         arg1 = (arg1 & 0xFFFF00FFFFFFFFFFULL) | (tmp << 40);
    500     }
    501 
    502     if (GET_LMASK64(arg2) >= 6) {
    503         tmp = do_lbu(GET_OFFSET(arg2, -6), mem_idx);
    504         arg1 = (arg1 & 0xFF00FFFFFFFFFFFFULL) | (tmp << 48);
    505     }
    506 
    507     if (GET_LMASK64(arg2) == 7) {
    508         tmp = do_lbu(GET_OFFSET(arg2, -7), mem_idx);
    509         arg1 = (arg1 & 0x00FFFFFFFFFFFFFFULL) | (tmp << 56);
    510     }
    511 
    512     return arg1;
    513 }
    514 
    515 void helper_sdl(target_ulong arg1, target_ulong arg2, int mem_idx)
    516 {
    517     do_sb(arg2, (uint8_t)(arg1 >> 56), mem_idx);
    518 
    519     if (GET_LMASK64(arg2) <= 6)
    520         do_sb(GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
    521 
    522     if (GET_LMASK64(arg2) <= 5)
    523         do_sb(GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
    524 
    525     if (GET_LMASK64(arg2) <= 4)
    526         do_sb(GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
    527 
    528     if (GET_LMASK64(arg2) <= 3)
    529         do_sb(GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
    530 
    531     if (GET_LMASK64(arg2) <= 2)
    532         do_sb(GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
    533 
    534     if (GET_LMASK64(arg2) <= 1)
    535         do_sb(GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
    536 
    537     if (GET_LMASK64(arg2) <= 0)
    538         do_sb(GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
    539 }
    540 
    541 void helper_sdr(target_ulong arg1, target_ulong arg2, int mem_idx)
    542 {
    543     do_sb(arg2, (uint8_t)arg1, mem_idx);
    544 
    545     if (GET_LMASK64(arg2) >= 1)
    546         do_sb(GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
    547 
    548     if (GET_LMASK64(arg2) >= 2)
    549         do_sb(GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
    550 
    551     if (GET_LMASK64(arg2) >= 3)
    552         do_sb(GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
    553 
    554     if (GET_LMASK64(arg2) >= 4)
    555         do_sb(GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
    556 
    557     if (GET_LMASK64(arg2) >= 5)
    558         do_sb(GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
    559 
    560     if (GET_LMASK64(arg2) >= 6)
    561         do_sb(GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
    562 
    563     if (GET_LMASK64(arg2) == 7)
    564         do_sb(GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
    565 }
    566 #endif /* TARGET_MIPS64 */
    567 
    568 #ifndef CONFIG_USER_ONLY
    569 /* CP0 helpers */
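/* For the MT ASE, the mftc0/mttc0 helpers operate on the TC selected by
   the TargTC field of CP0_VPEControl rather than on the current one: when
   TargTC equals env->current_tc the live copy in env->active_tc is used,
   otherwise the saved state in env->tcs[] is accessed. */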
    570 target_ulong helper_mfc0_mvpcontrol (void)
    571 {
    572     return env->mvp->CP0_MVPControl;
    573 }
    574 
    575 target_ulong helper_mfc0_mvpconf0 (void)
    576 {
    577     return env->mvp->CP0_MVPConf0;
    578 }
    579 
    580 target_ulong helper_mfc0_mvpconf1 (void)
    581 {
    582     return env->mvp->CP0_MVPConf1;
    583 }
    584 
    585 target_ulong helper_mfc0_random (void)
    586 {
    587     return (int32_t)cpu_mips_get_random(env);
    588 }
    589 
    590 target_ulong helper_mfc0_tcstatus (void)
    591 {
    592     return env->active_tc.CP0_TCStatus;
    593 }
    594 
    595 target_ulong helper_mftc0_tcstatus(void)
    596 {
    597     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    598 
    599     if (other_tc == env->current_tc)
    600         return env->active_tc.CP0_TCStatus;
    601     else
    602         return env->tcs[other_tc].CP0_TCStatus;
    603 }
    604 
    605 target_ulong helper_mfc0_tcbind (void)
    606 {
    607     return env->active_tc.CP0_TCBind;
    608 }
    609 
    610 target_ulong helper_mftc0_tcbind(void)
    611 {
    612     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    613 
    614     if (other_tc == env->current_tc)
    615         return env->active_tc.CP0_TCBind;
    616     else
    617         return env->tcs[other_tc].CP0_TCBind;
    618 }
    619 
    620 target_ulong helper_mfc0_tcrestart (void)
    621 {
    622     return env->active_tc.PC;
    623 }
    624 
    625 target_ulong helper_mftc0_tcrestart(void)
    626 {
    627     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    628 
    629     if (other_tc == env->current_tc)
    630         return env->active_tc.PC;
    631     else
    632         return env->tcs[other_tc].PC;
    633 }
    634 
    635 target_ulong helper_mfc0_tchalt (void)
    636 {
    637     return env->active_tc.CP0_TCHalt;
    638 }
    639 
    640 target_ulong helper_mftc0_tchalt(void)
    641 {
    642     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    643 
    644     if (other_tc == env->current_tc)
    645         return env->active_tc.CP0_TCHalt;
    646     else
    647         return env->tcs[other_tc].CP0_TCHalt;
    648 }
    649 
    650 target_ulong helper_mfc0_tccontext (void)
    651 {
    652     return env->active_tc.CP0_TCContext;
    653 }
    654 
    655 target_ulong helper_mftc0_tccontext(void)
    656 {
    657     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    658 
    659     if (other_tc == env->current_tc)
    660         return env->active_tc.CP0_TCContext;
    661     else
    662         return env->tcs[other_tc].CP0_TCContext;
    663 }
    664 
    665 target_ulong helper_mfc0_tcschedule (void)
    666 {
    667     return env->active_tc.CP0_TCSchedule;
    668 }
    669 
    670 target_ulong helper_mftc0_tcschedule(void)
    671 {
    672     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    673 
    674     if (other_tc == env->current_tc)
    675         return env->active_tc.CP0_TCSchedule;
    676     else
    677         return env->tcs[other_tc].CP0_TCSchedule;
    678 }
    679 
    680 target_ulong helper_mfc0_tcschefback (void)
    681 {
    682     return env->active_tc.CP0_TCScheFBack;
    683 }
    684 
    685 target_ulong helper_mftc0_tcschefback(void)
    686 {
    687     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    688 
    689     if (other_tc == env->current_tc)
    690         return env->active_tc.CP0_TCScheFBack;
    691     else
    692         return env->tcs[other_tc].CP0_TCScheFBack;
    693 }
    694 
    695 target_ulong helper_mfc0_count (void)
    696 {
    697     return (int32_t)cpu_mips_get_count(env);
    698 }
    699 
    700 target_ulong helper_mftc0_entryhi(void)
    701 {
    702     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    703     int32_t tcstatus;
    704 
    705     if (other_tc == env->current_tc)
    706         tcstatus = env->active_tc.CP0_TCStatus;
    707     else
    708         tcstatus = env->tcs[other_tc].CP0_TCStatus;
    709 
    710     return (env->CP0_EntryHi & ~0xff) | (tcstatus & 0xff);
    711 }
    712 
    713 target_ulong helper_mftc0_status(void)
    714 {
    715     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    716     target_ulong t0;
    717     int32_t tcstatus;
    718 
    719     if (other_tc == env->current_tc)
    720         tcstatus = env->active_tc.CP0_TCStatus;
    721     else
    722         tcstatus = env->tcs[other_tc].CP0_TCStatus;
    723 
    724     t0 = env->CP0_Status & ~0xf1000018;
    725     t0 |= tcstatus & (0xf << CP0TCSt_TCU0);
    726     t0 |= (tcstatus & (1 << CP0TCSt_TMX)) >> (CP0TCSt_TMX - CP0St_MX);
    727     t0 |= (tcstatus & (0x3 << CP0TCSt_TKSU)) >> (CP0TCSt_TKSU - CP0St_KSU);
    728 
    729     return t0;
    730 }
    731 
    732 target_ulong helper_mfc0_lladdr (void)
    733 {
    734     return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
    735 }
    736 
    737 target_ulong helper_mfc0_watchlo (uint32_t sel)
    738 {
    739     return (int32_t)env->CP0_WatchLo[sel];
    740 }
    741 
    742 target_ulong helper_mfc0_watchhi (uint32_t sel)
    743 {
    744     return env->CP0_WatchHi[sel];
    745 }
    746 
    747 target_ulong helper_mfc0_debug (void)
    748 {
    749     target_ulong t0 = env->CP0_Debug;
    750     if (env->hflags & MIPS_HFLAG_DM)
    751         t0 |= 1 << CP0DB_DM;
    752 
    753     return t0;
    754 }
    755 
    756 target_ulong helper_mftc0_debug(void)
    757 {
    758     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    759     int32_t tcstatus;
    760 
    761     if (other_tc == env->current_tc)
    762         tcstatus = env->active_tc.CP0_Debug_tcstatus;
    763     else
    764         tcstatus = env->tcs[other_tc].CP0_Debug_tcstatus;
    765 
    766     /* XXX: Might be wrong, check with EJTAG spec. */
    767     return (env->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
    768             (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
    769 }
    770 
    771 #if defined(TARGET_MIPS64)
    772 target_ulong helper_dmfc0_tcrestart (void)
    773 {
    774     return env->active_tc.PC;
    775 }
    776 
    777 target_ulong helper_dmfc0_tchalt (void)
    778 {
    779     return env->active_tc.CP0_TCHalt;
    780 }
    781 
    782 target_ulong helper_dmfc0_tccontext (void)
    783 {
    784     return env->active_tc.CP0_TCContext;
    785 }
    786 
    787 target_ulong helper_dmfc0_tcschedule (void)
    788 {
    789     return env->active_tc.CP0_TCSchedule;
    790 }
    791 
    792 target_ulong helper_dmfc0_tcschefback (void)
    793 {
    794     return env->active_tc.CP0_TCScheFBack;
    795 }
    796 
    797 target_ulong helper_dmfc0_lladdr (void)
    798 {
    799     return env->lladdr >> env->CP0_LLAddr_shift;
    800 }
    801 
    802 target_ulong helper_dmfc0_watchlo (uint32_t sel)
    803 {
    804     return env->CP0_WatchLo[sel];
    805 }
    806 #endif /* TARGET_MIPS64 */
    807 
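/* CP0_Index writes keep only enough low bits to address every TLB entry
   (the loop below builds a power-of-two mask large enough for nb_tlb
   entries); bit 31, which records a failed TLBP probe, is preserved. */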
    808 void helper_mtc0_index (target_ulong arg1)
    809 {
    810     int num = 1;
    811     unsigned int tmp = env->tlb->nb_tlb;
    812 
    813     do {
    814         tmp >>= 1;
    815         num <<= 1;
    816     } while (tmp);
    817     env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
    818 }
    819 
    820 void helper_mtc0_mvpcontrol (target_ulong arg1)
    821 {
    822     uint32_t mask = 0;
    823     uint32_t newval;
    824 
    825     if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
    826         mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
    827                 (1 << CP0MVPCo_EVP);
    828     if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
    829         mask |= (1 << CP0MVPCo_STLB);
    830     newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
    831 
    832     // TODO: Enable/disable shared TLB, enable/disable VPEs.
    833 
    834     env->mvp->CP0_MVPControl = newval;
    835 }
    836 
    837 void helper_mtc0_vpecontrol (target_ulong arg1)
    838 {
    839     uint32_t mask;
    840     uint32_t newval;
    841 
    842     mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
    843            (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
    844     newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
    845 
    846     /* Yield scheduler intercept not implemented. */
    847     /* Gating storage scheduler intercept not implemented. */
    848 
    849     // TODO: Enable/disable TCs.
    850 
    851     env->CP0_VPEControl = newval;
    852 }
    853 
    854 void helper_mtc0_vpeconf0 (target_ulong arg1)
    855 {
    856     uint32_t mask = 0;
    857     uint32_t newval;
    858 
    859     if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
    860         if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
    861             mask |= (0xff << CP0VPEC0_XTC);
    862         mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
    863     }
    864     newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
    865 
    866     // TODO: TC exclusive handling due to ERL/EXL.
    867 
    868     env->CP0_VPEConf0 = newval;
    869 }
    870 
    871 void helper_mtc0_vpeconf1 (target_ulong arg1)
    872 {
    873     uint32_t mask = 0;
    874     uint32_t newval;
    875 
    876     if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
    877         mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
    878                 (0xff << CP0VPEC1_NCP1);
    879     newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
    880 
    881     /* UDI not implemented. */
    882     /* CP2 not implemented. */
    883 
    884     // TODO: Handle FPU (CP1) binding.
    885 
    886     env->CP0_VPEConf1 = newval;
    887 }
    888 
    889 void helper_mtc0_yqmask (target_ulong arg1)
    890 {
    891     /* Yield qualifier inputs not implemented. */
    892     env->CP0_YQMask = 0x00000000;
    893 }
    894 
    895 void helper_mtc0_vpeopt (target_ulong arg1)
    896 {
    897     env->CP0_VPEOpt = arg1 & 0x0000ffff;
    898 }
    899 
    900 void helper_mtc0_entrylo0 (target_ulong arg1)
    901 {
    902     /* Large physaddr (PABITS) not implemented */
    903     /* 1k pages not implemented */
    904     env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
    905 }
    906 
    907 void helper_mtc0_tcstatus (target_ulong arg1)
    908 {
    909     uint32_t mask = env->CP0_TCStatus_rw_bitmask;
    910     uint32_t newval;
    911 
    912     newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
    913 
    914     // TODO: Sync with CP0_Status.
    915 
    916     env->active_tc.CP0_TCStatus = newval;
    917 }
    918 
    919 void helper_mttc0_tcstatus (target_ulong arg1)
    920 {
    921     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    922 
    923     // TODO: Sync with CP0_Status.
    924 
    925     if (other_tc == env->current_tc)
    926         env->active_tc.CP0_TCStatus = arg1;
    927     else
    928         env->tcs[other_tc].CP0_TCStatus = arg1;
    929 }
    930 
    931 void helper_mtc0_tcbind (target_ulong arg1)
    932 {
    933     uint32_t mask = (1 << CP0TCBd_TBE);
    934     uint32_t newval;
    935 
    936     if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
    937         mask |= (1 << CP0TCBd_CurVPE);
    938     newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    939     env->active_tc.CP0_TCBind = newval;
    940 }
    941 
    942 void helper_mttc0_tcbind (target_ulong arg1)
    943 {
    944     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    945     uint32_t mask = (1 << CP0TCBd_TBE);
    946     uint32_t newval;
    947 
    948     if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
    949         mask |= (1 << CP0TCBd_CurVPE);
    950     if (other_tc == env->current_tc) {
    951         newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
    952         env->active_tc.CP0_TCBind = newval;
    953     } else {
    954         newval = (env->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
    955         env->tcs[other_tc].CP0_TCBind = newval;
    956     }
    957 }
    958 
    959 void helper_mtc0_tcrestart (target_ulong arg1)
    960 {
    961     env->active_tc.PC = arg1;
    962     env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    963     env->lladdr = 0ULL;
    964     /* MIPS16 not implemented. */
    965 }
    966 
    967 void helper_mttc0_tcrestart (target_ulong arg1)
    968 {
    969     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    970 
    971     if (other_tc == env->current_tc) {
    972         env->active_tc.PC = arg1;
    973         env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    974         env->lladdr = 0ULL;
    975         /* MIPS16 not implemented. */
    976     } else {
    977         env->tcs[other_tc].PC = arg1;
    978         env->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    979         env->lladdr = 0ULL;
    980         /* MIPS16 not implemented. */
    981     }
    982 }
    983 
    984 void helper_mtc0_tchalt (target_ulong arg1)
    985 {
    986     env->active_tc.CP0_TCHalt = arg1 & 0x1;
    987 
    988     // TODO: Halt TC / Restart (if allocated+active) TC.
    989 }
    990 
    991 void helper_mttc0_tchalt (target_ulong arg1)
    992 {
    993     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    994 
    995     // TODO: Halt TC / Restart (if allocated+active) TC.
    996 
    997     if (other_tc == env->current_tc)
    998         env->active_tc.CP0_TCHalt = arg1;
    999     else
   1000         env->tcs[other_tc].CP0_TCHalt = arg1;
   1001 }
   1002 
   1003 void helper_mtc0_tccontext (target_ulong arg1)
   1004 {
   1005     env->active_tc.CP0_TCContext = arg1;
   1006 }
   1007 
   1008 void helper_mttc0_tccontext (target_ulong arg1)
   1009 {
   1010     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1011 
   1012     if (other_tc == env->current_tc)
   1013         env->active_tc.CP0_TCContext = arg1;
   1014     else
   1015         env->tcs[other_tc].CP0_TCContext = arg1;
   1016 }
   1017 
   1018 void helper_mtc0_tcschedule (target_ulong arg1)
   1019 {
   1020     env->active_tc.CP0_TCSchedule = arg1;
   1021 }
   1022 
   1023 void helper_mttc0_tcschedule (target_ulong arg1)
   1024 {
   1025     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1026 
   1027     if (other_tc == env->current_tc)
   1028         env->active_tc.CP0_TCSchedule = arg1;
   1029     else
   1030         env->tcs[other_tc].CP0_TCSchedule = arg1;
   1031 }
   1032 
   1033 void helper_mtc0_tcschefback (target_ulong arg1)
   1034 {
   1035     env->active_tc.CP0_TCScheFBack = arg1;
   1036 }
   1037 
   1038 void helper_mttc0_tcschefback (target_ulong arg1)
   1039 {
   1040     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1041 
   1042     if (other_tc == env->current_tc)
   1043         env->active_tc.CP0_TCScheFBack = arg1;
   1044     else
   1045         env->tcs[other_tc].CP0_TCScheFBack = arg1;
   1046 }
   1047 
   1048 void helper_mtc0_entrylo1 (target_ulong arg1)
   1049 {
   1050     /* Large physaddr (PABITS) not implemented */
   1051     /* 1k pages not implemented */
   1052     env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
   1053 }
   1054 
   1055 void helper_mtc0_context (target_ulong arg1)
   1056 {
   1057     env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
   1058 }
   1059 
   1060 void helper_mtc0_pagemask (target_ulong arg1)
   1061 {
   1062     /* 1k pages not implemented */
   1063     env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
   1064 }
   1065 
   1066 void helper_mtc0_pagegrain (target_ulong arg1)
   1067 {
   1068     /* SmartMIPS not implemented */
   1069     /* Large physaddr (PABITS) not implemented */
   1070     /* 1k pages not implemented */
   1071     env->CP0_PageGrain = 0;
   1072 }
   1073 
   1074 void helper_mtc0_wired (target_ulong arg1)
   1075 {
   1076     env->CP0_Wired = arg1 % env->tlb->nb_tlb;
   1077 }
   1078 
   1079 void helper_mtc0_srsconf0 (target_ulong arg1)
   1080 {
   1081     env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
   1082 }
   1083 
   1084 void helper_mtc0_srsconf1 (target_ulong arg1)
   1085 {
   1086     env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
   1087 }
   1088 
   1089 void helper_mtc0_srsconf2 (target_ulong arg1)
   1090 {
   1091     env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
   1092 }
   1093 
   1094 void helper_mtc0_srsconf3 (target_ulong arg1)
   1095 {
   1096     env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
   1097 }
   1098 
   1099 void helper_mtc0_srsconf4 (target_ulong arg1)
   1100 {
   1101     env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
   1102 }
   1103 
   1104 void helper_mtc0_hwrena (target_ulong arg1)
   1105 {
   1106     env->CP0_HWREna = arg1 & 0x0000000F;
   1107 }
   1108 
   1109 void helper_mtc0_count (target_ulong arg1)
   1110 {
   1111     cpu_mips_store_count(env, arg1);
   1112 }
   1113 
   1114 void helper_mtc0_entryhi (target_ulong arg1)
   1115 {
   1116     target_ulong old, val;
   1117 
   1118     /* 1k pages not implemented */
   1119     val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
   1120 #if defined(TARGET_MIPS64)
   1121     val &= env->SEGMask;
   1122 #endif
   1123     old = env->CP0_EntryHi;
   1124     env->CP0_EntryHi = val;
   1125     if (env->CP0_Config3 & (1 << CP0C3_MT)) {
   1126         uint32_t tcst = env->active_tc.CP0_TCStatus & ~0xff;
   1127         env->active_tc.CP0_TCStatus = tcst | (val & 0xff);
   1128     }
   1129     /* If the ASID changes, flush qemu's TLB.  */
   1130     if ((old & 0xFF) != (val & 0xFF))
   1131         cpu_mips_tlb_flush(env, 1);
   1132 }
   1133 
   1134 void helper_mttc0_entryhi(target_ulong arg1)
   1135 {
   1136     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1137     int32_t tcstatus;
   1138 
   1139     env->CP0_EntryHi = (env->CP0_EntryHi & 0xff) | (arg1 & ~0xff);
   1140     if (other_tc == env->current_tc) {
   1141         tcstatus = (env->active_tc.CP0_TCStatus & ~0xff) | (arg1 & 0xff);
   1142         env->active_tc.CP0_TCStatus = tcstatus;
   1143     } else {
   1144         tcstatus = (env->tcs[other_tc].CP0_TCStatus & ~0xff) | (arg1 & 0xff);
   1145         env->tcs[other_tc].CP0_TCStatus = tcstatus;
   1146     }
   1147 }
   1148 
   1149 void helper_mtc0_compare (target_ulong arg1)
   1150 {
   1151     cpu_mips_store_compare(env, arg1);
   1152 }
   1153 
   1154 void helper_mtc0_status (target_ulong arg1)
   1155 {
   1156     uint32_t val, old;
   1157     uint32_t mask = env->CP0_Status_rw_bitmask;
   1158 
   1159     val = arg1 & mask;
   1160     old = env->CP0_Status;
   1161     env->CP0_Status = (env->CP0_Status & ~mask) | val;
   1162     compute_hflags(env);
   1163     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
   1164         qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
   1165                 old, old & env->CP0_Cause & CP0Ca_IP_mask,
   1166                 val, val & env->CP0_Cause & CP0Ca_IP_mask,
   1167                 env->CP0_Cause);
   1168         switch (env->hflags & MIPS_HFLAG_KSU) {
   1169         case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
   1170         case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
   1171         case MIPS_HFLAG_KM: qemu_log("\n"); break;
   1172         default: cpu_abort(env, "Invalid MMU mode!\n"); break;
   1173         }
   1174     }
   1175     cpu_mips_update_irq(env);
   1176 }
   1177 
   1178 void helper_mttc0_status(target_ulong arg1)
   1179 {
   1180     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1181     int32_t tcstatus = env->tcs[other_tc].CP0_TCStatus;
   1182 
   1183     env->CP0_Status = arg1 & ~0xf1000018;
   1184     tcstatus = (tcstatus & ~(0xf << CP0TCSt_TCU0)) | (arg1 & (0xf << CP0St_CU0));
   1185     tcstatus = (tcstatus & ~(1 << CP0TCSt_TMX)) | ((arg1 & (1 << CP0St_MX)) << (CP0TCSt_TMX - CP0St_MX));
   1186     tcstatus = (tcstatus & ~(0x3 << CP0TCSt_TKSU)) | ((arg1 & (0x3 << CP0St_KSU)) << (CP0TCSt_TKSU - CP0St_KSU));
   1187     if (other_tc == env->current_tc)
   1188         env->active_tc.CP0_TCStatus = tcstatus;
   1189     else
   1190         env->tcs[other_tc].CP0_TCStatus = tcstatus;
   1191 }
   1192 
   1193 void helper_mtc0_intctl (target_ulong arg1)
   1194 {
   1195     /* vectored interrupts not implemented, no performance counters. */
   1196     env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000002e0) | (arg1 & 0x000002e0);
   1197 }
   1198 
   1199 void helper_mtc0_srsctl (target_ulong arg1)
   1200 {
   1201     uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
   1202     env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
   1203 }
   1204 
   1205 void helper_mtc0_cause (target_ulong arg1)
   1206 {
   1207     uint32_t mask = 0x00C00300;
   1208     uint32_t old = env->CP0_Cause;
   1209 
   1210     if (env->insn_flags & ISA_MIPS32R2)
   1211         mask |= 1 << CP0Ca_DC;
   1212 
   1213     env->CP0_Cause = (env->CP0_Cause & ~mask) | (arg1 & mask);
   1214 
   1215     if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) {
   1216         if (env->CP0_Cause & (1 << CP0Ca_DC))
   1217             cpu_mips_stop_count(env);
   1218         else
   1219             cpu_mips_start_count(env);
   1220     }
   1221 
    /* Handle a software interrupt as a hardware one, as they
       are very similar. */
   1224     if (arg1 & CP0Ca_IP_mask) {
   1225         cpu_mips_update_irq(env);
   1226     }
   1227 }
   1228 
   1229 void helper_mtc0_ebase (target_ulong arg1)
   1230 {
   1231     /* vectored interrupts not implemented */
   1232     /* Multi-CPU not implemented */
   1233     env->CP0_EBase = 0x80000000 | (arg1 & 0x3FFFF000);
   1234 }
   1235 
   1236 void helper_mtc0_config0 (target_ulong arg1)
   1237 {
   1238     env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
   1239 }
   1240 
   1241 void helper_mtc0_config2 (target_ulong arg1)
   1242 {
   1243     /* tertiary/secondary caches not implemented */
   1244     env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
   1245 }
   1246 
   1247 void helper_mtc0_lladdr (target_ulong arg1)
   1248 {
   1249     target_long mask = env->CP0_LLAddr_rw_bitmask;
   1250     arg1 = arg1 << env->CP0_LLAddr_shift;
   1251     env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
   1252 }
   1253 
   1254 void helper_mtc0_watchlo (target_ulong arg1, uint32_t sel)
   1255 {
   1256     /* Watch exceptions for instructions, data loads, data stores
   1257        not implemented. */
   1258     env->CP0_WatchLo[sel] = (arg1 & ~0x7);
   1259 }
   1260 
   1261 void helper_mtc0_watchhi (target_ulong arg1, uint32_t sel)
   1262 {
   1263     env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
   1264     env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
   1265 }
   1266 
   1267 void helper_mtc0_xcontext (target_ulong arg1)
   1268 {
   1269     target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
   1270     env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
   1271 }
   1272 
   1273 void helper_mtc0_framemask (target_ulong arg1)
   1274 {
   1275     env->CP0_Framemask = arg1; /* XXX */
   1276 }
   1277 
   1278 void helper_mtc0_debug (target_ulong arg1)
   1279 {
   1280     env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
   1281     if (arg1 & (1 << CP0DB_DM))
   1282         env->hflags |= MIPS_HFLAG_DM;
   1283     else
   1284         env->hflags &= ~MIPS_HFLAG_DM;
   1285 }
   1286 
   1287 void helper_mttc0_debug(target_ulong arg1)
   1288 {
   1289     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1290     uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
   1291 
   1292     /* XXX: Might be wrong, check with EJTAG spec. */
   1293     if (other_tc == env->current_tc)
   1294         env->active_tc.CP0_Debug_tcstatus = val;
   1295     else
   1296         env->tcs[other_tc].CP0_Debug_tcstatus = val;
   1297     env->CP0_Debug = (env->CP0_Debug & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
   1298                      (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
   1299 }
   1300 
   1301 void helper_mtc0_performance0 (target_ulong arg1)
   1302 {
   1303     env->CP0_Performance0 = arg1 & 0x000007ff;
   1304 }
   1305 
   1306 void helper_mtc0_taglo (target_ulong arg1)
   1307 {
   1308     env->CP0_TagLo = arg1 & 0xFFFFFCF6;
   1309 }
   1310 
   1311 void helper_mtc0_datalo (target_ulong arg1)
   1312 {
   1313     env->CP0_DataLo = arg1; /* XXX */
   1314 }
   1315 
   1316 void helper_mtc0_taghi (target_ulong arg1)
   1317 {
   1318     env->CP0_TagHi = arg1; /* XXX */
   1319 }
   1320 
   1321 void helper_mtc0_datahi (target_ulong arg1)
   1322 {
   1323     env->CP0_DataHi = arg1; /* XXX */
   1324 }
   1325 
   1326 /* MIPS MT functions */
   1327 target_ulong helper_mftgpr(uint32_t sel)
   1328 {
   1329     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1330 
   1331     if (other_tc == env->current_tc)
   1332         return env->active_tc.gpr[sel];
   1333     else
   1334         return env->tcs[other_tc].gpr[sel];
   1335 }
   1336 
   1337 target_ulong helper_mftlo(uint32_t sel)
   1338 {
   1339     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1340 
   1341     if (other_tc == env->current_tc)
   1342         return env->active_tc.LO[sel];
   1343     else
   1344         return env->tcs[other_tc].LO[sel];
   1345 }
   1346 
   1347 target_ulong helper_mfthi(uint32_t sel)
   1348 {
   1349     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1350 
   1351     if (other_tc == env->current_tc)
   1352         return env->active_tc.HI[sel];
   1353     else
   1354         return env->tcs[other_tc].HI[sel];
   1355 }
   1356 
   1357 target_ulong helper_mftacx(uint32_t sel)
   1358 {
   1359     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1360 
   1361     if (other_tc == env->current_tc)
   1362         return env->active_tc.ACX[sel];
   1363     else
   1364         return env->tcs[other_tc].ACX[sel];
   1365 }
   1366 
   1367 target_ulong helper_mftdsp(void)
   1368 {
   1369     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1370 
   1371     if (other_tc == env->current_tc)
   1372         return env->active_tc.DSPControl;
   1373     else
   1374         return env->tcs[other_tc].DSPControl;
   1375 }
   1376 
   1377 void helper_mttgpr(target_ulong arg1, uint32_t sel)
   1378 {
   1379     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1380 
   1381     if (other_tc == env->current_tc)
   1382         env->active_tc.gpr[sel] = arg1;
   1383     else
   1384         env->tcs[other_tc].gpr[sel] = arg1;
   1385 }
   1386 
   1387 void helper_mttlo(target_ulong arg1, uint32_t sel)
   1388 {
   1389     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1390 
   1391     if (other_tc == env->current_tc)
   1392         env->active_tc.LO[sel] = arg1;
   1393     else
   1394         env->tcs[other_tc].LO[sel] = arg1;
   1395 }
   1396 
   1397 void helper_mtthi(target_ulong arg1, uint32_t sel)
   1398 {
   1399     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1400 
   1401     if (other_tc == env->current_tc)
   1402         env->active_tc.HI[sel] = arg1;
   1403     else
   1404         env->tcs[other_tc].HI[sel] = arg1;
   1405 }
   1406 
   1407 void helper_mttacx(target_ulong arg1, uint32_t sel)
   1408 {
   1409     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1410 
   1411     if (other_tc == env->current_tc)
   1412         env->active_tc.ACX[sel] = arg1;
   1413     else
   1414         env->tcs[other_tc].ACX[sel] = arg1;
   1415 }
   1416 
   1417 void helper_mttdsp(target_ulong arg1)
   1418 {
   1419     int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
   1420 
   1421     if (other_tc == env->current_tc)
   1422         env->active_tc.DSPControl = arg1;
   1423     else
   1424         env->tcs[other_tc].DSPControl = arg1;
   1425 }
   1426 
   1427 /* MIPS MT functions */
   1428 target_ulong helper_dmt(target_ulong arg1)
   1429 {
   1430     // TODO
   1431     arg1 = 0;
   1432     // rt = arg1
   1433 
   1434     return arg1;
   1435 }
   1436 
   1437 target_ulong helper_emt(target_ulong arg1)
   1438 {
   1439     // TODO
   1440     arg1 = 0;
   1441     // rt = arg1
   1442 
   1443     return arg1;
   1444 }
   1445 
   1446 target_ulong helper_dvpe(target_ulong arg1)
   1447 {
   1448     // TODO
   1449     arg1 = 0;
   1450     // rt = arg1
   1451 
   1452     return arg1;
   1453 }
   1454 
   1455 target_ulong helper_evpe(target_ulong arg1)
   1456 {
   1457     // TODO
   1458     arg1 = 0;
   1459     // rt = arg1
   1460 
   1461     return arg1;
   1462 }
   1463 #endif /* !CONFIG_USER_ONLY */
   1464 
   1465 void helper_fork(target_ulong arg1, target_ulong arg2)
   1466 {
   1467     // arg1 = rt, arg2 = rs
   1468     arg1 = 0;
   1469     // TODO: store to TC register
   1470 }
   1471 
   1472 target_ulong helper_yield(target_ulong arg1)
   1473 {
    /* arg1 is unsigned; compare as signed so negative YIELD qualifiers
       are recognized. */
    if ((target_long)arg1 < 0) {
   1475         /* No scheduling policy implemented. */
   1476         if (arg1 != -2) {
   1477             if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
   1478                 env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
   1479                 env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
   1480                 env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
   1481                 helper_raise_exception(EXCP_THREAD);
   1482             }
   1483         }
   1484     } else if (arg1 == 0) {
   1485         if (0 /* TODO: TC underflow */) {
   1486             env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
   1487             helper_raise_exception(EXCP_THREAD);
   1488         } else {
   1489             // TODO: Deallocate TC
   1490         }
   1491     } else if (arg1 > 0) {
   1492         /* Yield qualifier inputs not implemented. */
   1493         env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
   1494         env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
   1495         helper_raise_exception(EXCP_THREAD);
   1496     }
   1497     return env->CP0_YQMask;
   1498 }
   1499 
   1500 #ifndef CONFIG_USER_ONLY
   1501 /* TLB management */
   1502 void cpu_mips_tlb_flush (CPUState *env, int flush_global)
   1503 {
   1504     /* Flush qemu's TLB and discard all shadowed entries.  */
   1505     tlb_flush (env, flush_global);
   1506     env->tlb->tlb_in_use = env->tlb->nb_tlb;
   1507 }
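/* tlb_in_use can exceed nb_tlb: replaced guest TLB entries may be kept
   around as shadow copies until they are explicitly discarded, so their
   host mappings can be invalidated lazily.  r4k_mips_tlb_flush_extra()
   below drops the shadow entries from a given index onwards. */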
   1508 
   1509 static void r4k_mips_tlb_flush_extra (CPUState *env, int first)
   1510 {
   1511     /* Discard entries from env->tlb[first] onwards.  */
   1512     while (env->tlb->tlb_in_use > first) {
   1513         r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
   1514     }
   1515 }
   1516 
   1517 static void r4k_fill_tlb (int idx)
   1518 {
   1519     r4k_tlb_t *tlb;
   1520 
   1521     /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
   1522     tlb = &env->tlb->mmu.r4k.tlb[idx];
   1523     tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
   1524 #if defined(TARGET_MIPS64)
   1525     tlb->VPN &= env->SEGMask;
   1526 #endif
   1527     tlb->ASID = env->CP0_EntryHi & 0xFF;
   1528     tlb->PageMask = env->CP0_PageMask;
   1529     tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
   1530     tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
   1531     tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
   1532     tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
   1533     tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
   1534     tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
   1535     tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
   1536     tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
   1537     tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
   1538 }
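/* EntryLo bit layout used above (and reassembled by r4k_helper_tlbr):
   bit 0 = G (global; set in the TLB entry only when both EntryLo0 and
   EntryLo1 have it), bit 1 = V (valid), bit 2 = D (dirty/writable),
   bits 5..3 = C (cache coherency attribute), bits 29..6 = PFN.  The PFN
   is shifted left by 12 to form a byte address, so 1k pages are not
   representable here. */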
   1539 
   1540 void r4k_helper_tlbwi (void)
   1541 {
   1542     int idx;
   1543 
   1544     idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
   1545 
   1546     /* Discard cached TLB entries.  We could avoid doing this if the
   1547        tlbwi is just upgrading access permissions on the current entry;
   1548        that might be a further win.  */
   1549     r4k_mips_tlb_flush_extra (env, env->tlb->nb_tlb);
   1550 
   1551     r4k_invalidate_tlb(env, idx, 0);
   1552     r4k_fill_tlb(idx);
   1553 }
   1554 
   1555 void r4k_helper_tlbwr (void)
   1556 {
   1557     int r = cpu_mips_get_random(env);
   1558 
   1559     r4k_invalidate_tlb(env, r, 1);
   1560     r4k_fill_tlb(r);
   1561 }
   1562 
   1563 void r4k_helper_tlbp (void)
   1564 {
   1565     r4k_tlb_t *tlb;
   1566     target_ulong mask;
   1567     target_ulong tag;
   1568     target_ulong VPN;
   1569     uint8_t ASID;
   1570     int i;
   1571 
   1572     ASID = env->CP0_EntryHi & 0xFF;
   1573     for (i = 0; i < env->tlb->nb_tlb; i++) {
   1574         tlb = &env->tlb->mmu.r4k.tlb[i];
   1575         /* 1k pages are not supported. */
   1576         mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
   1577         tag = env->CP0_EntryHi & ~mask;
   1578         VPN = tlb->VPN & ~mask;
   1579         /* Check ASID, virtual page number & size */
   1580         if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
   1581             /* TLB match */
   1582             env->CP0_Index = i;
   1583             break;
   1584         }
   1585     }
   1586     if (i == env->tlb->nb_tlb) {
   1587         /* No match.  Discard any shadow entries, if any of them match.  */
   1588         for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
   1589             tlb = &env->tlb->mmu.r4k.tlb[i];
   1590             /* 1k pages are not supported. */
   1591             mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
   1592             tag = env->CP0_EntryHi & ~mask;
   1593             VPN = tlb->VPN & ~mask;
   1594             /* Check ASID, virtual page number & size */
   1595             if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
   1596                 r4k_mips_tlb_flush_extra (env, i);
   1597                 break;
   1598             }
   1599         }
   1600 
   1601         env->CP0_Index |= 0x80000000;
   1602     }
   1603 }
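/* On a probe miss, bit 31 of CP0_Index (the architected P bit) is set.
   Matching shadow entries are discarded first so that a retried access
   does not keep hitting a stale host mapping. */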
   1604 
   1605 void r4k_helper_tlbr (void)
   1606 {
   1607     r4k_tlb_t *tlb;
   1608     uint8_t ASID;
   1609     int idx;
   1610 
   1611     ASID = env->CP0_EntryHi & 0xFF;
   1612     idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
   1613     tlb = &env->tlb->mmu.r4k.tlb[idx];
   1614 
   1615     /* If this will change the current ASID, flush qemu's TLB.  */
   1616     if (ASID != tlb->ASID)
   1617         cpu_mips_tlb_flush (env, 1);
   1618 
   1619     r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
   1620 
   1621     env->CP0_EntryHi = tlb->VPN | tlb->ASID;
   1622     env->CP0_PageMask = tlb->PageMask;
   1623     env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
   1624                         (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
   1625     env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
   1626                         (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
   1627 }
   1628 
   1629 void helper_tlbwi(void)
   1630 {
   1631     env->tlb->helper_tlbwi();
   1632 }
   1633 
   1634 void helper_tlbwr(void)
   1635 {
   1636     env->tlb->helper_tlbwr();
   1637 }
   1638 
   1639 void helper_tlbp(void)
   1640 {
   1641     env->tlb->helper_tlbp();
   1642 }
   1643 
   1644 void helper_tlbr(void)
   1645 {
   1646     env->tlb->helper_tlbr();
   1647 }
   1648 
   1649 /* Specials */
   1650 target_ulong helper_di (void)
   1651 {
   1652     target_ulong t0 = env->CP0_Status;
   1653 
   1654     env->CP0_Status = t0 & ~(1 << CP0St_IE);
   1655     cpu_mips_update_irq(env);
   1656 
   1657     return t0;
   1658 }
   1659 
   1660 target_ulong helper_ei (void)
   1661 {
   1662     target_ulong t0 = env->CP0_Status;
   1663 
   1664     env->CP0_Status = t0 | (1 << CP0St_IE);
   1665     cpu_mips_update_irq(env);
   1666 
   1667     return t0;
   1668 }
   1669 
   1670 static void debug_pre_eret (void)
   1671 {
   1672     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
   1673         qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
   1674                 env->active_tc.PC, env->CP0_EPC);
   1675         if (env->CP0_Status & (1 << CP0St_ERL))
   1676             qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
   1677         if (env->hflags & MIPS_HFLAG_DM)
   1678             qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
   1679         qemu_log("\n");
   1680     }
   1681 }
   1682 
   1683 static void debug_post_eret (void)
   1684 {
   1685     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
   1686         qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
   1687                 env->active_tc.PC, env->CP0_EPC);
   1688         if (env->CP0_Status & (1 << CP0St_ERL))
   1689             qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
   1690         if (env->hflags & MIPS_HFLAG_DM)
   1691             qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
   1692         switch (env->hflags & MIPS_HFLAG_KSU) {
   1693         case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
   1694         case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
   1695         case MIPS_HFLAG_KM: qemu_log("\n"); break;
   1696         default: cpu_abort(env, "Invalid MMU mode!\n"); break;
   1697         }
   1698     }
   1699 }
   1700 
   1701 void helper_eret (void)
   1702 {
   1703     debug_pre_eret();
   1704     if (env->CP0_Status & (1 << CP0St_ERL)) {
   1705         env->active_tc.PC = env->CP0_ErrorEPC;
   1706         env->CP0_Status &= ~(1 << CP0St_ERL);
   1707     } else {
   1708         env->active_tc.PC = env->CP0_EPC;
   1709         env->CP0_Status &= ~(1 << CP0St_EXL);
   1710     }
   1711     compute_hflags(env);
   1712     debug_post_eret();
   1713     env->lladdr = 1;
   1714 }
   1715 
   1716 void helper_deret (void)
   1717 {
   1718     debug_pre_eret();
   1719     env->active_tc.PC = env->CP0_DEPC;
    1720     env->hflags &= ~MIPS_HFLAG_DM;    /* DERET leaves debug mode */
   1721     compute_hflags(env);
   1722     debug_post_eret();
   1723     env->lladdr = 1;
   1724 }
   1725 #endif /* !CONFIG_USER_ONLY */
   1726 
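         /*
          * RDHWR helpers.  Access is granted when CP0 is usable (MIPS_HFLAG_CP0,
          * i.e. kernel or debug mode) or when the corresponding bit in CP0_HWREna
          * is set; otherwise a Reserved Instruction exception is raised.
          */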
   1727 target_ulong helper_rdhwr_cpunum(void)
   1728 {
   1729     if ((env->hflags & MIPS_HFLAG_CP0) ||
   1730         (env->CP0_HWREna & (1 << 0)))
   1731         return env->CP0_EBase & 0x3ff;
   1732     else
   1733         helper_raise_exception(EXCP_RI);
   1734 
   1735     return 0;
   1736 }
   1737 
   1738 target_ulong helper_rdhwr_synci_step(void)
   1739 {
   1740     if ((env->hflags & MIPS_HFLAG_CP0) ||
   1741         (env->CP0_HWREna & (1 << 1)))
   1742         return env->SYNCI_Step;
   1743     else
   1744         helper_raise_exception(EXCP_RI);
   1745 
   1746     return 0;
   1747 }
   1748 
   1749 target_ulong helper_rdhwr_cc(void)
   1750 {
   1751     if ((env->hflags & MIPS_HFLAG_CP0) ||
   1752         (env->CP0_HWREna & (1 << 2)))
   1753         return env->CP0_Count;
   1754     else
   1755         helper_raise_exception(EXCP_RI);
   1756 
   1757     return 0;
   1758 }
   1759 
   1760 target_ulong helper_rdhwr_ccres(void)
   1761 {
   1762     if ((env->hflags & MIPS_HFLAG_CP0) ||
   1763         (env->CP0_HWREna & (1 << 3)))
   1764         return env->CCRes;
   1765     else
   1766         helper_raise_exception(EXCP_RI);
   1767 
   1768     return 0;
   1769 }
   1770 
   1771 void helper_pmon (int function)
   1772 {
   1773     function /= 2;
   1774     switch (function) {
   1775     case 2: /* TODO: char inbyte(int waitflag); */
   1776         if (env->active_tc.gpr[4] == 0)
   1777             env->active_tc.gpr[2] = -1;
   1778         /* Fall through */
   1779     case 11: /* TODO: char inbyte (void); */
   1780         env->active_tc.gpr[2] = -1;
   1781         break;
   1782     case 3:
   1783     case 12:
   1784         printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
   1785         break;
   1786     case 17:
   1787         break;
   1788     case 158:
   1789         {
   1790             unsigned char *fmt = (void *)(unsigned long)env->active_tc.gpr[4];
   1791             printf("%s", fmt);
   1792         }
   1793         break;
   1794     }
   1795 }
   1796 
   1797 void helper_wait (void)
   1798 {
   1799     env->halted = 1;
   1800     helper_raise_exception(EXCP_HLT);
   1801 }
   1802 
   1803 #if !defined(CONFIG_USER_ONLY)
   1804 
   1805 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr);
   1806 
   1807 #define MMUSUFFIX _mmu
   1808 #define ALIGNED_ONLY
   1809 
   1810 #define SHIFT 0
   1811 #include "softmmu_template.h"
   1812 
   1813 #define SHIFT 1
   1814 #include "softmmu_template.h"
   1815 
   1816 #define SHIFT 2
   1817 #include "softmmu_template.h"
   1818 
   1819 #define SHIFT 3
   1820 #include "softmmu_template.h"
   1821 
   1822 static void do_unaligned_access (target_ulong addr, int is_write, int is_user, void *retaddr)
   1823 {
   1824     env->CP0_BadVAddr = addr;
   1825     do_restore_state (retaddr);
   1826     helper_raise_exception ((is_write == 1) ? EXCP_AdES : EXCP_AdEL);
   1827 }
   1828 
   1829 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
   1830 {
   1831     TranslationBlock *tb;
   1832     CPUState *saved_env;
   1833     unsigned long pc;
   1834     int ret;
   1835 
   1836     /* XXX: hack to restore env in all cases, even if not called from
   1837        generated code */
   1838     saved_env = env;
   1839     env = cpu_single_env;
   1840     ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
   1841     if (ret) {
   1842         if (retaddr) {
   1843             /* now we have a real cpu fault */
   1844             pc = (unsigned long)retaddr;
   1845             tb = tb_find_pc(pc);
   1846             if (tb) {
   1847                 /* the PC is inside the translated code. It means that we have
   1848                    a virtual CPU fault */
   1849                 cpu_restore_state(tb, env, pc);
   1850             }
   1851         }
   1852         helper_raise_exception_err(env->exception_index, env->error_code);
   1853     }
   1854     env = saved_env;
   1855 }
   1856 
   1857 void do_unassigned_access(target_phys_addr_t addr, int is_write, int is_exec,
   1858                           int unused, int size)
   1859 {
   1860     if (is_exec)
   1861         helper_raise_exception(EXCP_IBE);
   1862     else
   1863         helper_raise_exception(EXCP_DBE);
   1864 }
   1865 /*
   1866  * The following functions are address translation helper functions
   1867  * for fast memory access in QEMU.
   1868  */
   1869 static unsigned long v2p_mmu(target_ulong addr, int is_user)
   1870 {
   1871     int index;
   1872     target_ulong tlb_addr;
   1873     target_phys_addr_t physaddr;
   1874     void *retaddr;
   1875 
   1876     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
   1877 redo:
   1878     tlb_addr = env->tlb_table[is_user][index].addr_read;
   1879     if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
   1880         physaddr = addr + env->tlb_table[is_user][index].addend;
   1881     } else {
    1882         /* the page is not in the TLB: fill it */
   1883         retaddr = GETPC();
   1884         tlb_fill(addr, 0, is_user, retaddr);
   1885         goto redo;
   1886     }
   1887     return physaddr;
   1888 }
   1889 
    1890 /*
    1891  * Translate a virtual address of the simulated OS into the corresponding
    1892  * address in the simulation host (not the physical address of the
    1893  * simulated OS).
    1894  */
   1895 target_phys_addr_t v2p(target_ulong ptr, int is_user)
   1896 {
   1897     CPUState *saved_env;
   1898     int index;
   1899     target_ulong addr;
   1900     target_phys_addr_t physaddr;
   1901 
   1902     saved_env = env;
   1903     env = cpu_single_env;
   1904     addr = ptr;
   1905     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
   1906     if (__builtin_expect(env->tlb_table[is_user][index].addr_read !=
   1907                 (addr & TARGET_PAGE_MASK), 0)) {
   1908         physaddr = v2p_mmu(addr, is_user);
   1909     } else {
    1910         physaddr = (target_phys_addr_t)addr + env->tlb_table[is_user][index].addend;
   1911     }
   1912     env = saved_env;
   1913     return physaddr;
   1914 }
   1915 
   1916 /* copy a string from the simulated virtual space to a buffer in QEMU */
   1917 void vstrcpy(target_ulong ptr, char *buf, int max)
   1918 {
   1919     char *phys = 0;
   1920     unsigned long page = 0;
   1921 
   1922     if (buf == NULL) return;
   1923 
   1924     while (max) {
   1925         if ((ptr & TARGET_PAGE_MASK) != page) {
   1926             phys = (char *)v2p(ptr, 0);
   1927             page = ptr & TARGET_PAGE_MASK;
   1928         }
   1929         *buf = *phys;
   1930         if (*phys == '\0')
   1931             return;
   1932         ptr ++;
   1933         buf ++;
   1934         phys ++;
   1935         max --;
   1936     }
   1937 }
   1938 
   1939 #endif /* !CONFIG_USER_ONLY */
   1940 
   1941 /* Complex FPU operations which may need stack space. */
   1942 
   1943 #define FLOAT_ONE32 make_float32(0x3f8 << 20)
   1944 #define FLOAT_ONE64 make_float64(0x3ffULL << 52)
   1945 #define FLOAT_TWO32 make_float32(1 << 30)
   1946 #define FLOAT_TWO64 make_float64(1ULL << 62)
   1947 #define FLOAT_QNAN32 0x7fbfffff
   1948 #define FLOAT_QNAN64 0x7ff7ffffffffffffULL
   1949 #define FLOAT_SNAN32 0x7fffffff
   1950 #define FLOAT_SNAN64 0x7fffffffffffffffULL
   1951 
    1952 /* Convert the MIPS rounding mode in FCR31 to the IEEE (softfloat) library's rounding mode. */
   1953 static unsigned int ieee_rm[] = {
   1954     float_round_nearest_even,
   1955     float_round_to_zero,
   1956     float_round_up,
   1957     float_round_down
   1958 };
   1959 
   1960 #define RESTORE_ROUNDING_MODE \
   1961     set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3], &env->active_fpu.fp_status)
   1962 
   1963 #define RESTORE_FLUSH_MODE \
   1964     set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0, &env->active_fpu.fp_status);
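
         /*
          * The index into ieee_rm[] is the RM field of FCR31 (bits 1:0):
          * 0 rounds to nearest-even, 1 toward zero, 2 toward +infinity and
          * 3 toward -infinity.  Bit 24 of FCR31 is the FS (flush-to-zero) bit,
          * which RESTORE_FLUSH_MODE propagates into the softfloat status.
          */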
   1965 
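         /*
          * CP1 control register numbers handled by CFC1/CTC1 below: 0 is FIR,
          * 25 is FCCR, 26 is FEXR, 28 is FENR and 31 is the full FCSR; any other
          * number reads back FCSR here and is ignored on writes.
          */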
   1966 target_ulong helper_cfc1 (uint32_t reg)
   1967 {
   1968     target_ulong arg1;
   1969 
   1970     switch (reg) {
   1971     case 0:
   1972         arg1 = (int32_t)env->active_fpu.fcr0;
   1973         break;
   1974     case 25:
   1975         arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
   1976         break;
   1977     case 26:
   1978         arg1 = env->active_fpu.fcr31 & 0x0003f07c;
   1979         break;
   1980     case 28:
   1981         arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
   1982         break;
   1983     default:
   1984         arg1 = (int32_t)env->active_fpu.fcr31;
   1985         break;
   1986     }
   1987 
   1988     return arg1;
   1989 }
   1990 
   1991 void helper_ctc1 (target_ulong arg1, uint32_t reg)
   1992 {
   1993     switch(reg) {
   1994     case 25:
   1995         if (arg1 & 0xffffff00)
   1996             return;
   1997         env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
   1998                      ((arg1 & 0x1) << 23);
   1999         break;
   2000     case 26:
   2001         if (arg1 & 0x007c0000)
   2002             return;
   2003         env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
   2004         break;
   2005     case 28:
   2006         if (arg1 & 0x007c0000)
   2007             return;
   2008         env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
   2009                      ((arg1 & 0x4) << 22);
   2010         break;
   2011     case 31:
   2012         if (arg1 & 0x007c0000)
   2013             return;
   2014         env->active_fpu.fcr31 = arg1;
   2015         break;
   2016     default:
   2017         return;
   2018     }
   2019     /* set rounding mode */
   2020     RESTORE_ROUNDING_MODE;
   2021     /* set flush-to-zero mode */
   2022     RESTORE_FLUSH_MODE;
   2023     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2024     if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
   2025         helper_raise_exception(EXCP_FPE);
   2026 }
   2027 
   2028 static inline char ieee_ex_to_mips(char xcpt)
   2029 {
   2030     return (xcpt & float_flag_inexact) >> 5 |
   2031            (xcpt & float_flag_underflow) >> 3 |
   2032            (xcpt & float_flag_overflow) >> 1 |
   2033            (xcpt & float_flag_divbyzero) << 1 |
   2034            (xcpt & float_flag_invalid) << 4;
   2035 }
   2036 
   2037 static inline char mips_ex_to_ieee(char xcpt)
   2038 {
   2039     return (xcpt & FP_INEXACT) << 5 |
   2040            (xcpt & FP_UNDERFLOW) << 3 |
   2041            (xcpt & FP_OVERFLOW) << 1 |
   2042            (xcpt & FP_DIV0) >> 1 |
   2043            (xcpt & FP_INVALID) >> 4;
   2044 }
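
         /*
          * Illustrative sketch only: the two conversions above are inverses on the
          * five architectural flags, e.g. an overflow|inexact softfloat result maps
          * to FP_OVERFLOW|FP_INEXACT and back.  The function name
          * example_fp_flag_roundtrip() is hypothetical.
          */
         static inline int example_fp_flag_roundtrip(void)
         {
             char m = ieee_ex_to_mips(float_flag_overflow | float_flag_inexact);

             return m == (FP_OVERFLOW | FP_INEXACT) &&
                    mips_ex_to_ieee(m) == (float_flag_overflow | float_flag_inexact);
         }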
   2045 
   2046 static inline void update_fcr31(void)
   2047 {
   2048     int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));
   2049 
   2050     SET_FP_CAUSE(env->active_fpu.fcr31, tmp);
   2051     if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp)
   2052         helper_raise_exception(EXCP_FPE);
   2053     else
   2054         UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
   2055 }
   2056 
    2057 /* Float support.
    2058    Single precision routines have an "s" suffix, double precision a
    2059    "d" suffix, 32-bit integer "w", 64-bit integer "l", paired single "ps",
    2060    paired single lower "pl", paired single upper "pu".  */
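
         /*
          * For example (mapping is illustrative): helper_float_cvtd_s() backs
          * CVT.D.S, helper_float_cvtl_d() backs CVT.L.D and helper_float_add_ps()
          * backs ADD.PS -- for conversions the destination format comes first,
          * then the source format.
          */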
   2061 
   2062 /* unary operations, modifying fp status  */
   2063 uint64_t helper_float_sqrt_d(uint64_t fdt0)
   2064 {
   2065     return float64_sqrt(fdt0, &env->active_fpu.fp_status);
   2066 }
   2067 
   2068 uint32_t helper_float_sqrt_s(uint32_t fst0)
   2069 {
   2070     return float32_sqrt(fst0, &env->active_fpu.fp_status);
   2071 }
   2072 
   2073 uint64_t helper_float_cvtd_s(uint32_t fst0)
   2074 {
   2075     uint64_t fdt2;
   2076 
   2077     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2078     fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
   2079     update_fcr31();
   2080     return fdt2;
   2081 }
   2082 
   2083 uint64_t helper_float_cvtd_w(uint32_t wt0)
   2084 {
   2085     uint64_t fdt2;
   2086 
   2087     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2088     fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
   2089     update_fcr31();
   2090     return fdt2;
   2091 }
   2092 
   2093 uint64_t helper_float_cvtd_l(uint64_t dt0)
   2094 {
   2095     uint64_t fdt2;
   2096 
   2097     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2098     fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
   2099     update_fcr31();
   2100     return fdt2;
   2101 }
   2102 
   2103 uint64_t helper_float_cvtl_d(uint64_t fdt0)
   2104 {
   2105     uint64_t dt2;
   2106 
   2107     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2108     dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
   2109     update_fcr31();
   2110     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2111         dt2 = FLOAT_SNAN64;
   2112     return dt2;
   2113 }
   2114 
   2115 uint64_t helper_float_cvtl_s(uint32_t fst0)
   2116 {
   2117     uint64_t dt2;
   2118 
   2119     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2120     dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
   2121     update_fcr31();
   2122     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2123         dt2 = FLOAT_SNAN64;
   2124     return dt2;
   2125 }
   2126 
   2127 uint64_t helper_float_cvtps_pw(uint64_t dt0)
   2128 {
   2129     uint32_t fst2;
   2130     uint32_t fsth2;
   2131 
   2132     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2133     fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
   2134     fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
   2135     update_fcr31();
   2136     return ((uint64_t)fsth2 << 32) | fst2;
   2137 }
   2138 
   2139 uint64_t helper_float_cvtpw_ps(uint64_t fdt0)
   2140 {
   2141     uint32_t wt2;
   2142     uint32_t wth2;
   2143 
   2144     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2145     wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
   2146     wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
   2147     update_fcr31();
   2148     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID)) {
   2149         wt2 = FLOAT_SNAN32;
   2150         wth2 = FLOAT_SNAN32;
   2151     }
   2152     return ((uint64_t)wth2 << 32) | wt2;
   2153 }
   2154 
   2155 uint32_t helper_float_cvts_d(uint64_t fdt0)
   2156 {
   2157     uint32_t fst2;
   2158 
   2159     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2160     fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
   2161     update_fcr31();
   2162     return fst2;
   2163 }
   2164 
   2165 uint32_t helper_float_cvts_w(uint32_t wt0)
   2166 {
   2167     uint32_t fst2;
   2168 
   2169     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2170     fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
   2171     update_fcr31();
   2172     return fst2;
   2173 }
   2174 
   2175 uint32_t helper_float_cvts_l(uint64_t dt0)
   2176 {
   2177     uint32_t fst2;
   2178 
   2179     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2180     fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
   2181     update_fcr31();
   2182     return fst2;
   2183 }
   2184 
   2185 uint32_t helper_float_cvts_pl(uint32_t wt0)
   2186 {
   2187     uint32_t wt2;
   2188 
   2189     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2190     wt2 = wt0;
   2191     update_fcr31();
   2192     return wt2;
   2193 }
   2194 
   2195 uint32_t helper_float_cvts_pu(uint32_t wth0)
   2196 {
   2197     uint32_t wt2;
   2198 
   2199     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2200     wt2 = wth0;
   2201     update_fcr31();
   2202     return wt2;
   2203 }
   2204 
   2205 uint32_t helper_float_cvtw_s(uint32_t fst0)
   2206 {
   2207     uint32_t wt2;
   2208 
   2209     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2210     wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
   2211     update_fcr31();
   2212     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2213         wt2 = FLOAT_SNAN32;
   2214     return wt2;
   2215 }
   2216 
   2217 uint32_t helper_float_cvtw_d(uint64_t fdt0)
   2218 {
   2219     uint32_t wt2;
   2220 
   2221     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2222     wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
   2223     update_fcr31();
   2224     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2225         wt2 = FLOAT_SNAN32;
   2226     return wt2;
   2227 }
   2228 
   2229 uint64_t helper_float_roundl_d(uint64_t fdt0)
   2230 {
   2231     uint64_t dt2;
   2232 
   2233     set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
   2234     dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
   2235     RESTORE_ROUNDING_MODE;
   2236     update_fcr31();
   2237     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2238         dt2 = FLOAT_SNAN64;
   2239     return dt2;
   2240 }
   2241 
   2242 uint64_t helper_float_roundl_s(uint32_t fst0)
   2243 {
   2244     uint64_t dt2;
   2245 
   2246     set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
   2247     dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
   2248     RESTORE_ROUNDING_MODE;
   2249     update_fcr31();
   2250     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2251         dt2 = FLOAT_SNAN64;
   2252     return dt2;
   2253 }
   2254 
   2255 uint32_t helper_float_roundw_d(uint64_t fdt0)
   2256 {
   2257     uint32_t wt2;
   2258 
   2259     set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
   2260     wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
   2261     RESTORE_ROUNDING_MODE;
   2262     update_fcr31();
   2263     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2264         wt2 = FLOAT_SNAN32;
   2265     return wt2;
   2266 }
   2267 
   2268 uint32_t helper_float_roundw_s(uint32_t fst0)
   2269 {
   2270     uint32_t wt2;
   2271 
   2272     set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
   2273     wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
   2274     RESTORE_ROUNDING_MODE;
   2275     update_fcr31();
   2276     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2277         wt2 = FLOAT_SNAN32;
   2278     return wt2;
   2279 }
   2280 
   2281 uint64_t helper_float_truncl_d(uint64_t fdt0)
   2282 {
   2283     uint64_t dt2;
   2284 
   2285     dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
   2286     update_fcr31();
   2287     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2288         dt2 = FLOAT_SNAN64;
   2289     return dt2;
   2290 }
   2291 
   2292 uint64_t helper_float_truncl_s(uint32_t fst0)
   2293 {
   2294     uint64_t dt2;
   2295 
   2296     dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
   2297     update_fcr31();
   2298     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2299         dt2 = FLOAT_SNAN64;
   2300     return dt2;
   2301 }
   2302 
   2303 uint32_t helper_float_truncw_d(uint64_t fdt0)
   2304 {
   2305     uint32_t wt2;
   2306 
   2307     wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
   2308     update_fcr31();
   2309     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2310         wt2 = FLOAT_SNAN32;
   2311     return wt2;
   2312 }
   2313 
   2314 uint32_t helper_float_truncw_s(uint32_t fst0)
   2315 {
   2316     uint32_t wt2;
   2317 
   2318     wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
   2319     update_fcr31();
   2320     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2321         wt2 = FLOAT_SNAN32;
   2322     return wt2;
   2323 }
   2324 
   2325 uint64_t helper_float_ceill_d(uint64_t fdt0)
   2326 {
   2327     uint64_t dt2;
   2328 
   2329     set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
   2330     dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
   2331     RESTORE_ROUNDING_MODE;
   2332     update_fcr31();
   2333     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2334         dt2 = FLOAT_SNAN64;
   2335     return dt2;
   2336 }
   2337 
   2338 uint64_t helper_float_ceill_s(uint32_t fst0)
   2339 {
   2340     uint64_t dt2;
   2341 
   2342     set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
   2343     dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
   2344     RESTORE_ROUNDING_MODE;
   2345     update_fcr31();
   2346     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2347         dt2 = FLOAT_SNAN64;
   2348     return dt2;
   2349 }
   2350 
   2351 uint32_t helper_float_ceilw_d(uint64_t fdt0)
   2352 {
   2353     uint32_t wt2;
   2354 
   2355     set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
   2356     wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
   2357     RESTORE_ROUNDING_MODE;
   2358     update_fcr31();
   2359     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2360         wt2 = FLOAT_SNAN32;
   2361     return wt2;
   2362 }
   2363 
   2364 uint32_t helper_float_ceilw_s(uint32_t fst0)
   2365 {
   2366     uint32_t wt2;
   2367 
   2368     set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
   2369     wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
   2370     RESTORE_ROUNDING_MODE;
   2371     update_fcr31();
   2372     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2373         wt2 = FLOAT_SNAN32;
   2374     return wt2;
   2375 }
   2376 
   2377 uint64_t helper_float_floorl_d(uint64_t fdt0)
   2378 {
   2379     uint64_t dt2;
   2380 
   2381     set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
   2382     dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
   2383     RESTORE_ROUNDING_MODE;
   2384     update_fcr31();
   2385     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2386         dt2 = FLOAT_SNAN64;
   2387     return dt2;
   2388 }
   2389 
   2390 uint64_t helper_float_floorl_s(uint32_t fst0)
   2391 {
   2392     uint64_t dt2;
   2393 
   2394     set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
   2395     dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
   2396     RESTORE_ROUNDING_MODE;
   2397     update_fcr31();
   2398     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2399         dt2 = FLOAT_SNAN64;
   2400     return dt2;
   2401 }
   2402 
   2403 uint32_t helper_float_floorw_d(uint64_t fdt0)
   2404 {
   2405     uint32_t wt2;
   2406 
   2407     set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
   2408     wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
   2409     RESTORE_ROUNDING_MODE;
   2410     update_fcr31();
   2411     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2412         wt2 = FLOAT_SNAN32;
   2413     return wt2;
   2414 }
   2415 
   2416 uint32_t helper_float_floorw_s(uint32_t fst0)
   2417 {
   2418     uint32_t wt2;
   2419 
   2420     set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
   2421     wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
   2422     RESTORE_ROUNDING_MODE;
   2423     update_fcr31();
   2424     if (GET_FP_CAUSE(env->active_fpu.fcr31) & (FP_OVERFLOW | FP_INVALID))
   2425         wt2 = FLOAT_SNAN32;
   2426     return wt2;
   2427 }
   2428 
   2429 /* unary operations, not modifying fp status  */
   2430 #define FLOAT_UNOP(name)                                       \
   2431 uint64_t helper_float_ ## name ## _d(uint64_t fdt0)                \
   2432 {                                                              \
   2433     return float64_ ## name(fdt0);                             \
   2434 }                                                              \
   2435 uint32_t helper_float_ ## name ## _s(uint32_t fst0)                \
   2436 {                                                              \
   2437     return float32_ ## name(fst0);                             \
   2438 }                                                              \
   2439 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)               \
   2440 {                                                              \
   2441     uint32_t wt0;                                              \
   2442     uint32_t wth0;                                             \
   2443                                                                \
   2444     wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);                 \
   2445     wth0 = float32_ ## name(fdt0 >> 32);                       \
   2446     return ((uint64_t)wth0 << 32) | wt0;                       \
   2447 }
   2448 FLOAT_UNOP(abs)
   2449 FLOAT_UNOP(chs)
   2450 #undef FLOAT_UNOP
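
         /*
          * For reference, FLOAT_UNOP(abs) expands, in the single-precision case, to
          * roughly:
          *
          *     uint32_t helper_float_abs_s(uint32_t fst0)
          *     {
          *         return float32_abs(fst0);
          *     }
          *
          * abs and chs only clear or flip the sign bit, which is why these helpers
          * never touch the FP status, unlike the operations above and below.
          */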
   2451 
   2452 /* MIPS specific unary operations */
   2453 uint64_t helper_float_recip_d(uint64_t fdt0)
   2454 {
   2455     uint64_t fdt2;
   2456 
   2457     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2458     fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
   2459     update_fcr31();
   2460     return fdt2;
   2461 }
   2462 
   2463 uint32_t helper_float_recip_s(uint32_t fst0)
   2464 {
   2465     uint32_t fst2;
   2466 
   2467     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2468     fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
   2469     update_fcr31();
   2470     return fst2;
   2471 }
   2472 
   2473 uint64_t helper_float_rsqrt_d(uint64_t fdt0)
   2474 {
   2475     uint64_t fdt2;
   2476 
   2477     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2478     fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
   2479     fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
   2480     update_fcr31();
   2481     return fdt2;
   2482 }
   2483 
   2484 uint32_t helper_float_rsqrt_s(uint32_t fst0)
   2485 {
   2486     uint32_t fst2;
   2487 
   2488     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2489     fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
   2490     fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
   2491     update_fcr31();
   2492     return fst2;
   2493 }
   2494 
   2495 uint64_t helper_float_recip1_d(uint64_t fdt0)
   2496 {
   2497     uint64_t fdt2;
   2498 
   2499     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2500     fdt2 = float64_div(FLOAT_ONE64, fdt0, &env->active_fpu.fp_status);
   2501     update_fcr31();
   2502     return fdt2;
   2503 }
   2504 
   2505 uint32_t helper_float_recip1_s(uint32_t fst0)
   2506 {
   2507     uint32_t fst2;
   2508 
   2509     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2510     fst2 = float32_div(FLOAT_ONE32, fst0, &env->active_fpu.fp_status);
   2511     update_fcr31();
   2512     return fst2;
   2513 }
   2514 
   2515 uint64_t helper_float_recip1_ps(uint64_t fdt0)
   2516 {
   2517     uint32_t fst2;
   2518     uint32_t fsth2;
   2519 
   2520     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2521     fst2 = float32_div(FLOAT_ONE32, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
   2522     fsth2 = float32_div(FLOAT_ONE32, fdt0 >> 32, &env->active_fpu.fp_status);
   2523     update_fcr31();
   2524     return ((uint64_t)fsth2 << 32) | fst2;
   2525 }
   2526 
   2527 uint64_t helper_float_rsqrt1_d(uint64_t fdt0)
   2528 {
   2529     uint64_t fdt2;
   2530 
   2531     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2532     fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
   2533     fdt2 = float64_div(FLOAT_ONE64, fdt2, &env->active_fpu.fp_status);
   2534     update_fcr31();
   2535     return fdt2;
   2536 }
   2537 
   2538 uint32_t helper_float_rsqrt1_s(uint32_t fst0)
   2539 {
   2540     uint32_t fst2;
   2541 
   2542     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2543     fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
   2544     fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
   2545     update_fcr31();
   2546     return fst2;
   2547 }
   2548 
   2549 uint64_t helper_float_rsqrt1_ps(uint64_t fdt0)
   2550 {
   2551     uint32_t fst2;
   2552     uint32_t fsth2;
   2553 
   2554     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2555     fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
   2556     fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
   2557     fst2 = float32_div(FLOAT_ONE32, fst2, &env->active_fpu.fp_status);
   2558     fsth2 = float32_div(FLOAT_ONE32, fsth2, &env->active_fpu.fp_status);
   2559     update_fcr31();
   2560     return ((uint64_t)fsth2 << 32) | fst2;
   2561 }
   2562 
   2563 #define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
   2564 
   2565 /* binary operations */
   2566 #define FLOAT_BINOP(name)                                          \
   2567 uint64_t helper_float_ ## name ## _d(uint64_t fdt0, uint64_t fdt1)     \
   2568 {                                                                  \
   2569     uint64_t dt2;                                                  \
   2570                                                                    \
   2571     set_float_exception_flags(0, &env->active_fpu.fp_status);            \
   2572     dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);     \
   2573     update_fcr31();                                                \
   2574     if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
   2575         dt2 = FLOAT_QNAN64;                                        \
   2576     return dt2;                                                    \
   2577 }                                                                  \
   2578                                                                    \
   2579 uint32_t helper_float_ ## name ## _s(uint32_t fst0, uint32_t fst1)     \
   2580 {                                                                  \
   2581     uint32_t wt2;                                                  \
   2582                                                                    \
   2583     set_float_exception_flags(0, &env->active_fpu.fp_status);            \
   2584     wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
   2585     update_fcr31();                                                \
   2586     if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID)                \
   2587         wt2 = FLOAT_QNAN32;                                        \
   2588     return wt2;                                                    \
   2589 }                                                                  \
   2590                                                                    \
   2591 uint64_t helper_float_ ## name ## _ps(uint64_t fdt0, uint64_t fdt1)    \
   2592 {                                                                  \
   2593     uint32_t fst0 = fdt0 & 0XFFFFFFFF;                             \
   2594     uint32_t fsth0 = fdt0 >> 32;                                   \
   2595     uint32_t fst1 = fdt1 & 0XFFFFFFFF;                             \
   2596     uint32_t fsth1 = fdt1 >> 32;                                   \
   2597     uint32_t wt2;                                                  \
   2598     uint32_t wth2;                                                 \
   2599                                                                    \
   2600     set_float_exception_flags(0, &env->active_fpu.fp_status);            \
   2601     wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);     \
   2602     wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status);  \
   2603     update_fcr31();                                                \
   2604     if (GET_FP_CAUSE(env->active_fpu.fcr31) & FP_INVALID) {              \
   2605         wt2 = FLOAT_QNAN32;                                        \
   2606         wth2 = FLOAT_QNAN32;                                       \
   2607     }                                                              \
   2608     return ((uint64_t)wth2 << 32) | wt2;                           \
   2609 }
   2610 
   2611 FLOAT_BINOP(add)
   2612 FLOAT_BINOP(sub)
   2613 FLOAT_BINOP(mul)
   2614 FLOAT_BINOP(div)
   2615 #undef FLOAT_BINOP
   2616 
   2617 /* ternary operations */
   2618 #define FLOAT_TERNOP(name1, name2)                                        \
   2619 uint64_t helper_float_ ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1,  \
   2620                                            uint64_t fdt2)                 \
   2621 {                                                                         \
   2622     fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
   2623     return float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
   2624 }                                                                         \
   2625                                                                           \
   2626 uint32_t helper_float_ ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1,  \
   2627                                            uint32_t fst2)                 \
   2628 {                                                                         \
   2629     fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
   2630     return float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
   2631 }                                                                         \
   2632                                                                           \
   2633 uint64_t helper_float_ ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1, \
   2634                                             uint64_t fdt2)                \
   2635 {                                                                         \
   2636     uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
   2637     uint32_t fsth0 = fdt0 >> 32;                                          \
   2638     uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
   2639     uint32_t fsth1 = fdt1 >> 32;                                          \
   2640     uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
   2641     uint32_t fsth2 = fdt2 >> 32;                                          \
   2642                                                                           \
   2643     fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
   2644     fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
   2645     fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
   2646     fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
   2647     return ((uint64_t)fsth2 << 32) | fst2;                                \
   2648 }
   2649 
   2650 FLOAT_TERNOP(mul, add)
   2651 FLOAT_TERNOP(mul, sub)
   2652 #undef FLOAT_TERNOP
   2653 
   2654 /* negated ternary operations */
   2655 #define FLOAT_NTERNOP(name1, name2)                                       \
   2656 uint64_t helper_float_n ## name1 ## name2 ## _d(uint64_t fdt0, uint64_t fdt1, \
   2657                                            uint64_t fdt2)                 \
   2658 {                                                                         \
   2659     fdt0 = float64_ ## name1 (fdt0, fdt1, &env->active_fpu.fp_status);          \
   2660     fdt2 = float64_ ## name2 (fdt0, fdt2, &env->active_fpu.fp_status);          \
   2661     return float64_chs(fdt2);                                             \
   2662 }                                                                         \
   2663                                                                           \
   2664 uint32_t helper_float_n ## name1 ## name2 ## _s(uint32_t fst0, uint32_t fst1, \
   2665                                            uint32_t fst2)                 \
   2666 {                                                                         \
   2667     fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
   2668     fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
   2669     return float32_chs(fst2);                                             \
   2670 }                                                                         \
   2671                                                                           \
   2672 uint64_t helper_float_n ## name1 ## name2 ## _ps(uint64_t fdt0, uint64_t fdt1,\
   2673                                            uint64_t fdt2)                 \
   2674 {                                                                         \
   2675     uint32_t fst0 = fdt0 & 0XFFFFFFFF;                                    \
   2676     uint32_t fsth0 = fdt0 >> 32;                                          \
   2677     uint32_t fst1 = fdt1 & 0XFFFFFFFF;                                    \
   2678     uint32_t fsth1 = fdt1 >> 32;                                          \
   2679     uint32_t fst2 = fdt2 & 0XFFFFFFFF;                                    \
   2680     uint32_t fsth2 = fdt2 >> 32;                                          \
   2681                                                                           \
   2682     fst0 = float32_ ## name1 (fst0, fst1, &env->active_fpu.fp_status);          \
   2683     fsth0 = float32_ ## name1 (fsth0, fsth1, &env->active_fpu.fp_status);       \
   2684     fst2 = float32_ ## name2 (fst0, fst2, &env->active_fpu.fp_status);          \
   2685     fsth2 = float32_ ## name2 (fsth0, fsth2, &env->active_fpu.fp_status);       \
   2686     fst2 = float32_chs(fst2);                                             \
   2687     fsth2 = float32_chs(fsth2);                                           \
   2688     return ((uint64_t)fsth2 << 32) | fst2;                                \
   2689 }
   2690 
   2691 FLOAT_NTERNOP(mul, add)
   2692 FLOAT_NTERNOP(mul, sub)
   2693 #undef FLOAT_NTERNOP
   2694 
   2695 /* MIPS specific binary operations */
   2696 uint64_t helper_float_recip2_d(uint64_t fdt0, uint64_t fdt2)
   2697 {
   2698     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2699     fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
   2700     fdt2 = float64_chs(float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status));
   2701     update_fcr31();
   2702     return fdt2;
   2703 }
   2704 
   2705 uint32_t helper_float_recip2_s(uint32_t fst0, uint32_t fst2)
   2706 {
   2707     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2708     fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
   2709     fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
   2710     update_fcr31();
   2711     return fst2;
   2712 }
   2713 
   2714 uint64_t helper_float_recip2_ps(uint64_t fdt0, uint64_t fdt2)
   2715 {
   2716     uint32_t fst0 = fdt0 & 0XFFFFFFFF;
   2717     uint32_t fsth0 = fdt0 >> 32;
   2718     uint32_t fst2 = fdt2 & 0XFFFFFFFF;
   2719     uint32_t fsth2 = fdt2 >> 32;
   2720 
   2721     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2722     fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
   2723     fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
   2724     fst2 = float32_chs(float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status));
   2725     fsth2 = float32_chs(float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status));
   2726     update_fcr31();
   2727     return ((uint64_t)fsth2 << 32) | fst2;
   2728 }
   2729 
   2730 uint64_t helper_float_rsqrt2_d(uint64_t fdt0, uint64_t fdt2)
   2731 {
   2732     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2733     fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
   2734     fdt2 = float64_sub(fdt2, FLOAT_ONE64, &env->active_fpu.fp_status);
   2735     fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
   2736     update_fcr31();
   2737     return fdt2;
   2738 }
   2739 
   2740 uint32_t helper_float_rsqrt2_s(uint32_t fst0, uint32_t fst2)
   2741 {
   2742     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2743     fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
   2744     fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
   2745     fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
   2746     update_fcr31();
   2747     return fst2;
   2748 }
   2749 
   2750 uint64_t helper_float_rsqrt2_ps(uint64_t fdt0, uint64_t fdt2)
   2751 {
   2752     uint32_t fst0 = fdt0 & 0XFFFFFFFF;
   2753     uint32_t fsth0 = fdt0 >> 32;
   2754     uint32_t fst2 = fdt2 & 0XFFFFFFFF;
   2755     uint32_t fsth2 = fdt2 >> 32;
   2756 
   2757     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2758     fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
   2759     fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
   2760     fst2 = float32_sub(fst2, FLOAT_ONE32, &env->active_fpu.fp_status);
   2761     fsth2 = float32_sub(fsth2, FLOAT_ONE32, &env->active_fpu.fp_status);
   2762     fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
   2763     fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
   2764     update_fcr31();
   2765     return ((uint64_t)fsth2 << 32) | fst2;
   2766 }
   2767 
   2768 uint64_t helper_float_addr_ps(uint64_t fdt0, uint64_t fdt1)
   2769 {
   2770     uint32_t fst0 = fdt0 & 0XFFFFFFFF;
   2771     uint32_t fsth0 = fdt0 >> 32;
   2772     uint32_t fst1 = fdt1 & 0XFFFFFFFF;
   2773     uint32_t fsth1 = fdt1 >> 32;
   2774     uint32_t fst2;
   2775     uint32_t fsth2;
   2776 
   2777     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2778     fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
   2779     fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
   2780     update_fcr31();
   2781     return ((uint64_t)fsth2 << 32) | fst2;
   2782 }
   2783 
   2784 uint64_t helper_float_mulr_ps(uint64_t fdt0, uint64_t fdt1)
   2785 {
   2786     uint32_t fst0 = fdt0 & 0XFFFFFFFF;
   2787     uint32_t fsth0 = fdt0 >> 32;
   2788     uint32_t fst1 = fdt1 & 0XFFFFFFFF;
   2789     uint32_t fsth1 = fdt1 >> 32;
   2790     uint32_t fst2;
   2791     uint32_t fsth2;
   2792 
   2793     set_float_exception_flags(0, &env->active_fpu.fp_status);
   2794     fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
   2795     fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
   2796     update_fcr31();
   2797     return ((uint64_t)fsth2 << 32) | fst2;
   2798 }
   2799 
   2800 /* compare operations */
   2801 #define FOP_COND_D(op, cond)                                   \
   2802 void helper_cmp_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
   2803 {                                                              \
   2804     int c = cond;                                              \
   2805     update_fcr31();                                            \
   2806     if (c)                                                     \
   2807         SET_FP_COND(cc, env->active_fpu);                      \
   2808     else                                                       \
   2809         CLEAR_FP_COND(cc, env->active_fpu);                    \
   2810 }                                                              \
   2811 void helper_cmpabs_d_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
   2812 {                                                              \
   2813     int c;                                                     \
   2814     fdt0 = float64_abs(fdt0);                                  \
   2815     fdt1 = float64_abs(fdt1);                                  \
   2816     c = cond;                                                  \
   2817     update_fcr31();                                            \
   2818     if (c)                                                     \
   2819         SET_FP_COND(cc, env->active_fpu);                      \
   2820     else                                                       \
   2821         CLEAR_FP_COND(cc, env->active_fpu);                    \
   2822 }
   2823 
   2824 static int float64_is_unordered(int sig, float64 a, float64 b STATUS_PARAM)
   2825 {
   2826     if (float64_is_signaling_nan(a) ||
   2827         float64_is_signaling_nan(b) ||
   2828         (sig && (float64_is_any_nan(a) || float64_is_any_nan(b)))) {
   2829         float_raise(float_flag_invalid, status);
   2830         return 1;
   2831     } else if (float64_is_any_nan(a) || float64_is_any_nan(b)) {
   2832         return 1;
   2833     } else {
   2834         return 0;
   2835     }
   2836 }
   2837 
    2838 /* NOTE: the comma operator makes "cond" evaluate to false, but
    2839  * float*_is_unordered() is still called for its side effects (see the illustration below). */
   2840 FOP_COND_D(f,   (float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status), 0))
   2841 FOP_COND_D(un,  float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status))
   2842 FOP_COND_D(eq,  !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
   2843 FOP_COND_D(ueq, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
   2844 FOP_COND_D(olt, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
   2845 FOP_COND_D(ult, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
   2846 FOP_COND_D(ole, !float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
   2847 FOP_COND_D(ule, float64_is_unordered(0, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
    2848 /* NOTE: the comma operator makes "cond" evaluate to false, but
    2849  * float*_is_unordered() is still called for its side effects. */
   2850 FOP_COND_D(sf,  (float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status), 0))
   2851 FOP_COND_D(ngle,float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status))
   2852 FOP_COND_D(seq, !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
   2853 FOP_COND_D(ngl, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
   2854 FOP_COND_D(lt,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
   2855 FOP_COND_D(nge, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
   2856 FOP_COND_D(le,  !float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status) && float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
   2857 FOP_COND_D(ngt, float64_is_unordered(1, fdt1, fdt0, &env->active_fpu.fp_status)  || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
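
         /*
          * Illustrative sketch only (hypothetical function name): in the C.F/C.SF
          * conditions above, "(float64_is_unordered(...), 0)" relies on the C comma
          * operator, which evaluates its left operand purely for side effects and
          * yields the right operand, so the unordered check still runs while the
          * condition itself is always 0.
          */
         static inline int example_comma_operator(void)
         {
             int side_effect = 0;
             int c = (side_effect = 1, 0);   /* side_effect becomes 1, c becomes 0 */

             return c == 0 && side_effect == 1;  /* always true */
         }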
   2858 
   2859 #define FOP_COND_S(op, cond)                                   \
   2860 void helper_cmp_s_ ## op (uint32_t fst0, uint32_t fst1, int cc)    \
   2861 {                                                              \
   2862     int c = cond;                                              \
   2863     update_fcr31();                                            \
   2864     if (c)                                                     \
   2865         SET_FP_COND(cc, env->active_fpu);                      \
   2866     else                                                       \
   2867         CLEAR_FP_COND(cc, env->active_fpu);                    \
   2868 }                                                              \
   2869 void helper_cmpabs_s_ ## op (uint32_t fst0, uint32_t fst1, int cc) \
   2870 {                                                              \
   2871     int c;                                                     \
   2872     fst0 = float32_abs(fst0);                                  \
   2873     fst1 = float32_abs(fst1);                                  \
   2874     c = cond;                                                  \
   2875     update_fcr31();                                            \
   2876     if (c)                                                     \
   2877         SET_FP_COND(cc, env->active_fpu);                      \
   2878     else                                                       \
   2879         CLEAR_FP_COND(cc, env->active_fpu);                    \
   2880 }
   2881 
   2882 static flag float32_is_unordered(int sig, float32 a, float32 b STATUS_PARAM)
   2883 {
   2884     if (float32_is_signaling_nan(a) ||
   2885         float32_is_signaling_nan(b) ||
   2886         (sig && (float32_is_any_nan(a) || float32_is_any_nan(b)))) {
   2887         float_raise(float_flag_invalid, status);
   2888         return 1;
   2889     } else if (float32_is_any_nan(a) || float32_is_any_nan(b)) {
   2890         return 1;
   2891     } else {
   2892         return 0;
   2893     }
   2894 }
   2895 
    2896 /* NOTE: the comma operator makes "cond" evaluate to false, but
    2897  * float*_is_unordered() is still called for its side effects. */
   2898 FOP_COND_S(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0))
   2899 FOP_COND_S(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status))
   2900 FOP_COND_S(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
   2901 FOP_COND_S(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
   2902 FOP_COND_S(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
   2903 FOP_COND_S(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
   2904 FOP_COND_S(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
   2905 FOP_COND_S(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
    2906 /* NOTE: the comma operator makes "cond" evaluate to false, but
    2907  * float*_is_unordered() is still called for its side effects. */
   2908 FOP_COND_S(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0))
   2909 FOP_COND_S(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status))
   2910 FOP_COND_S(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_eq(fst0, fst1, &env->active_fpu.fp_status))
   2911 FOP_COND_S(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
   2912 FOP_COND_S(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_lt(fst0, fst1, &env->active_fpu.fp_status))
   2913 FOP_COND_S(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
   2914 FOP_COND_S(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status) && float32_le(fst0, fst1, &env->active_fpu.fp_status))
   2915 FOP_COND_S(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)  || float32_le(fst0, fst1, &env->active_fpu.fp_status))
   2916 
   2917 #define FOP_COND_PS(op, condl, condh)                           \
   2918 void helper_cmp_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc)    \
   2919 {                                                               \
   2920     uint32_t fst0 = fdt0 & 0XFFFFFFFF;                          \
   2921     uint32_t fsth0 = fdt0 >> 32;                                \
   2922     uint32_t fst1 = fdt1 & 0XFFFFFFFF;                          \
   2923     uint32_t fsth1 = fdt1 >> 32;                                \
   2924     int cl = condl;                                             \
   2925     int ch = condh;                                             \
   2926                                                                 \
   2927     update_fcr31();                                             \
   2928     if (cl)                                                     \
   2929         SET_FP_COND(cc, env->active_fpu);                       \
   2930     else                                                        \
   2931         CLEAR_FP_COND(cc, env->active_fpu);                     \
   2932     if (ch)                                                     \
   2933         SET_FP_COND(cc + 1, env->active_fpu);                   \
   2934     else                                                        \
   2935         CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
   2936 }                                                               \
   2937 void helper_cmpabs_ps_ ## op (uint64_t fdt0, uint64_t fdt1, int cc) \
   2938 {                                                               \
   2939     uint32_t fst0 = float32_abs(fdt0 & 0XFFFFFFFF);             \
   2940     uint32_t fsth0 = float32_abs(fdt0 >> 32);                   \
   2941     uint32_t fst1 = float32_abs(fdt1 & 0XFFFFFFFF);             \
   2942     uint32_t fsth1 = float32_abs(fdt1 >> 32);                   \
   2943     int cl = condl;                                             \
   2944     int ch = condh;                                             \
   2945                                                                 \
   2946     update_fcr31();                                             \
   2947     if (cl)                                                     \
   2948         SET_FP_COND(cc, env->active_fpu);                       \
   2949     else                                                        \
   2950         CLEAR_FP_COND(cc, env->active_fpu);                     \
   2951     if (ch)                                                     \
   2952         SET_FP_COND(cc + 1, env->active_fpu);                   \
   2953     else                                                        \
   2954         CLEAR_FP_COND(cc + 1, env->active_fpu);                 \
   2955 }
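/*
 * Minimal standalone sketch (not part of the original file): a paired-single
 * (PS) operand packs two single-precision values into one 64-bit register.
 * The macro above takes the low 32 bits as fst0/fst1 and the high 32 bits as
 * fsth0/fsth1, and writes the two comparison results to condition codes cc
 * and cc + 1.  The name "pack_ps" is invented for the illustration.
 */
#if 0   /* illustration only; kept out of the build */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static uint64_t pack_ps(float lower, float upper)
{
    uint32_t lo, hi;
    memcpy(&lo, &lower, sizeof lo);
    memcpy(&hi, &upper, sizeof hi);
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    uint64_t fdt = pack_ps(1.5f, -2.0f);
    uint32_t lo = fdt & 0xFFFFFFFF;
    uint32_t hi = fdt >> 32;
    float flo, fhi;
    memcpy(&flo, &lo, sizeof flo);
    memcpy(&fhi, &hi, sizeof fhi);
    printf("lower = %f, upper = %f\n", flo, fhi);   /* 1.500000 -2.000000 */
    return 0;
}
#endif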
   2956 
   2957 /* NOTE: the comma operator makes "cond" evaluate to false,
   2958  * but float*_is_unordered() is still called. */
   2959 FOP_COND_PS(f,   (float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status), 0),
   2960                  (float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status), 0))
   2961 FOP_COND_PS(un,  float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status),
   2962                  float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status))
   2963 FOP_COND_PS(eq,  !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
   2964                  !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
   2965 FOP_COND_PS(ueq, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
   2966                  float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
   2967 FOP_COND_PS(olt, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
   2968                  !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
   2969 FOP_COND_PS(ult, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
   2970                  float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
   2971 FOP_COND_PS(ole, !float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
   2972                  !float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
   2973 FOP_COND_PS(ule, float32_is_unordered(0, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
   2974                  float32_is_unordered(0, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
   2975 /* NOTE: the comma operator makes "cond" evaluate to false,
   2976  * but float*_is_unordered() is still called. */
   2977 FOP_COND_PS(sf,  (float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status), 0),
   2978                  (float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status), 0))
   2979 FOP_COND_PS(ngle,float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status),
   2980                  float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status))
   2981 FOP_COND_PS(seq, !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_eq(fst0, fst1, &env->active_fpu.fp_status),
   2982                  !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
   2983 FOP_COND_PS(ngl, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
   2984                  float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
   2985 FOP_COND_PS(lt,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_lt(fst0, fst1, &env->active_fpu.fp_status),
   2986                  !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
   2987 FOP_COND_PS(nge, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
   2988                  float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
   2989 FOP_COND_PS(le,  !float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)   && float32_le(fst0, fst1, &env->active_fpu.fp_status),
   2990                  !float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status) && float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
   2991 FOP_COND_PS(ngt, float32_is_unordered(1, fst1, fst0, &env->active_fpu.fp_status)    || float32_le(fst0, fst1, &env->active_fpu.fp_status),
   2992                  float32_is_unordered(1, fsth1, fsth0, &env->active_fpu.fp_status)  || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
   2993