/*
 *  Software MMU support
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu-timer.h"

#define DATA_SIZE (1 << SHIFT)

#if DATA_SIZE == 8
#define SUFFIX q
#define USUFFIX q
#define DATA_TYPE uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define USUFFIX l
#define DATA_TYPE uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define USUFFIX uw
#define DATA_TYPE uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define USUFFIX ub
#define DATA_TYPE uint8_t
#else
#error unsupported data size
#endif
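
/* This file is a template: the includer defines SHIFT (log2 of the access
 * size in bytes) and MMUSUFFIX, then includes this header once per access
 * size. A typical instantiation looks like the sketch below (the exact
 * includer and the MMUSUFFIX value vary between QEMU versions):
 *
 *     #define MMUSUFFIX _mmu
 *     #define SHIFT 0
 *     #include "softmmu_template.h"
 *     #define SHIFT 1
 *     #include "softmmu_template.h"
 *     #define SHIFT 2
 *     #include "softmmu_template.h"
 *     #define SHIFT 3
 *     #include "softmmu_template.h"
 */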

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE 2
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE 0
#define ADDR_READ addr_read
#endif
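
/* Access types passed to tlb_fill() and do_unaligned_access(): 0 is a data
 * load, 1 is a data store and 2 is an instruction fetch. ADDR_READ selects
 * the matching comparator field in the TLB entry. */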

#if defined(CONFIG_MEMCHECK) && !defined(OUTSIDE_JIT) && !defined(SOFTMMU_CODE_ACCESS)
/*
 * Support for the memory access checker.
 * We need to instrument the __ldx/__stx_mmu routines implemented in this file
 * with callbacks to access validation routines implemented by the memory
 * checker. Note that (at least for now) we don't instrument accesses that
 * fetch code (SOFTMMU_CODE_ACCESS controls that). Also, we don't want to
 * instrument code that is used by the emulator itself (OUTSIDE_JIT controls
 * that).
 */
#define CONFIG_MEMCHECK_MMU
#include "memcheck/memcheck_api.h"
#endif  // CONFIG_MEMCHECK && !OUTSIDE_JIT && !SOFTMMU_CODE_ACCESS

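/* The glue() macros below paste tokens together: with SHIFT == 2 and
 * MMUSUFFIX defined as _mmu by the includer, glue(glue(slow_ld, SUFFIX),
 * MMUSUFFIX) expands to slow_ldl_mmu, so the declaration below becomes
 * (a sketch, assuming those parameters):
 *
 *     static uint32_t slow_ldl_mmu(target_ulong addr, int mmu_idx,
 *                                  void *retaddr);
 */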
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr);
static inline DATA_TYPE glue(io_read, SUFFIX)(target_phys_addr_t physaddr,
                                              target_ulong addr,
                                              void *retaddr)
{
    DATA_TYPE res;
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    env->mem_io_pc = (unsigned long)retaddr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
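    /* Dispatch to the registered I/O handler: the first index selects the
     * memory region's handler slot, the second the access size (log2 of the
     * byte count). Handlers exist only for 1-, 2- and 4-byte accesses, so a
     * 64-bit read is split into two 32-bit reads below. */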
#if SHIFT <= 2
    res = io_mem_read[index][SHIFT](io_mem_opaque[index], physaddr);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    res = (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr) << 32;
    res |= io_mem_read[index][2](io_mem_opaque[index], physaddr + 4);
#else
    res = io_mem_read[index][2](io_mem_opaque[index], physaddr);
    res |= (uint64_t)io_mem_read[index][2](io_mem_opaque[index], physaddr + 4) << 32;
#endif
#endif /* SHIFT > 2 */
    return res;
}

/* handle all cases except unaligned accesses which span two pages */
DATA_TYPE REGPARM glue(glue(__ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                      int mmu_idx)
{
    DATA_TYPE res;
    int index;
    target_ulong tlb_addr;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    void *retaddr;
#ifdef CONFIG_MEMCHECK_MMU
    int invalidate_cache = 0;
#endif  // CONFIG_MEMCHECK_MMU

    /* test if there is a match for unaligned or IO access */
    /* XXX: more of this could be done in the memory macros in a
     * non-portable way */
    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
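    /* The entry matches if the virtual page numbers agree and the
     * TLB_INVALID flag is clear; other low-order flag bits (e.g. marking an
     * I/O page) survive in tlb_addr and are tested below. */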
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* This is not an I/O access: do access verification. */
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1 &&
                memcheck_validate_ld(addr, DATA_SIZE, (target_ulong)(ptrdiff_t)GETPC())) {
                /* The read crosses a page boundary, so if invalidation is
                 * required we must invalidate two TLB entries. */
                invalidate_cache = 2;
            }
#endif  // CONFIG_MEMCHECK_MMU
            /* slow unaligned access (it spans two pages or is an IO access) */
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
            res = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr,
                                                         mmu_idx, retaddr);
        } else {
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1) {
                invalidate_cache = memcheck_validate_ld(addr, DATA_SIZE,
                                                        (target_ulong)(ptrdiff_t)GETPC());
            }
#endif  // CONFIG_MEMCHECK_MMU
            /* unaligned/aligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
#ifdef CONFIG_MEMCHECK_MMU
        if (invalidate_cache) {
            /* Accessed memory is under memchecker control. We must invalidate
             * the containing page(s) in order to make sure that the next
             * access to them will invoke _ld/_st_mmu. */
            env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
            env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
            if ((invalidate_cache == 2) && (index + 1 < CPU_TLB_SIZE)) {
                // Read crossed a page boundary. Invalidate the second TLB
                // entry too. Note: index is already masked to
                // CPU_TLB_SIZE - 1, so the bound must be checked against
                // index + 1 to avoid indexing past the table.
                env->tlb_table[mmu_idx][index + 1].addr_read ^= TARGET_PAGE_MASK;
                env->tlb_table[mmu_idx][index + 1].addr_write ^= TARGET_PAGE_MASK;
            }
        }
#endif  // CONFIG_MEMCHECK_MMU
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
#endif
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}
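
/* For illustration: with SHIFT == 2 and MMUSUFFIX defined as _mmu, the
 * function above becomes the TCG slow-path entry point
 *
 *     uint32_t REGPARM __ldl_mmu(target_ulong addr, int mmu_idx);
 *
 * which generated code calls whenever the inline TLB lookup misses (names
 * are a sketch; they depend on the includer's parameters). */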

/* handle all unaligned cases */
static DATA_TYPE glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                        int mmu_idx,
                                                        void *retaddr)
{
    DATA_TYPE res, res1, res2;
    int index, shift;
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr, addr1, addr2;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            res = glue(io_read, SUFFIX)(ioaddr, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* slow unaligned access (it spans two pages) */
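            /* Read the two aligned chunks containing the datum and splice
             * them. Worked example (little-endian, DATA_SIZE == 4,
             * addr & 3 == 3): addr1 = addr - 3, addr2 = addr1 + 4 and
             * shift = 24, so res1 >> 24 yields the one wanted byte from the
             * first chunk and res2 << 8 supplies the remaining three:
             * res = (res1 >> 24) | (res2 << 8). */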
            addr1 = addr & ~(DATA_SIZE - 1);
            addr2 = addr1 + DATA_SIZE;
            res1 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr1,
                                                          mmu_idx, retaddr);
            res2 = glue(glue(slow_ld, SUFFIX), MMUSUFFIX)(addr2,
                                                          mmu_idx, retaddr);
            shift = (addr & (DATA_SIZE - 1)) * 8;
#ifdef TARGET_WORDS_BIGENDIAN
            res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
#else
            res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
#endif
            res = (DATA_TYPE)res;
        } else {
            /* unaligned/aligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            res = glue(glue(ld, USUFFIX), _raw)((uint8_t *)(long)(addr+addend));
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, READ_ACCESS_TYPE, mmu_idx, retaddr);
        goto redo;
    }
    return res;
}

#ifndef SOFTMMU_CODE_ACCESS

static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr);

static inline void glue(io_write, SUFFIX)(target_phys_addr_t physaddr,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          void *retaddr)
{
    int index;
    index = (physaddr >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    physaddr = (physaddr & TARGET_PAGE_MASK) + addr;
    if (index > (IO_MEM_NOTDIRTY >> IO_MEM_SHIFT)
            && !can_do_io(env)) {
        cpu_io_recompile(env, retaddr);
    }

    env->mem_io_vaddr = addr;
    env->mem_io_pc = (unsigned long)retaddr;
#if SHIFT <= 2
    io_mem_write[index][SHIFT](io_mem_opaque[index], physaddr, val);
#else
#ifdef TARGET_WORDS_BIGENDIAN
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val >> 32);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val);
#else
    io_mem_write[index][2](io_mem_opaque[index], physaddr, val);
    io_mem_write[index][2](io_mem_opaque[index], physaddr + 4, val >> 32);
#endif
#endif /* SHIFT > 2 */
}

void REGPARM glue(glue(__st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                 DATA_TYPE val,
                                                 int mmu_idx)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    void *retaddr;
    int index;
#ifdef CONFIG_MEMCHECK_MMU
    int invalidate_cache = 0;
#endif  // CONFIG_MEMCHECK_MMU

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            retaddr = GETPC();
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
            /* This is not an I/O access: do access verification. */
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1 &&
                memcheck_validate_st(addr, DATA_SIZE, (uint64_t)val,
                                     (target_ulong)(ptrdiff_t)GETPC())) {
                /* The write crosses a page boundary, so if invalidation is
                 * required we must invalidate two TLB entries. */
                invalidate_cache = 2;
            }
#endif  // CONFIG_MEMCHECK_MMU
        do_unaligned_access:
            retaddr = GETPC();
#ifdef ALIGNED_ONLY
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
            glue(glue(slow_st, SUFFIX), MMUSUFFIX)(addr, val,
                                                   mmu_idx, retaddr);
        } else {
#ifdef CONFIG_MEMCHECK_MMU
            /* We only validate access to the guest's user space, for which
             * mmu_idx is set to 1. */
            if (memcheck_instrument_mmu && mmu_idx == 1) {
                invalidate_cache = memcheck_validate_st(addr, DATA_SIZE,
                                                        (uint64_t)val,
                                                        (target_ulong)(ptrdiff_t)GETPC());
            }
#endif  // CONFIG_MEMCHECK_MMU
            /* aligned/unaligned access in the same page */
#ifdef ALIGNED_ONLY
            if ((addr & (DATA_SIZE - 1)) != 0) {
                retaddr = GETPC();
                do_unaligned_access(addr, 1, mmu_idx, retaddr);
            }
#endif
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
#ifdef CONFIG_MEMCHECK_MMU
        if (invalidate_cache) {
            /* Accessed memory is under memchecker control. We must invalidate
             * the containing page(s) in order to make sure that the next
             * access to them will invoke _ld/_st_mmu. */
            env->tlb_table[mmu_idx][index].addr_read ^= TARGET_PAGE_MASK;
            env->tlb_table[mmu_idx][index].addr_write ^= TARGET_PAGE_MASK;
            if ((invalidate_cache == 2) && (index + 1 < CPU_TLB_SIZE)) {
                // Write crossed a page boundary. Invalidate the second TLB
                // entry too (index + 1 bound check as in the load path).
                env->tlb_table[mmu_idx][index + 1].addr_read ^= TARGET_PAGE_MASK;
                env->tlb_table[mmu_idx][index + 1].addr_write ^= TARGET_PAGE_MASK;
            }
        }
#endif  // CONFIG_MEMCHECK_MMU
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
#ifdef ALIGNED_ONLY
        if ((addr & (DATA_SIZE - 1)) != 0)
            do_unaligned_access(addr, 1, mmu_idx, retaddr);
#endif
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}
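
/* As with the load path, with SHIFT == 2 and MMUSUFFIX defined as _mmu the
 * function above becomes (sketch)
 *
 *     void REGPARM __stl_mmu(target_ulong addr, uint32_t val, int mmu_idx);
 */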

/* handles all unaligned cases */
static void glue(glue(slow_st, SUFFIX), MMUSUFFIX)(target_ulong addr,
                                                   DATA_TYPE val,
                                                   int mmu_idx,
                                                   void *retaddr)
{
    target_phys_addr_t ioaddr;
    unsigned long addend;
    target_ulong tlb_addr;
    int index, i;

    index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((addr & TARGET_PAGE_MASK) == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (tlb_addr & ~TARGET_PAGE_MASK) {
            /* IO access */
            if ((addr & (DATA_SIZE - 1)) != 0)
                goto do_unaligned_access;
            ioaddr = env->iotlb[mmu_idx][index];
            glue(io_write, SUFFIX)(ioaddr, val, addr, retaddr);
        } else if (((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1) >= TARGET_PAGE_SIZE) {
        do_unaligned_access:
            /* XXX: not efficient, but simple */
            /* Note: relies on the fact that tlb_fill() does not remove the
             * previous page from the TLB cache.  */
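            /* Store the value one byte at a time, highest address first.
             * Worked example (little-endian, DATA_SIZE == 4): i == 3 stores
             * val >> 24 at addr + 3, ..., i == 0 stores val & 0xff at addr;
             * on a big-endian target the shifts run the other way. */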
            for(i = DATA_SIZE - 1; i >= 0; i--) {
#ifdef TARGET_WORDS_BIGENDIAN
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (((DATA_SIZE - 1) * 8) - (i * 8)),
                                          mmu_idx, retaddr);
#else
                glue(slow_stb, MMUSUFFIX)(addr + i, val >> (i * 8),
                                          mmu_idx, retaddr);
#endif
            }
        } else {
            /* aligned/unaligned access in the same page */
            addend = env->tlb_table[mmu_idx][index].addend;
            glue(glue(st, SUFFIX), _raw)((uint8_t *)(long)(addr+addend), val);
        }
    } else {
        /* the page is not in the TLB : fill it */
        tlb_fill(addr, 1, mmu_idx, retaddr);
        goto redo;
    }
}

#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef SHIFT
#undef DATA_TYPE
#undef SUFFIX
#undef USUFFIX
#undef DATA_SIZE
#undef ADDR_READ