Home | Sort by relevance | Sort by last modified time
    Searched defs:npages (Results 1 - 10 of 10) sorted by null

  /external/strace/
numa.c 163 const kernel_ulong_t npages = tcp->u_arg[1]; local
167 tprintf("%d, %" PRI_klu ", ", (int) tcp->u_arg[0], npages);
168 print_array(tcp, tcp->u_arg[2], npages, &buf, current_wordsize,
171 print_array(tcp, tcp->u_arg[3], npages, &buf, sizeof(int),
175 print_array(tcp, tcp->u_arg[4], npages, &buf, sizeof(int),
  /bionic/libc/kernel/uapi/xen/
privcmd.h 31 __u64 npages; member in struct:privcmd_mmap_entry
  /external/kernel-headers/original/uapi/xen/
privcmd.h 53 __u64 npages; member in struct:privcmd_mmap_entry
  /external/jemalloc_new/include/jemalloc/internal/
extent_structs.h 202 * The synchronization here is a little tricky. Modifications to npages
203 * must hold mtx, but reads need not (though, a reader who sees npages
207 atomic_zu_t npages; member in struct:extents_s
  /external/f2fs-tools/tools/
f2fstat.c 158 int npages; local
161 npages = strtoul(head, &tail, 10);
164 node_kb = npages * 4;
167 meta_kb = npages * 4;
  /external/linux-kselftest/tools/testing/selftests/kvm/lib/
kvm_util.c 496 * npages - Number of physical pages
503 * Allocates a memory area of the number of pages specified by npages
511 uint64_t guest_paddr, uint32_t slot, uint64_t npages,
523 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
526 " guest_paddr: 0x%lx npages: 0x%lx\n"
528 guest_paddr, npages, vm->max_gfn, vm->page_size);
535 vm, guest_paddr, guest_paddr + npages * vm->page_size);
539 " requested guest_paddr: 0x%lx npages: 0x%lx "
542 guest_paddr, npages, vm->page_size,
553 && ((guest_paddr + npages * vm->page_size
931 size_t npages = size / page_size; local
    [all...]
  /external/jemalloc_new/src/
arena.c 761 size_t npages = extent_size_get(extent) >> LG_PAGE; local
762 npurged += npages;
783 nunmapped += npages;
    [all...]
extent.c 292 atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
305 return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
322 size_t npages = size >> LG_PAGE; local
324 * All modifications to npages hold the mutex (as asserted above), so we
329 atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
330 atomic_store_zu(&extents->npages, cur_extents_npages + npages,
348 size_t npages = size >> LG_PAGE; local
351 * atomic operations for updating extents->npages.
354 atomic_load_zu(&extents->npages, ATOMIC_RELAXED)
    [all...]
  /external/jemalloc/src/
arena.c 187 size_t npages)
191 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
193 assert((npages << LG_PAGE) < chunksize);
201 size_t npages)
205 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
207 assert((npages << LG_PAGE) < chunksize);
215 size_t npages)
220 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
223 assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
228 arena->ndirty += npages;
1538 size_t npages; local
1575 size_t npages; local
1671 size_t npages; local
1865 size_t pageind, npages; local
3096 size_t npages = (oldsize + large_pad) >> LG_PAGE; local
    [all...]
  /external/stressapptest/src/
worker.cc 3266 int npages = size / plength; local
    [all...]

Completed in 721 milliseconds