    Searched refs: kPageSize (Results 1 - 25 of 107)


  /external/compiler-rt/test/tsan/
large_malloc_meta.cc 16 const int kPageSize = 4 << 10;
18 for (int j = 0; j < kSize; j += kPageSize / sizeof(*p))
21 mmap(0, kSize * sizeof(*p) + kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANON,
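The tsan test above backs an array with a mapping and parks a PROT_NONE page past the end so a stray access faults instead of silently corrupting memory. Below is a minimal standalone sketch of that guard-page idiom; the sizes and the touch loop are illustrative, not the test's exact code.

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kPageSize = 4 << 10;        // 4 KiB, as in the test
      const size_t kSize = 16 * kPageSize;     // illustrative payload size
      // Reserve payload plus one trailing guard page, all inaccessible.
      void* m = mmap(nullptr, kSize + kPageSize, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(m != MAP_FAILED);
      char* p = static_cast<char*>(m);
      // Open up everything except the guard page at the end.
      mprotect(p, kSize, PROT_READ | PROT_WRITE);
      for (size_t off = 0; off < kSize; off += kPageSize)
        p[off] = 1;                            // touching p[kSize] would fault
      munmap(p, kSize + kPageSize);
    }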
  /external/compiler-rt/test/asan/TestCases/Posix/
large_allocator_unpoisons_on_free.cc 27 const long kPageSize = sysconf(_SC_PAGESIZE);
28 void *p = my_memalign(kPageSize, 1024 * 1024);
31 char *q = (char *)mmap(p, kPageSize, PROT_READ | PROT_WRITE,
35 memset(q, 42, kPageSize);
37 munmap(q, kPageSize);
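This asan test checks that freeing a large block unpoisons its pages: it frees a page-aligned megabyte, remaps the same address with MAP_FIXED, and writes to it, which would trap if stale poison survived. A reduced sketch of the remap step, using posix_memalign in place of the test's my_memalign wrapper and with error handling elided:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdlib>
    #include <cstring>

    int main() {
      const long kPageSize = sysconf(_SC_PAGESIZE);
      void* p = nullptr;
      posix_memalign(&p, kPageSize, 1024 * 1024);  // page-aligned heap block
      free(p);
      // Map fresh anonymous memory exactly where the freed block lived.
      char* q = static_cast<char*>(mmap(p, kPageSize, PROT_READ | PROT_WRITE,
                                        MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED,
                                        -1, 0));
      memset(q, 42, kPageSize);  // must not trip any stale poisoning
      munmap(q, kPageSize);
    }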
  /art/runtime/arch/
instruction_set.cc 150 static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes), "ARM gap not page aligned");
151 static_assert(IsAligned<kPageSize>(kArm64StackOverflowReservedBytes), "ARM64 gap not page aligned");
152 static_assert(IsAligned<kPageSize>(kMipsStackOverflowReservedBytes), "Mips gap not page aligned");
153 static_assert(IsAligned<kPageSize>(kMips64StackOverflowReservedBytes),
155 static_assert(IsAligned<kPageSize>(kX86StackOverflowReservedBytes), "X86 gap not page aligned");
156 static_assert(IsAligned<kPageSize>(kX86_64StackOverflowReservedBytes),
163 // TODO: Should we require an extra page (RoundUp(SIZE) + kPageSize)?
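These static_asserts pin each architecture's reserved stack-overflow gap to a page multiple at compile time, so the implicit stack-overflow check can probe whole pages. A self-contained sketch of the same check; IsAligned and the byte count below are stand-ins for ART's template and per-arch constants:

    #include <cstddef>

    static constexpr size_t kPageSize = 4096;

    // Stand-in for ART's IsAligned<n>(x); assumes n is a power of two.
    template <size_t n>
    constexpr bool IsAligned(size_t x) { return (x & (n - 1)) == 0; }

    // Illustrative value; the real constants live with the instruction-set code.
    static constexpr size_t kArmStackOverflowReservedBytes = 8 * 1024;

    static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes),
                  "ARM gap not page aligned");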
  /external/compiler-rt/lib/tsan/rtl/
tsan_sync.cc 130 const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
131 if (sz <= 4 * kPageSize) {
137 uptr diff = RoundUp(p, kPageSize) - p;
143 diff = p + sz - RoundDown(p + sz, kPageSize);
150 CHECK_EQ(p, RoundUp(p, kPageSize));
151 CHECK_EQ(sz, RoundUp(sz, kPageSize));
155 for (uptr checked = 0; sz > 0; checked += kPageSize) {
156 bool has_something = FreeRange(proc, p, kPageSize);
157 p += kPageSize;
158 sz -= kPageSize;
    [all...]
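tsan_sync.cc is releasing metadata for an arbitrary [p, p + sz) range: it handles the unaligned head and tail separately, then asserts the remainder is page aligned and walks it one kPageSize chunk at a time. A hedged sketch of just the trimming arithmetic, with RoundUp/RoundDown written out instead of the sanitizer_common helpers:

    #include <cstdint>
    #include <cstdio>

    using uptr = uintptr_t;

    // Valid only when n is a power of two.
    constexpr uptr RoundUp(uptr x, uptr n)   { return (x + n - 1) & ~(n - 1); }
    constexpr uptr RoundDown(uptr x, uptr n) { return x & ~(n - 1); }

    int main() {
      const uptr kPageSize = 4096;
      uptr p = 0x1234, sz = 5 * kPageSize + 100;            // deliberately unaligned
      uptr head = RoundUp(p, kPageSize) - p;                // bytes before first page
      uptr tail = (p + sz) - RoundDown(p + sz, kPageSize);  // bytes after last page
      p += head;
      sz -= head + tail;                                    // now both page aligned
      printf("full pages in range: %zu\n", (size_t)(sz / kPageSize));
    }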
  /art/runtime/gc/allocator/
rosalloc.cc 51 size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
65 DCHECK_ALIGNED(base, kPageSize);
66 DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
67 DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
69 CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
90 size_t num_of_pages = footprint_ / kPageSize;
91 size_t max_num_of_pages = max_capacity_ / kPageSize;
94 RoundUp(max_num_of_pages, kPageSize),
106 DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
131 const size_t req_byte_size = num_pages * kPageSize;
    [all...]
dlmalloc.cc 68 start = reinterpret_cast<void*>(art::RoundUp(reinterpret_cast<uintptr_t>(start), art::kPageSize));
69 end = reinterpret_cast<void*>(art::RoundDown(reinterpret_cast<uintptr_t>(end), art::kPageSize));
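Here dlmalloc's trim callback shrinks [start, end) inward to page boundaries before handing memory back to the kernel; partial pages at either end cannot be released. A sketch under the assumption that the release step is madvise(MADV_DONTNEED), which is how heap trimming is typically done on Linux:

    #include <sys/mman.h>
    #include <cstdint>

    // Release the whole pages inside [start, end); leave partial pages alone.
    void ReleasePages(void* start_v, void* end_v, uintptr_t page_size) {
      uintptr_t start = (reinterpret_cast<uintptr_t>(start_v) + page_size - 1)
                        & ~(page_size - 1);                   // round start up
      uintptr_t end =
          reinterpret_cast<uintptr_t>(end_v) & ~(page_size - 1);  // round end down
      if (end > start)
        madvise(reinterpret_cast<void*>(start), end - start, MADV_DONTNEED);
    }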
  /art/runtime/
globals.h 41 static constexpr int kPageSize = 4096;
46 return offset < kPageSize;
52 static constexpr size_t kLargeObjectAlignment = kPageSize;
mem_map.cc 94 // & ~(kPageSize - 1) =~0000000000000001111
117 constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);
298 size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
385 const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
422 int page_offset = start % kPageSize;
425 size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
433 redzone_size = kPageSize;
539 DCHECK_ALIGNED(begin_, kPageSize);
540 DCHECK_ALIGNED(base_begin_, kPageSize);
541 DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);
    [all...]
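Two idioms show up in mem_map.cc: clearing the low bits with & ~(kPageSize - 1) to round an address down to its page (the binary-mask comment at line 94), and, at lines 422-425, mapping a file region whose start offset is not page aligned by mapping from the enclosing page boundary and offsetting the returned pointer. A standalone sketch of the latter; MapFileRegion is a hypothetical helper, not ART's actual API:

    #include <sys/mman.h>
    #include <sys/types.h>
    #include <cstdint>

    static constexpr size_t kPageSize = 4096;

    constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) & ~(n - 1); }

    // Map byte_count bytes of fd starting at an arbitrary, unaligned offset.
    uint8_t* MapFileRegion(int fd, off_t start, size_t byte_count) {
      size_t page_offset = start % kPageSize;             // slack in the first page
      size_t aligned_count = RoundUp(byte_count + page_offset, kPageSize);
      void* base = mmap(nullptr, aligned_count, PROT_READ, MAP_PRIVATE,
                        fd, start - page_offset);         // mmap offset must be aligned
      if (base == MAP_FAILED) return nullptr;
      return static_cast<uint8_t*>(base) + page_offset;   // caller's view starts here
    }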
mem_map_test.cc 54 const size_t page_size = static_cast<size_t>(kPageSize);
132 uintptr_t random_start = CreateStartPos(i * kPageSize);
158 kPageSize,
171 reinterpret_cast<uint8_t*>(kPageSize),
186 kPageSize,
199 constexpr size_t kMapSize = kPageSize;
221 uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
225 kPageSize,
236 kPageSize,
247 kPageSize,
    [all...]
oat.cc 108 if (!IsAligned<kPageSize>(executable_offset_)) {
111 if (!IsAligned<kPageSize>(image_patch_delta_)) {
133 if (!IsAligned<kPageSize>(executable_offset_)) {
136 if (!IsAligned<kPageSize>(image_patch_delta_)) {
208 DCHECK_ALIGNED(executable_offset_, kPageSize);
214 DCHECK_ALIGNED(executable_offset, kPageSize);
356 CHECK_ALIGNED(delta, kPageSize);
365 CHECK_ALIGNED(off, kPageSize);
386 CHECK_ALIGNED(image_file_location_oat_data_begin, kPageSize);
image.cc 67 CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));
68 CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));
69 CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));
81 CHECK_ALIGNED(delta, kPageSize) << " patch delta must be page aligned";
122 if (!IsAligned<kPageSize>(patch_delta_)) {
  /art/runtime/gc/collector/
immune_spaces_test.cc 71 reinterpret_cast<uint8_t*>(kPageSize),
72 kPageSize));
208 constexpr size_t kImageSize = 123 * kPageSize;
209 constexpr size_t kImageOatSize = 321 * kPageSize;
210 constexpr size_t kOtherSpaceSize= 100 * kPageSize;
255 constexpr size_t kImage1Size = kPageSize * 17;
256 constexpr size_t kImage2Size = kPageSize * 13;
257 constexpr size_t kImage3Size = kPageSize * 3;
258 constexpr size_t kImage1OatSize = kPageSize * 5;
259 constexpr size_t kImage2OatSize = kPageSize * 8
    [all...]
semi_space-inl.h 67 CHECK_ALIGNED(ref, kPageSize);
  /system/core/libmemunreachable/
Allocator.cpp 55 static constexpr size_t kPageSize = 4096;
57 static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
63 / kPageSize;
123 size = (size + kPageSize - 1) & ~(kPageSize - 1);
126 size_t map_size = size + align - kPageSize;
221 static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");
266 unsigned int page = n * allocation_size_ / kPageSize;
301 if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
309 //unsigned int allocsPerPage = kPageSize / allocation_size_
    [all...]
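libmemunreachable's Allocator needs mappings aligned beyond what mmap guarantees (one page), so it over-allocates align - kPageSize bytes of slack and unmaps whatever falls outside the aligned window; line 123 is the standard round-up-to-page mask. A sketch of that over-allocate-and-trim trick, simplified from the hits above:

    #include <sys/mman.h>
    #include <cstdint>

    static constexpr size_t kPageSize = 4096;

    // Return `size` bytes aligned to `align` (a power of two >= kPageSize).
    void* MapAligned(size_t size, size_t align) {
      size = (size + kPageSize - 1) & ~(kPageSize - 1);   // whole pages only
      size_t map_size = size + align - kPageSize;         // worst-case slack
      char* p = static_cast<char*>(mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
      if (p == MAP_FAILED) return nullptr;
      char* aligned = reinterpret_cast<char*>(
          (reinterpret_cast<uintptr_t>(p) + align - 1) & ~(align - 1));
      if (aligned > p) munmap(p, aligned - p);            // drop leading slack
      if (aligned + size < p + map_size)                  // drop trailing slack
        munmap(aligned + size, p + map_size - (aligned + size));
      return aligned;
    }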
  /external/libmojo/base/android/library_loader/
library_prefetcher.cc 29 const size_t kPageSize = 4096;
55 const uintptr_t page_mask = kPageSize - 1;
64 for (unsigned char* ptr = start_ptr; ptr < end_ptr; ptr += kPageSize) {
164 const uintptr_t page_mask = kPageSize - 1;
170 size_t pages = length / kPageSize;
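The prefetcher walks a mapped library range one page at a time, reading a single byte from each page so the kernel faults the whole range into memory before it is needed. A minimal sketch of that loop; the volatile access keeps the compiler from deleting the otherwise-dead reads:

    #include <cstddef>

    static constexpr size_t kPageSize = 4096;  // the source hardcodes 4096 too

    // Fault in every page of [start, end) by touching one byte per page.
    void Prefetch(unsigned char* start, unsigned char* end) {
      unsigned char sink = 0;
      for (unsigned char* ptr = start; ptr < end; ptr += kPageSize) {
        volatile unsigned char* vptr = ptr;  // volatile read defeats DCE
        sink += *vptr;
      }
      (void)sink;
    }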
  /external/v8/src/heap/
remembered-set.h 31 slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
42 slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
55 if (end_offset < static_cast<uintptr_t>(Page::kPageSize)) {
61 int start_chunk = static_cast<int>(start_offset / Page::kPageSize);
62 int end_chunk = static_cast<int>((end_offset - 1) / Page::kPageSize);
64 static_cast<int>(start_offset % Page::kPageSize);
65 // Note that using end_offset % Page::kPageSize would be incorrect
68 end_offset - static_cast<uintptr_t>(end_chunk) * Page::kPageSize);
    [all...]
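The remembered set maps a byte-offset range onto fixed-size page buckets, and the comment at line 65 flags the classic boundary bug this code avoids: for the range's end, end_offset % kPageSize is wrong because an end that lands exactly on a page boundary belongs to the previous bucket, not offset 0 of the next one. A sketch of the bucket arithmetic; the page size here is only a stand-in for v8's Page::kPageSize:

    #include <cstdint>
    #include <cstdio>

    const uintptr_t kPageSize = 1 << 19;  // stand-in for Page::kPageSize

    int main() {
      uintptr_t start_offset = kPageSize - 8;
      uintptr_t end_offset = 3 * kPageSize;  // exactly on a bucket boundary
      int start_chunk = static_cast<int>(start_offset / kPageSize);      // 0
      int end_chunk = static_cast<int>((end_offset - 1) / kPageSize);    // 2
      int end_in_chunk = static_cast<int>(
          end_offset - static_cast<uintptr_t>(end_chunk) * kPageSize);   // kPageSize
      // end_offset % kPageSize would give 0 here, pointing at the wrong bucket.
      printf("chunks %d..%d, end offset in last chunk: %d\n",
             start_chunk, end_chunk, end_in_chunk);
    }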
  /bionic/linker/tests/
linker_block_allocator_test.cpp 60 static size_t kPageSize = sysconf(_SC_PAGE_SIZE);
108 size_t n = kPageSize/sizeof(test_struct_larger) + 1 - 2;
123 size_t n = kPageSize/sizeof(test_struct_larger) - 1;
  /external/libchrome/base/
security_unittest.cc 141 size_t kPageSize = 4096; // We support x86_64 only.
148 mmap(0, kPageSize, PROT_READ|PROT_WRITE,
152 ASSERT_EQ(munmap(default_mmap_heap_address, kPageSize), 0);
  /art/runtime/gc/space/
malloc_space.cc 92 *growth_limit = RoundUp(*growth_limit, kPageSize);
93 *capacity = RoundUp(*capacity, kPageSize);
127 growth_limit = RoundUp(growth_limit, kPageSize);
170 SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));
173 DCHECK_ALIGNED(begin_, kPageSize);
174 DCHECK_ALIGNED(End(), kPageSize);
175 size_t size = RoundUp(Size(), kPageSize);
187 SetGrowthLimit(RoundUp(size, kPageSize));
  /external/ltp/testcases/kernel/mem/mmapstress/
mmap-corruption01.c 64 int kPageSize = 4096;
160 if (!byte_good && ((i % kPageSize) == 0)) {
161 //printf("%d ", i / kPageSize);
  /art/compiler/
elf_builder.h 136 header_.sh_addralign = kPageSize;
513 rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
514 text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0, kPageSize, 0),
515 bss_(this, ".bss", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
516 dynstr_(this, ".dynstr", SHF_ALLOC, kPageSize),
519 dynamic_(this, ".dynamic", SHT_DYNAMIC, SHF_ALLOC, &dynstr_, 0, kPageSize, sizeof(Elf_Dyn)),
520 eh_frame_(this, ".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),
528 abiflags_(this, ".MIPS.abiflags", SHT_MIPS_ABIFLAGS, SHF_ALLOC, nullptr, 0, kPageSize, 0,
612 CHECK(loaded_size_ == 0 || loaded_size_ == RoundUp(virtual_address_, kPageSize))
681 DCHECK_EQ(rodata_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));
    [all...]
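elf_builder.h gives the loadable sections (.rodata, .text, .bss, and friends) page-size sh_addralign so the program segments containing them can be mapped straight from the file. A small sketch of the same field using the standard <elf.h> types; the values are illustrative:

    #include <elf.h>
    #include <cstring>

    static constexpr size_t kPageSize = 4096;

    Elf64_Shdr MakeTextHeader() {
      Elf64_Shdr h;
      std::memset(&h, 0, sizeof(h));
      h.sh_type = SHT_PROGBITS;
      h.sh_flags = SHF_ALLOC | SHF_EXECINSTR;
      h.sh_addralign = kPageSize;  // page alignment lets the loader map it directly
      return h;
    }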
  /external/compiler-rt/lib/sanitizer_common/
sanitizer_stacktrace.cc 69 const uptr kPageSize = GetPageSizeCached();
99 if (pc1 < kPageSize)
sanitizer_unwind_linux_libcdep.cc 111 const uptr kPageSize = GetPageSizeCached();
115 if (pc < kPageSize) return UNWIND_STOP;
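Both sanitizer files use pc < kPageSize as a cheap null-call test while unwinding: the zero page is never mapped, so a return address inside the first page means the program called through a (near-)null function pointer and the unwinder should stop. A sketch of the heuristic; GetPageSizeCached here simply wraps sysconf rather than the sanitizer's cached copy:

    #include <unistd.h>
    #include <cstdint>

    using uptr = uintptr_t;

    uptr GetPageSizeCached() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }

    // True when pc cannot be a valid code address: the first page is never
    // mapped, so such a pc came from a call through a (near-)null pointer.
    bool IsBogusPc(uptr pc) {
      return pc < GetPageSizeCached();
    }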
  /external/google-breakpad/src/client/windows/unittests/
exception_handler_death_test.cc 416 const DWORD kPageSize = sSysInfo.dwPageSize;
423 kPageSize * 2,
427 char* memory = all_memory + kPageSize;
428 ASSERT_TRUE(VirtualAlloc(memory, kPageSize,
508 const DWORD kPageSize = sSysInfo.dwPageSize;
511 const int kOffset = kPageSize - sizeof(instructions);
515 kPageSize * 2,
519 ASSERT_TRUE(VirtualAlloc(memory, kPageSize,
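The breakpad death tests build guard pages on Windows: reserve two pages of address space, commit only one, and any touch of the uncommitted page raises an access violation for the exception handler to catch. A reduced sketch of that setup; it is Windows-only and omits the tests' handler wiring:

    #include <windows.h>

    int main() {
      SYSTEM_INFO si;
      GetSystemInfo(&si);
      const DWORD kPageSize = si.dwPageSize;
      // Reserve two pages of address space without committing them.
      char* all_memory = static_cast<char*>(VirtualAlloc(
          nullptr, kPageSize * 2, MEM_RESERVE, PAGE_NOACCESS));
      char* memory = all_memory + kPageSize;
      // Commit only the second page; the first stays a fault-on-touch guard.
      VirtualAlloc(memory, kPageSize, MEM_COMMIT, PAGE_READWRITE);
      memory[0] = 1;            // fine: committed page
      // all_memory[0] = 1;     // would raise an access violation
      VirtualFree(all_memory, 0, MEM_RELEASE);
      return 0;
    }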
  /art/compiler/linker/
multi_oat_relative_patcher.cc 39 DCHECK_ALIGNED(adjustment, kPageSize);
