    Searched refs: kPageSize (Results 1 - 25 of 140)


  /art/libartbase/base/
safe_copy_test.cc 33 DCHECK_EQ(kPageSize, static_cast<decltype(kPageSize)>(PAGE_SIZE));
36 void* map = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,
40 char* page2 = page1 + kPageSize;
41 char* page3 = page2 + kPageSize;
42 char* page4 = page3 + kPageSize;
43 ASSERT_EQ(0, mprotect(page1 + kPageSize, kPageSize, PROT_NONE));
44 ASSERT_EQ(0, munmap(page4, kPageSize));
47 page1[kPageSize - 1] = 'z';
    [all...]
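
The ART test above maps four consecutive pages, converts one into an inaccessible guard page with mprotect(PROT_NONE), and unmaps the tail. A minimal standalone sketch of the same pattern, assuming plain POSIX (sysconf stands in for ART's compile-time kPageSize):

    #include <sys/mman.h>
    #include <unistd.h>

    int main() {
      const size_t kPageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      // Reserve four contiguous anonymous pages.
      void* raw = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (raw == MAP_FAILED) return 1;
      char* page1 = static_cast<char*>(raw);
      // Turn the second page into a guard page: any access there now faults.
      if (mprotect(page1 + kPageSize, kPageSize, PROT_NONE) != 0) return 1;
      // Writing up to (but not past) the guard page is still legal.
      page1[kPageSize - 1] = 'z';
      munmap(raw, kPageSize * 4);
      return 0;
    }
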
globals.h 39 static constexpr int kPageSize = 4096;
44 return offset < kPageSize;
50 static constexpr size_t kLargeObjectAlignment = kPageSize;
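
ART fixes kPageSize to 4096 at compile time, and safe_copy_test.cc above DCHECKs that assumption against the kernel's PAGE_SIZE. A hedged sketch of the same cross-check, assuming POSIX sysconf (not ART's actual startup code):

    #include <unistd.h>
    #include <cassert>
    #include <cstddef>

    static constexpr std::size_t kPageSize = 4096;  // compile-time assumption

    void CheckPageSizeAssumption() {
      // Fires at runtime on kernels configured with larger pages (e.g. 16K).
      assert(kPageSize == static_cast<std::size_t>(sysconf(_SC_PAGESIZE)));
    }
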
  /external/compiler-rt/test/tsan/
large_malloc_meta.cc 16 const int kPageSize = 4 << 10;
18 for (int j = 0; j < kSize; j += kPageSize / sizeof(*p))
21 mmap(0, kSize * sizeof(*p) + kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANON,
  /external/compiler-rt/test/asan/TestCases/Posix/
large_allocator_unpoisons_on_free.cc 27 const long kPageSize = sysconf(_SC_PAGESIZE);
28 void *p = my_memalign(kPageSize, 1024 * 1024);
31 char *q = (char *)mmap(p, kPageSize, PROT_READ | PROT_WRITE,
35 memset(q, 42, kPageSize);
37 munmap(q, kPageSize);
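
In contrast to ART's constant, this ASan test queries the page size at runtime with sysconf(_SC_PAGESIZE). A minimal sketch of the portable pattern, with posix_memalign standing in for the test's my_memalign helper:

    #include <unistd.h>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      const long kPageSize = sysconf(_SC_PAGESIZE);
      void* p = nullptr;
      // Page-aligned 1 MiB block; the alignment must be a power of two and
      // a multiple of sizeof(void*), both of which a page size satisfies.
      if (posix_memalign(&p, static_cast<size_t>(kPageSize), 1024 * 1024) != 0)
        return 1;
      std::printf("page size %ld, block at %p\n", kPageSize, p);
      std::free(p);
      return 0;
    }
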
  /external/perfetto/src/ftrace_reader/
cpu_reader_fuzzer.cc 33 uint8_t g_page[base::kPageSize];
42 protozero::ScatteredStreamWriterNullDelegate delegate(base::kPageSize);
52 memset(g_page, 0, base::kPageSize);
53 memcpy(g_page, data, std::min(base::kPageSize, size));
  /external/perfetto/src/tracing/ipc/
posix_shared_memory_unittest.cc 44 factory.CreateSharedMemory(base::kPageSize);
48 ASSERT_EQ(base::kPageSize, shm_size);
59 PosixSharedMemory::Create(base::kPageSize);
62 ASSERT_EQ(static_cast<off_t>(base::kPageSize), lseek(fd, 0, SEEK_END));
71 ASSERT_EQ(0, ftruncate(fd_num, base::kPageSize));
79 ASSERT_EQ(base::kPageSize, shm_size);
  /external/perfetto/src/tracing/core/
null_trace_writer_unittest.cc 37 for (size_t i = 0; i < 3 * base::kPageSize; i++) {
null_trace_writer.cc 29 : delegate_(base::kPageSize), stream_(&delegate_) {
trace_writer_for_testing.cc 29 : delegate_(static_cast<size_t>(base::kPageSize)), stream_(&delegate_) {
47 size_t chunk_size_ = base::kPageSize;
  /art/runtime/
mem_map_test.cc 76 const size_t page_size = static_cast<size_t>(kPageSize);
154 uintptr_t random_start = CreateStartPos(i * kPageSize);
172 kPageSize,
180 kPageSize,
191 std::vector<uint8_t> data = RandomData(kPageSize);
200 ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
209 5 * kPageSize, // Need to make it larger
221 3 * kPageSize,
232 std::vector<uint8_t> data = RandomData(3 * kPageSize);
236 dest->SetSize(kPageSize);
    [all...]
  /external/perfetto/src/ipc/
buffered_frame_deserializer.cc 42 PERFETTO_CHECK(max_capacity % base::kPageSize == 0);
43 PERFETTO_CHECK(max_capacity > base::kPageSize);
60 int res = madvise(buf() + base::kPageSize, capacity_ - base::kPageSize,
145 if (consumed_size > base::kPageSize) {
146 size_t size_rounded_up = (size_ / base::kPageSize + 1) * base::kPageSize;
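
Two page idioms appear here: madvise past the first page returns memory to the kernel, and the consumed size is rounded to a page multiple. Note that `(size_ / kPageSize + 1) * kPageSize` always advances to the next page boundary, even when size_ is already aligned; the conventional round-up that leaves aligned sizes unchanged looks like this (assumed helper, not Perfetto's API):

    #include <cstddef>

    // Round size up to the nearest multiple of page_size; an already
    // page-aligned size is returned unchanged.
    constexpr std::size_t RoundUpToPage(std::size_t size, std::size_t page_size) {
      return ((size + page_size - 1) / page_size) * page_size;
    }

    static_assert(RoundUpToPage(4096, 4096) == 4096, "aligned input stays put");
    static_assert(RoundUpToPage(4097, 4096) == 8192, "unaligned input rounds up");

The two behaviors differ exactly on aligned inputs; whether the extra page in the Perfetto expression is intentional is a question for that code.
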
  /external/compiler-rt/lib/tsan/rtl/
tsan_sync.cc 130 const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
131 if (sz <= 4 * kPageSize) {
137 uptr diff = RoundUp(p, kPageSize) - p;
143 diff = p + sz - RoundDown(p + sz, kPageSize);
150 CHECK_EQ(p, RoundUp(p, kPageSize));
151 CHECK_EQ(sz, RoundUp(sz, kPageSize));
155 for (uptr checked = 0; sz > 0; checked += kPageSize) {
156 bool has_something = FreeRange(proc, p, kPageSize);
157 p += kPageSize;
158 sz -= kPageSize;
    [all...]
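
TSan releases only the whole pages strictly inside [p, p + sz): the start is rounded up and the end rounded down to page boundaries before the loop walks the interior page by page. A self-contained sketch of that boundary arithmetic (GetPageSizeCached and FreeRange are TSan-internal; the mask trick assumes a power-of-two page size):

    #include <cstdint>

    using uptr = std::uintptr_t;

    constexpr uptr RoundUp(uptr x, uptr align) { return (x + align - 1) & ~(align - 1); }
    constexpr uptr RoundDown(uptr x, uptr align) { return x & ~(align - 1); }

    // Computes the [first, last) page-aligned subrange fully contained in
    // [p, p + sz); the range comes back empty when no whole page fits.
    inline void InteriorPages(uptr p, uptr sz, uptr page, uptr* first, uptr* last) {
      *first = RoundUp(p, page);        // first boundary at or after p
      *last = RoundDown(p + sz, page);  // last boundary at or before p + sz
      if (*last < *first) *last = *first;  // too small to cover a page
    }
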
  /art/runtime/gc/allocator/
rosalloc.cc 52 size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
66 DCHECK_ALIGNED(base, kPageSize);
67 DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
68 DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
70 CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
91 size_t num_of_pages = footprint_ / kPageSize;
92 size_t max_num_of_pages = max_capacity_ / kPageSize;
95 RoundUp(max_num_of_pages, kPageSize),
107 DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
132 const size_t req_byte_size = num_pages * kPageSize;
    [all...]
dlmalloc.cc 71 start = reinterpret_cast<void*>(art::RoundUp(reinterpret_cast<uintptr_t>(start), art::kPageSize));
72 end = reinterpret_cast<void*>(art::RoundDown(reinterpret_cast<uintptr_t>(end), art::kPageSize));
  /art/runtime/arch/
instruction_set.cc 146 static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes), "ARM gap not page aligned");
147 static_assert(IsAligned<kPageSize>(kArm64StackOverflowReservedBytes), "ARM64 gap not page aligned");
148 static_assert(IsAligned<kPageSize>(kMipsStackOverflowReservedBytes), "Mips gap not page aligned");
149 static_assert(IsAligned<kPageSize>(kMips64StackOverflowReservedBytes),
151 static_assert(IsAligned<kPageSize>(kX86StackOverflowReservedBytes), "X86 gap not page aligned");
152 static_assert(IsAligned<kPageSize>(kX86_64StackOverflowReservedBytes),
159 // TODO: Should we require an extra page (RoundUp(SIZE) + kPageSize)?
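
These static_asserts verify at compile time that each architecture's stack-overflow gap is a whole number of pages. A minimal reproduction of the pattern (IsAligned is ART's helper, re-sketched here under the usual power-of-two definition; the reserved-bytes value below is illustrative, not ART's real constant):

    #include <cstddef>

    static constexpr std::size_t kPageSize = 4096;

    template <std::size_t kAlign>
    constexpr bool IsAligned(std::size_t value) {
      static_assert((kAlign & (kAlign - 1)) == 0, "alignment must be a power of two");
      return (value & (kAlign - 1)) == 0;
    }

    // Hypothetical value, for illustration only.
    static constexpr std::size_t kSomeStackOverflowReservedBytes = 8 * 1024;
    static_assert(IsAligned<kPageSize>(kSomeStackOverflowReservedBytes),
                  "gap not page aligned");
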
  /external/libtextclassifier/util/memory/
mmap.cc 86 static const int64 kPageSize = sysconf(_SC_PAGE_SIZE);
87 const int64 aligned_offset = (segment_offset / kPageSize) * kPageSize;
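
mmap requires its file offset to be page-aligned, so the code above rounds the requested segment offset down to a page boundary and keeps the slack. A hedged sketch of the whole pattern, assuming plain POSIX (MapSegment is a made-up name, not the libtextclassifier API):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>

    // Maps segment_size bytes of fd starting at segment_offset, tolerating an
    // unaligned offset by over-mapping from the page boundary just below it.
    // Returns the caller's view; *out_map/*out_map_size go to munmap() later.
    inline void* MapSegment(int fd, int64_t segment_offset, int64_t segment_size,
                            void** out_map, size_t* out_map_size) {
      const int64_t kPageSize = sysconf(_SC_PAGE_SIZE);
      const int64_t aligned_offset = (segment_offset / kPageSize) * kPageSize;
      const int64_t slack = segment_offset - aligned_offset;
      void* map = mmap(nullptr, static_cast<size_t>(segment_size + slack),
                       PROT_READ, MAP_PRIVATE, fd,
                       static_cast<off_t>(aligned_offset));
      if (map == MAP_FAILED) return nullptr;
      *out_map = map;
      *out_map_size = static_cast<size_t>(segment_size + slack);
      return static_cast<char*>(map) + slack;
    }
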
  /art/runtime/gc/collector/
immune_spaces_test.cc 73 reinterpret_cast<uint8_t*>(kPageSize),
74 kPageSize));
210 constexpr size_t kImageSize = 123 * kPageSize;
211 constexpr size_t kImageOatSize = 321 * kPageSize;
212 constexpr size_t kOtherSpaceSize = 100 * kPageSize;
257 constexpr size_t kImage1Size = kPageSize * 17;
258 constexpr size_t kImage2Size = kPageSize * 13;
259 constexpr size_t kImage3Size = kPageSize * 3;
260 constexpr size_t kImage1OatSize = kPageSize * 5;
261 constexpr size_t kImage2OatSize = kPageSize * 8;
    [all...]
  /external/v8/src/heap/
remembered-set.h 31 slot_set[offset / Page::kPageSize].Insert(offset % Page::kPageSize);
43 return slot_set[offset / Page::kPageSize].Contains(offset %
44 Page::kPageSize);
55 slot_set[offset / Page::kPageSize].Remove(offset % Page::kPageSize);
68 if (end_offset < static_cast<uintptr_t>(Page::kPageSize)) {
74 int start_chunk = static_cast<int>(start_offset / Page::kPageSize);
75 int end_chunk = static_cast<int>((end_offset - 1) / Page::kPageSize);
77 static_cast<int>(start_offset % Page::kPageSize);
    [all...]
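
The remembered set keeps one SlotSet per heap page and splits every recorded offset into a page index (offset / kPageSize) and an in-page offset (offset % kPageSize). A tiny sketch of that decomposition (illustrative types and page size; V8's Page::kPageSize is a heap-page constant, much larger than the 4 KiB OS page):

    #include <cstddef>

    constexpr std::size_t kPageSize = 4096;  // illustrative, not V8's value

    struct SlotPosition {
      std::size_t page_index;   // selects the per-page slot set
      std::size_t page_offset;  // slot offset within that page
    };

    constexpr SlotPosition Split(std::size_t offset) {
      return {offset / kPageSize, offset % kPageSize};
    }

    static_assert(Split(kPageSize + 8).page_index == 1, "lands in second page");
    static_assert(Split(kPageSize + 8).page_offset == 8, "8 bytes into it");
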
  /external/perfetto/src/base/
page_allocator.cc 29 constexpr size_t kGuardSize = kPageSize;
33 PERFETTO_DCHECK(size % kPageSize == 0);
  /external/perfetto/src/ftrace_reader/test/
cpu_reader_support.cc 46 auto buffer = std::unique_ptr<uint8_t[]>(new uint8_t[base::kPageSize]);
48 memset(buffer.get(), 0xfa, base::kPageSize);
  /system/core/libmemunreachable/
Allocator.cpp 57 static constexpr size_t kPageSize = 4096;
59 static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
64 static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;
123 size = (size + kPageSize - 1) & ~(kPageSize - 1);
126 size_t map_size = size + align - kPageSize;
211 static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");
259 unsigned int page = n * allocation_size_ / kPageSize;
294 if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
302 // unsigned int allocsPerPage = kPageSize / allocation_size_;
    [all...]
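
Allocator.cpp rounds sizes with the classic bitmask form `(size + kPageSize - 1) & ~(kPageSize - 1)`, equivalent to the divide-and-multiply round-up but valid only when kPageSize is a power of two. A worked check:

    #include <cstddef>

    static constexpr std::size_t kPageSize = 4096;

    constexpr std::size_t PageAlignUp(std::size_t size) {
      // Adding (page - 1) carries any sub-page remainder into the page bits;
      // the mask then clears the remainder. Requires a power-of-two page size.
      return (size + kPageSize - 1) & ~(kPageSize - 1);
    }

    static_assert(PageAlignUp(1) == 4096, "rounds up");
    static_assert(PageAlignUp(4096) == 4096, "aligned input unchanged");
    static_assert(PageAlignUp(4097) == 8192, "crosses to next page");
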
  /external/libmojo/base/android/library_loader/
library_prefetcher.cc 30 const size_t kPageSize = 4096;
63 const uintptr_t page_mask = kPageSize - 1;
72 for (unsigned char* ptr = start_ptr; ptr < end_ptr; ptr += kPageSize) {
172 const uintptr_t page_mask = kPageSize - 1;
178 size_t pages = length / kPageSize;
  /bionic/linker/tests/
linker_block_allocator_test.cpp 60 static size_t kPageSize = sysconf(_SC_PAGE_SIZE);
108 size_t n = kPageSize/sizeof(test_struct_larger) + 1 - 2;
123 size_t n = kPageSize/sizeof(test_struct_larger) - 1;
  /external/libchrome/base/
security_unittest.cc 142 size_t kPageSize = 4096; // We support x86_64 only.
149 mmap(0, kPageSize, PROT_READ|PROT_WRITE,
153 ASSERT_EQ(munmap(default_mmap_heap_address, kPageSize), 0);
  /external/perfetto/include/perfetto/base/
utils.h 39 constexpr size_t kPageSize = 4096;

