
Lines Matching defs:kRegionSize

38   CHECK_ALIGNED(capacity, kRegionSize);
40 // Ask for an extra kRegionSize of capacity so that we can align the map by kRegionSize
46 capacity + kRegionSize,
63 CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
66 if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
67 // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink by
68 // kRegionSize at the end.
72 mem_map->AlignBy(kRegionSize);
74 CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
75 CHECK_ALIGNED(mem_map->End(), kRegionSize);
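
Lines 38-75 above use a common trick to get a region-aligned mapping from an allocator that only guarantees page alignment: request capacity + kRegionSize, then align the start up to the next kRegionSize boundary and trim the excess. A minimal sketch of that idea, using plain malloc and a hypothetical AlignUp helper in place of ART's MemMap machinery (the 256 KiB region size is illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Illustrative value; ART uses a fixed power-of-two region size.
constexpr size_t kRegionSize = 256 * 1024;

// Hypothetical helper mirroring ART's AlignUp; requires a power-of-two alignment.
static uint8_t* AlignUp(uint8_t* ptr, size_t alignment) {
  uintptr_t value = reinterpret_cast<uintptr_t>(ptr);
  return reinterpret_cast<uint8_t*>((value + alignment - 1) & ~(alignment - 1));
}

int main() {
  const size_t capacity = 4 * kRegionSize;
  // Over-allocate by one region: somewhere inside the block there is a
  // kRegionSize-aligned start followed by at least `capacity` bytes.
  uint8_t* raw = static_cast<uint8_t*>(std::malloc(capacity + kRegionSize));
  uint8_t* begin = AlignUp(raw, kRegionSize);
  assert(begin + capacity <= raw + capacity + kRegionSize);
  // [begin, begin + capacity) is the region-aligned span actually used;
  // the real code shrinks the mapping back down to `capacity` afterwards.
  std::free(raw);
  return 0;
}
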
89 CHECK_ALIGNED(mem_map_size, kRegionSize);
90 CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
91 num_regions_ = mem_map_size / kRegionSize;
97 for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
98 regions_[i].Init(i, region_addr, region_addr + kRegionSize);
106 CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
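
Once the mapping is region-aligned, the loop at lines 97-98 divides it into num_regions_ = size / kRegionSize fixed-size regions, each initialized with its begin and end addresses. A hedged sketch of the same bookkeeping, with a hypothetical Region struct standing in for ART's:

#include <cstddef>
#include <cstdint>
#include <vector>

constexpr size_t kRegionSize = 256 * 1024;  // illustrative value

// Hypothetical stand-in for ART's per-region bookkeeping.
struct Region {
  size_t idx = 0;
  uint8_t* begin = nullptr;
  uint8_t* end = nullptr;
  void Init(size_t i, uint8_t* b, uint8_t* e) { idx = i; begin = b; end = e; }
};

// map_begin must be kRegionSize-aligned and map_size a multiple of kRegionSize,
// matching the CHECKs at lines 89-90 above.
std::vector<Region> CarveRegions(uint8_t* map_begin, size_t map_size) {
  const size_t num_regions = map_size / kRegionSize;
  std::vector<Region> regions(num_regions);
  uint8_t* region_addr = map_begin;
  for (size_t i = 0; i < num_regions; ++i, region_addr += kRegionSize) {
    regions[i].Init(i, region_addr, region_addr + kRegionSize);
  }
  return regions;
}
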
130 return num_regions * kRegionSize;
142 return num_regions * kRegionSize;
154 return num_regions * kRegionSize;
172 const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
226 num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
313 reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
334 reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize));
388 max_contiguous_free_regions * kRegionSize);
416 DCHECK_ALIGNED(large_obj, kRegionSize);
419 uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
421 kRegionSize) {
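
Lines 172, 226, and 416-421 all rely on the same arithmetic: a large object starts on a region boundary and covers RoundUp(bytes_allocated, kRegionSize) / kRegionSize regions, the first being the head and the remainder the tail regions. A small sketch of that calculation (the names and region size are illustrative, not ART's actual API):

#include <cassert>
#include <cstddef>

constexpr size_t kRegionSize = 256 * 1024;  // illustrative value

constexpr size_t RoundUp(size_t x, size_t n) { return (x + n - 1) / n * n; }

// A large object starting at a region boundary covers this many regions:
// one head region plus (n - 1) tail regions.
constexpr size_t NumRegionsForLargeObject(size_t bytes_allocated) {
  return RoundUp(bytes_allocated, kRegionSize) / kRegionSize;
}

int main() {
  // A 600 KiB object spans three 256 KiB regions: one head and two tails,
  // which is what num_expected_large_tails counts at line 226 above.
  assert(NumRegionsForLargeObject(600 * 1024) == 3);
  assert(NumRegionsForLargeObject(600 * 1024) - 1 == 2);
  return 0;
}
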
487 DCHECK_ALIGNED(tlab_start, kRegionSize);
490 DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
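
Lines 487-490 check that a thread's TLAB starts on a region boundary and that the thread-locally allocated bytes never exceed kRegionSize, i.e. a TLAB is confined to a single region. A minimal bump-pointer sketch under that assumption (hypothetical names, not ART's Thread API):

#include <cstddef>
#include <cstdint>

constexpr size_t kRegionSize = 256 * 1024;  // illustrative value

// Hypothetical bump-pointer TLAB confined to a single region, so the
// thread-local bytes allocated can never exceed kRegionSize.
struct Tlab {
  uint8_t* start = nullptr;  // region-aligned
  uint8_t* pos = nullptr;
  uint8_t* end = nullptr;    // start + kRegionSize

  void Reset(uint8_t* region_begin) {
    start = pos = region_begin;
    end = region_begin + kRegionSize;
  }

  // Returns nullptr when the TLAB is exhausted; the caller would then
  // ask the space for a fresh region.
  uint8_t* Alloc(size_t num_bytes) {
    if (num_bytes > static_cast<size_t>(end - pos)) {
      return nullptr;
    }
    uint8_t* result = pos;
    pos += num_bytes;
    return result;
  }
};
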
541 if (LIKELY(num_bytes <= kRegionSize)) {
546 *usable_size = RoundUp(num_bytes, kRegionSize);
592 mprotect(Begin(), kRegionSize, PROT_READ | PROT_WRITE);