Lines Matching refs:address
51 // address of the next page and its ownership information. The second word may
52 // have the allocation top address of this page. The next 248 bytes are
68 #define ASSERT_PAGE_ALIGNED(address) \
69 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
71 #define ASSERT_OBJECT_ALIGNED(address) \
72 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
74 #define ASSERT_MAP_ALIGNED(address) \
75 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0)
94 // address is always aligned to the 8K page size. A page is divided into
100 // for a pointer in the remembered set. Given an address, its remembered set
120 // Returns the page containing a given address. The address ranges
127 INLINE(static Page* FromAddress(Address a)) {
132 // top address can be the upper bound of the page, we need to subtract
133 // kPointerSize from it first. The address ranges from
135 INLINE(static Page* FromAllocationTop(Address top)) {
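From the 8K-alignment comment above, FromAddress can recover the owning page by masking off the low bits of any interior address, and FromAllocationTop has to step back one pointer first because an allocation top may equal the exclusive page end. A minimal standalone sketch of that arithmetic follows; the constants and helper names are assumptions for illustration, not the actual definitions in this header.

#include <cassert>
#include <cstdint>

using Address = uint8_t*;
const intptr_t kPageSize = intptr_t{1} << 13;      // 8K, per the comment above
const intptr_t kPageAlignmentMask = kPageSize - 1;
const intptr_t kPointerSize = sizeof(void*);

struct Page;  // stand-in for the real Page class; never dereferenced here

// Every interior address maps to its page start by masking off the low bits.
inline Page* PageFromAddress(Address a) {
  return reinterpret_cast<Page*>(
      reinterpret_cast<intptr_t>(a) & ~kPageAlignmentMask);
}

// An allocation top may equal the exclusive upper bound of its page, so step
// back one pointer before masking, or a full page's top would be attributed
// to the next page.
inline Page* PageFromAllocationTop(Address top) {
  return PageFromAddress(top - kPointerSize);
}

int main() {
  Address page_start = reinterpret_cast<Address>(intptr_t{4} * kPageSize);
  assert(PageFromAddress(page_start + 100) ==
         reinterpret_cast<Page*>(page_start));
  assert(PageFromAllocationTop(page_start + kPageSize) ==
         reinterpret_cast<Page*>(page_start));
  return 0;
}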
141 // Returns the start address of this page.
142 Address address() { return reinterpret_cast<Address>(this); }
144 // Checks whether this is a valid page address.
145 bool is_valid() { return address() != NULL; }
151 inline Address AllocationTop();
153 // Returns the start address of the object area in this page.
154 Address ObjectAreaStart() { return address() + kObjectStartOffset; }
156 // Returns the end address (exclusive) of the object area in this page.
157 Address ObjectAreaEnd() { return address() + Page::kPageSize; }
159 // Returns the start address of the remembered set area.
160 Address RSetStart() { return address() + kRSetStartOffset; }
162 // Returns the end address of the remembered set area (exclusive).
163 Address RSetEnd() { return address() + kRSetEndOffset; }
165 // Checks whether an address is page aligned.
166 static bool IsAlignedToPageSize(Address a) {
173 // Returns the offset of a given address to this page.
174 INLINE(int Offset(Address a)) {
175 int offset = static_cast<int>(a - address());
180 // Returns the address for a given offset into this page.
181 Address OffsetToAddress(int offset) {
183 return address() + offset;
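The object-area and remembered-set accessors above are all fixed offsets from the page's own address, and Offset/OffsetToAddress simply invert each other. A small sketch of those relations, with placeholder offset constants rather than the real layout values:

#include <cassert>
#include <cstdint>

using Address = uint8_t*;
const intptr_t kPageSize = intptr_t{1} << 13;   // 8K

// Placeholder layout constants; the real header and remembered-set sizes differ.
const int kRSetStartOffset = 8;       // assumed
const int kRSetEndOffset = 256;       // assumed
const int kObjectStartOffset = 256;   // assumed

struct PageSketch {
  Address address() { return reinterpret_cast<Address>(this); }

  // All of these are fixed offsets from the page's own address.
  Address ObjectAreaStart() { return address() + kObjectStartOffset; }
  Address ObjectAreaEnd()   { return address() + kPageSize; }
  Address RSetStart()       { return address() + kRSetStartOffset; }
  Address RSetEnd()         { return address() + kRSetEndOffset; }

  // Offset and OffsetToAddress are inverses of each other.
  int Offset(Address a) { return static_cast<int>(a - address()); }
  Address OffsetToAddress(int offset) { return address() + offset; }
};

int main() {
  static uint8_t backing[1 << 13];
  PageSketch* page = reinterpret_cast<PageSketch*>(backing);
  Address a = page->ObjectAreaStart() + 40;
  assert(page->OffsetToAddress(page->Offset(a)) == a);   // round trip
  assert(page->RSetEnd() - page->RSetStart() ==
         kRSetEndOffset - kRSetStartOffset);
  return 0;
}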
192 // Return the address of the remembered set word corresponding to an
193 // object address/offset pair, and the bit encoded as a single-bit
195 INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
198 // Sets the corresponding remembered set bit for a given address.
199 INLINE(static void SetRSet(Address address, int offset));
201 // Clears the corresponding remembered set bit for a given address.
202 static inline void UnsetRSet(Address address, int offset);
204 // Checks whether the remembered set bit for a given address is set.
205 static inline bool IsRSetSet(Address address, int offset);
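ComputeRSetBitPosition maps an (object address, field offset) pair to a single remembered-set bit, with one bit per pointer-sized word of the page. The sketch below shows one plausible way to derive the rset word and bit mask from that word offset; the layout constant and the exact mapping are assumptions, not the implementation in this file.

#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <cstring>

using Address = uint8_t*;
const intptr_t kPageSize = intptr_t{1} << 13;
const intptr_t kPageAlignmentMask = kPageSize - 1;
const intptr_t kPointerSize = sizeof(void*);
const int kRSetStartOffset = 8;   // assumed placeholder

// One remembered-set bit per pointer-sized word of the page: the word offset
// of (address + offset) within its page selects an rset word and a bit in it.
static Address ComputeRSetBitPosition(Address address, int offset,
                                      uint32_t* bitmask) {
  intptr_t field = reinterpret_cast<intptr_t>(address + offset);
  intptr_t page_start = field & ~kPageAlignmentMask;
  intptr_t bit_index = (field - page_start) / kPointerSize;
  *bitmask = uint32_t{1} << (bit_index % 32);    // bit within one rset word
  intptr_t word_index = bit_index / 32;          // which rset word
  return reinterpret_cast<Address>(page_start) + kRSetStartOffset +
         word_index * sizeof(uint32_t);
}

static void SetRSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address word = ComputeRSetBitPosition(address, offset, &bitmask);
  *reinterpret_cast<uint32_t*>(word) |= bitmask;
}

static bool IsRSetSet(Address address, int offset) {
  uint32_t bitmask = 0;
  Address word = ComputeRSetBitPosition(address, offset, &bitmask);
  return (*reinterpret_cast<uint32_t*>(word) & bitmask) != 0;
}

int main() {
  // A page-aligned dummy page so the masking above lands inside the buffer.
  Address page = static_cast<Address>(std::aligned_alloc(kPageSize, kPageSize));
  std::memset(page, 0, kPageSize);
  Address object = page + 1024;         // pretend an object lives here
  SetRSet(object, 16);                  // record a pointer field at offset 16
  assert(IsRSetSet(object, 16));
  std::free(page);
  return 0;
}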
234 // to align start of rset to a uint32_t address.
251 // opaque_header, encodes the next page address (aligned to kPageSize 8K)
254 // or [next_page_start, next_page_end[. It cannot point to a valid address
257 // same) contain the address of the next large object chunk.
275 Address mc_relocation_top;
277 // The forwarding address of the first live object in this page.
278 Address mc_first_forwarded;
326 // displacements cover the entire 4GB virtual address space. On 64-bit
341 static bool contains(Address address) {
343 Address start = static_cast<Address>(code_range_->address());
344 return start <= address && address < start + code_range_->size();
359 FreeBlock(Address start_arg, size_t size_arg)
362 : start(static_cast<Address>(start_arg)), size(size_arg) {}
364 Address start;
414 // Reserves an initial address range of virtual memory to be split between
420 // address of the initial chunk if successful, with the side effect of
429 // address is non-null and that it is big enough to hold at least one
432 static Page* CommitPages(Address start, size_t size, PagedSpace* owner,
436 // the address is not NULL, the size is greater than zero, and that the
439 static bool CommitBlock(Address start, size_t size, Executability executable);
445 static bool UncommitBlock(Address start, size_t size);
449 static void ZapBlock(Address start, size_t size);
510 static inline void Protect(Address start, size_t size);
511 static inline void Unprotect(Address start, size_t size,
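The reserve/commit/uncommit distinction above is the usual split between reserving virtual address space and backing it with accessible memory. Below is a rough POSIX-only sketch of that split using mmap and mprotect; the function names mirror the declarations above, but the bodies are illustrative, not the MemoryAllocator's actual code.

#include <sys/mman.h>
#include <cassert>
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

// Reserve an address range without backing it with accessible memory.
Address ReserveInitialChunk(size_t requested) {
  void* mem = mmap(nullptr, requested, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return mem == MAP_FAILED ? nullptr : static_cast<Address>(mem);
}

// Commit a block inside the reservation: make it readable/writable
// (and executable for code pages).
bool CommitBlock(Address start, size_t size, bool executable) {
  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
  return mprotect(start, size, prot) == 0;
}

// Uncommit: drop access again; the address range itself stays reserved.
bool UncommitBlock(Address start, size_t size) {
  return mprotect(start, size, PROT_NONE) == 0;
}

int main() {
  const size_t kChunk = 1 << 20;
  Address chunk = ReserveInitialChunk(kChunk);
  assert(chunk != nullptr);
  assert(CommitBlock(chunk, 1 << 13, false));
  chunk[0] = 42;                        // committed memory is usable
  assert(UncommitBlock(chunk, 1 << 13));
  munmap(chunk, kChunk);
  return 0;
}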
545 // Allocated chunk info: chunk start address, chunk size, and owning space.
549 void init(Address a, size_t s, PagedSpace* o) {
554 Address address() { return address_; }
559 Address address_;
587 // True if the address lies in the initial chunk.
588 static inline bool InInitialChunk(Address address);
590 // Initializes pages in a chunk. Returns the first page address.
618 // A HeapObjectIterator iterates objects from a given address to the
619 // top of a space. The given address must be below the current
640 // address is not given, the iterator starts from the space bottom.
645 HeapObjectIterator(PagedSpace* space, Address start);
647 Address start,
658 Address cur_addr_; // current iteration point
659 Address end_addr_; // end iteration point
660 Address cur_limit_; // current page limit
662 Page* end_page_; // caches the page of the end address
681 void Initialize(Address start, Address end, HeapObjectCallback size_func);
742 Address top; // current allocation top
743 Address limit; // current allocation limit
846 // Set up the space using the given address range of virtual memory (from
850 bool Setup(Address start, size_t size);
860 // Checks whether an object/address is in this space.
861 inline bool Contains(Address a);
862 bool Contains(HeapObject* o) { return Contains(o->address()); }
864 // Given an address occupied by a live object, return that object if it is
866 // iterates over objects in the page containing the address, the cost is
868 Object* FindObject(Address addr);
879 virtual Address PageAllocationTop(Page* page) = 0;
898 // Returns the address of the first object in this space.
899 Address bottom() { return first_page_->ObjectAreaStart(); }
902 Address top() { return allocation_info_.top; }
928 // Computes the offset of a given address in this space to the beginning
930 int MCSpaceOffsetForAddress(Address addr);
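The top/limit pair in the allocation info above is a bump-pointer allocation window: an allocation succeeds by advancing top as long as it stays below limit. The following sketch shows that usage; it is an assumption about how the fields are used, not code from this header.

#include <cassert>
#include <cstdint>

using Address = uint8_t*;

// The top/limit pair forms a bump-pointer allocation window.
struct AllocationInfo {
  Address top;    // next free byte
  Address limit;  // first byte past the usable area
};

// Try to carve size_in_bytes out of the window; advance top on success.
inline Address AllocateRaw(AllocationInfo* info, int size_in_bytes) {
  if (info->limit - info->top < size_in_bytes) {
    return nullptr;  // window exhausted: needs a new page or a GC
  }
  Address result = info->top;
  info->top += size_in_bytes;
  return result;
}

int main() {
  static uint8_t area[1 << 13];
  AllocationInfo info{area, area + sizeof(area)};
  Address obj = AllocateRaw(&info, 64);
  assert(obj == area && info.top == area + 64);
  return 0;
}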
1105 bool Setup(Address start, int initial_capacity, int maximum_capacity);
1117 // address range to grow).
1129 // Returns the start address of the space.
1130 Address low() { return start_; }
1131 // Returns one past the end address of the space.
1132 Address high() { return low() + capacity_; }
1135 Address age_mark() { return age_mark_; }
1136 void set_age_mark(Address mark) { age_mark_ = mark; }
1138 // True if the address is in the address range of this semispace (not
1140 bool Contains(Address a) {
1145 // True if the object is a heap object in the address range of this
1151 // The offset of an address from the beginning of the space.
1152 int SpaceOffsetForAddress(Address addr) {
1192 // The start address of the space.
1193 Address start_;
1195 Address age_mark_;
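For a semispace, low() and high() bound one contiguous range, containment is a check against that range, and SpaceOffsetForAddress is just the distance from start_. A compact sketch assuming a plain start/capacity representation (the real class also keeps masks and committed/maximum capacities):

#include <cassert>
#include <cstdint>

using Address = uint8_t*;

// Minimal stand-in for the semispace fields referenced above.
struct SemiSpaceSketch {
  Address start_;      // start address of the space
  int capacity_;       // current capacity in bytes

  Address low()  { return start_; }              // first address
  Address high() { return low() + capacity_; }   // one past the last address

  // Range-based containment; the real class can also use an address mask.
  bool Contains(Address a) { return low() <= a && a < high(); }

  // Offset of an address from the beginning of the space.
  int SpaceOffsetForAddress(Address addr) {
    return static_cast<int>(addr - start_);
  }
};

int main() {
  static uint8_t backing[1 << 16];
  SemiSpaceSketch s{backing, 1 << 16};
  assert(s.Contains(backing + 100));
  assert(s.SpaceOffsetForAddress(backing + 100) == 100);
  assert(!s.Contains(backing + (1 << 16)));      // high() is exclusive
  return 0;
}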
1211 // semispace from a given start address (defaulting to the bottom of the
1217 // address is given, the iterator starts from the bottom of the space. If
1221 SemiSpaceIterator(NewSpace* space, Address start);
1237 void Initialize(NewSpace* space, Address start, Address end,
1243 Address current_;
1245 Address limit_;
1263 bool Setup(Address start, int size);
1284 // True if the address or object lies in the address range of either
1286 bool Contains(Address a) {
1324 // Return the address of the allocation pointer in the active semispace.
1325 Address top() { return allocation_info_.top; }
1326 // Return the address of the first object in the active semispace.
1327 Address bottom() { return to_space_.low(); }
1330 Address age_mark() { return from_space_.age_mark(); }
1332 Address mark) { to_space_.set_age_mark(mark); }
1334 // The start address of the space and a bit mask. Anding an address in the
1335 // new space with the mask will result in the start address.
1336 Address start() { return start_; }
1340 Address* allocation_top_address() { return &allocation_info_.top; }
1341 Address* allocation_limit_address() { return &allocation_info_.limit; }
1363 Address FromSpaceLow() { return from_space_.low(); }
1364 Address FromSpaceHigh() { return from_space_.high(); }
1368 Address ToSpaceLow() { return to_space_.low(); }
1369 Address ToSpaceHigh() { return to_space_.high(); }
1372 int ToSpaceOffsetForAddress(Address a) {
1375 int FromSpaceOffsetForAddress(Address a) {
1379 // True if the object is a heap object in the address range of the
1385 bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
1386 bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
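Per the comment above, ANDing an address in the new space with the mask yields the start address, so a single mask-and-compare covers both semispaces. A sketch of that containment test, assuming the space is a power-of-two size and aligned to its own size; the size constant is hypothetical.

#include <cassert>
#include <cstdint>
#include <cstdlib>

using Address = uint8_t*;

// Hypothetical 256K new space, aligned to its own size so that masking works.
const uintptr_t kNewSpaceSize = uintptr_t{1} << 18;
const uintptr_t kNewSpaceMask = ~(kNewSpaceSize - 1);

struct NewSpaceSketch {
  Address start_;   // start address of the space (aligned to kNewSpaceSize)

  Address start() { return start_; }

  // ANDing an address in the new space with the mask yields the start
  // address, so one comparison covers both semispaces.
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & kNewSpaceMask) ==
           reinterpret_cast<uintptr_t>(start_);
  }
};

int main() {
  void* mem = std::aligned_alloc(kNewSpaceSize, kNewSpaceSize);
  assert(mem != nullptr);
  NewSpaceSketch ns{static_cast<Address>(mem)};
  assert(ns.Contains(ns.start() + 4096));
  assert(!ns.Contains(ns.start() + kNewSpaceSize));   // one past the end
  std::free(mem);
  return 0;
}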
1434 // Start address and bit mask for containment testing.
1435 Address start_;
1467 // the raw address of the next free list node (or NULL).
1470 // Obtain a free-list node from a raw address. This is not a cast because
1471 // it does not check nor require that the first word at the address is a map
1473 static FreeListNode* FromAddress(Address address) {
1474 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1486 inline Address next();
1487 inline void set_next(Address next);
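A free-list node is overlaid on the memory of a freed object, and its link to the next free node is stored as a raw address rather than a tagged pointer, which is why FromAddress is described as not requiring a map word. A sketch of that linkage follows; the layout (next pointer stored in the first word) is an assumption, and the sketch skips the heap-object tagging that the real FromAddress applies via HeapObject::FromAddress.

#include <cassert>
#include <cstdint>
#include <cstring>

using Address = uint8_t*;

// Sketch of a free-list node overlaid on free memory.  The next-node link is
// written into the node's own body as a raw address.
struct FreeListNodeSketch {
  static FreeListNodeSketch* FromAddress(Address address) {
    return reinterpret_cast<FreeListNodeSketch*>(address);
  }

  Address address() { return reinterpret_cast<Address>(this); }

  // Load/store the raw address of the next free node (or nullptr).
  Address next() {
    Address value;
    std::memcpy(&value, address(), sizeof(value));
    return value;
  }
  void set_next(Address next) {
    std::memcpy(address(), &next, sizeof(next));
  }
};

int main() {
  static uint8_t block_a[32], block_b[32];
  FreeListNodeSketch* a = FreeListNodeSketch::FromAddress(block_a);
  FreeListNodeSketch* b = FreeListNodeSketch::FromAddress(block_b);
  a->set_next(b->address());
  b->set_next(nullptr);
  assert(a->next() == b->address());
  return 0;
}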
1511 // i.e., its contents will be destroyed. The start address should be word
1513 int Free(Address start, int size_in_bytes);
1539 // Address of the head FreeListNode of the implied block size or NULL.
1540 Address head_node_;
1590 // Does this free list contain a free block located at the address of 'node'?
1612 // destroyed. The start address should be word aligned.
1613 void Free(Address start);
1624 Address head_;
1656 virtual Address PageAllocationTop(Page* page) {
1662 void Free(Address start, int size_in_bytes) {
1719 virtual Address PageAllocationTop(Page* page) {
1727 void Free(Address start) {
1788 // Given an index, returns the page address.
1789 Address PageAddress(int page_index) { return page_addresses_[page_index]; }
1808 Address TopAfterCompaction(int live_maps) {
1823 Address top = top_page->ObjectAreaStart() + offset;
1830 void FinishCompaction(Address new_top, int live_maps) {
1870 // An array of page start addresses in a map space.
1871 Address page_addresses_[kMaxMapPageIndex];
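TopAfterCompaction can compute the new allocation top arithmetically because maps have a fixed size: after compaction the first live_maps maps occupy a page index and an in-page offset that both follow from live_maps alone, and page_addresses_ translates the index back to a page start. A sketch of that arithmetic; kMapSize, kMapsPerPage, and the other constants are illustrative assumptions, not the real values.

#include <cassert>
#include <cstdint>

using Address = uint8_t*;

const int kMapSize = 128;            // assumed fixed map size
const int kMapsPerPage = 60;         // assumed maps per page object area
const int kObjectStartOffset = 256;  // assumed page header size
const int kMaxMapPageIndex = 8;      // assumed page count

struct MapSpaceSketch {
  Address page_addresses_[kMaxMapPageIndex];   // page start addresses

  Address PageAddress(int page_index) { return page_addresses_[page_index]; }

  // After compaction the first live_maps maps are densely packed from the
  // bottom of the space, so the new top is a page index plus an in-page
  // offset, both derived from live_maps.
  Address TopAfterCompaction(int live_maps) {
    int page_index = live_maps / kMapsPerPage;
    int offset = (live_maps % kMapsPerPage) * kMapSize;
    return PageAddress(page_index) + kObjectStartOffset + offset;
  }
};

int main() {
  static uint8_t pages[kMaxMapPageIndex][1 << 13];
  MapSpaceSketch space;
  for (int i = 0; i < kMaxMapPageIndex; i++) space.page_addresses_[i] = pages[i];

  // 70 live maps: one full page of 60, plus 10 maps on the second page.
  Address top = space.TopAfterCompaction(70);
  assert(top == space.PageAddress(1) + kObjectStartOffset + 10 * kMapSize);
  return 0;
}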
1917 // Interpret a raw address as a large object chunk.
1918 static LargeObjectChunk* FromAddress(Address address) {
1919 return reinterpret_cast<LargeObjectChunk*>(address);
1922 // Returns the address of this chunk.
1923 Address address() { return reinterpret_cast<Address>(this); }
1994 // Finds an object for a given address, returns Failure::Exception()
1997 Object* FindObject(Address a);
2033 // Checks whether an address is in the object area in this space. It
2035 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); }