Lines Matching refs:block
37 * are the block space table and the block queue.
39 * The block space table records the state of a block. We must track
40 * whether a block is:
44 * - If the block holds part of a large object allocation, whether the
45 * block is the initial or a continued block of the allocation.
47 * - Whether the block is pinned, that is to say whether at least one
48 * object in the block must remain stationary. Only needed during a
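[Editor's note] The fragments above enumerate the states the block space table must record: free or allocated, initial or continued block of a large object, and pinned or not. A minimal sketch of such a table, one byte of state per block (line 474), with 0 meaning free (line 205); only BLOCK_TO_SPACE appears elsewhere in this listing, the other names and values are assumptions, and the pinned state described above is tracked by a mechanism the listing does not show:

    /* Hypothetical block states; values beyond 0 (free) are illustrative. */
    enum {
        BLOCK_FREE       = 0,  /* not part of any allocation */
        BLOCK_FROM_SPACE = 1,  /* holds objects awaiting evacuation */
        BLOCK_TO_SPACE   = 2,  /* holds copied or promoted objects */
        BLOCK_CONTINUED  = 7   /* continuation of a large-object span */
    };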
54 * The block queue is used during garbage collection. Unlike Cheney's
57 * The block queue exists to thread lists of blocks from the various
61 * well as the address of the first object within a block, which is
69 * inside a block are packed together within a block. Objects that
70 * are larger than a block are allocated from contiguous sequences of
96 * The block scheme allows us to use VM page faults to maintain a
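[Editor's note] Line 96 breaks off after "maintain a", presumably "a write barrier". The listing does not show the mechanism, but the standard way to derive a write barrier from VM page faults is to write-protect heap pages and record the dirtied page in a fault handler before retrying the store. A sketch of that general technique, not necessarily what this collector does; every name here is illustrative:

    #include <signal.h>
    #include <stdint.h>
    #include <string.h>
    #include <sys/mman.h>

    #define PAGE_SIZE 4096u

    /* One dirty byte per page; sized for a 4 GiB address space. */
    static unsigned char pageDirty[1u << 20];

    static void barrierHandler(int sig, siginfo_t *info, void *ctx)
    {
        uintptr_t page = (uintptr_t)info->si_addr & ~(uintptr_t)(PAGE_SIZE - 1);
        pageDirty[page / PAGE_SIZE] = 1;   /* remember the dirtied page */
        /* Unprotect so the faulting write is retried and succeeds. */
        mprotect((void *)page, PAGE_SIZE, PROT_READ | PROT_WRITE);
        (void)sig; (void)ctx;
    }

    static void installWriteBarrier(void *base, size_t length)
    {
        struct sigaction sa;
        memset(&sa, 0, sizeof sa);
        sa.sa_sigaction = barrierHandler;
        sa.sa_flags = SA_SIGINFO;
        sigaction(SIGSEGV, &sa, NULL);
        /* Read-only mapping: the first store to each page faults once. */
        mprotect(base, length, PROT_READ);
    }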
143 static void enqueueBlock(HeapSource *heapSource, size_t block);
195 * the block. We convert blocks to a relative number when
196 * indexing in the block queue. TODO: make the block queue base
197 * relative rather than the index into the block queue.
205 /* The space of the current block: 0 (free), 1, or 2. */
208 /* Start of free space in the current block. */
210 /* Exclusive limit of free space in the current block. */
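[Editor's note] Lines 195 through 210 describe per-heap bookkeeping. Collected into one place, the fields named throughout this listing suggest a structure roughly like the sketch below; the types, the allocPtr/allocLimit names for the cursor fields, and the QUEUE_TAIL value are assumptions:

    #include <stddef.h>

    typedef unsigned char u1;

    /* Sentinel terminating the block queue; the value is an assumption. */
    #define QUEUE_TAIL ((size_t)-1)

    typedef struct {
        size_t baseBlock;    /* absolute number of the first heap block */
        size_t limitBlock;   /* absolute number one past the last heap block */
        size_t totalBlocks;  /* blocks spanned by the heap */
        size_t allocBlocks;  /* blocks currently allocated */
        u1 *blockBase;       /* address of the first heap block */
        u1 *blockSpace;      /* byte per block: space residence or free */
        size_t *blockQueue;  /* next-links threading blocks into the queue */
        size_t queueHead;    /* first queued block, or QUEUE_TAIL if empty */
        size_t queueTail;    /* most recently queued block */
        u1 *allocPtr;        /* start of free space in the current block */
        u1 *allocLimit;      /* exclusive limit of free space in the current block */
    } HeapSource;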
284 size_t block;
286 block = (uintptr_t)addr >> BLOCK_SHIFT;
287 return heapSource->baseBlock <= block &&
288 heapSource->limitBlock > block;
293 * Iterate over the block map looking for a contiguous run of free
306 /* Scan block map. */
329 LOG_ALLOC("allocateBlocks allocBlocks=%zu,block#=%zu", heapSource->allocBlocks, i);
332 * shaded a white object gray. We enqueue the block so
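[Editor's note] Lines 293 through 306 describe allocateBlocks as a scan of the block map for a contiguous run of free blocks, but the scan itself is not shown. A first-fit sketch over the byte-per-block table from above; findFreeRun is a hypothetical helper, and the real function also marks the run allocated and handles the gray-shading case at line 332:

    /* Returns the relative number of the first block of a run of
     * `blocks` consecutive free blocks, or (size_t)-1 if none exists. */
    static size_t findFreeRun(const u1 *blockSpace, size_t totalBlocks,
                              size_t blocks)
    {
        size_t i, run = 0;
        for (i = 0; i < totalBlocks; ++i) {
            if (blockSpace[i] == BLOCK_FREE) {
                if (++run == blocks)
                    return i + 1 - blocks;  /* start of the run */
            } else {
                run = 0;
            }
        }
        return (size_t)-1;
    }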
348 /* Converts an absolute address to a relative block number. */
356 /* Converts a relative block number to an absolute address. */
357 static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
361 addr = (u1 *) (((uintptr_t) heapSource->baseBlock + block) * BLOCK_SIZE);
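[Editor's note] Line 361 gives blockToAddress in full, and line 286 shows the shift applied when validating an address; together they pin down the conversion pair. A sketch of both directions, building on the HeapSource sketch above; BLOCK_SHIFT's value is illustrative, with BLOCK_SIZE = 1 << BLOCK_SHIFT:

    #include <stdint.h>

    #define BLOCK_SHIFT 12                  /* illustrative: 4 KiB blocks */
    #define BLOCK_SIZE  (1u << BLOCK_SHIFT)

    /* Absolute address -> relative block number (inferred from line 286). */
    static size_t addressToBlock(const HeapSource *heapSource, const u1 *addr)
    {
        return ((uintptr_t)addr >> BLOCK_SHIFT) - heapSource->baseBlock;
    }

    /* Relative block number -> absolute address (as at line 361). */
    static u1 *blockToAddress(const HeapSource *heapSource, size_t block)
    {
        return (u1 *)(((uintptr_t)heapSource->baseBlock + block) * BLOCK_SIZE);
    }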
366 static void clearBlock(HeapSource *heapSource, size_t block)
369 assert(block < heapSource->totalBlocks);
370 u1 *addr = heapSource->blockBase + block*BLOCK_SIZE;
403 * Appends the given block to the block queue. The block queue is
406 static void enqueueBlock(HeapSource *heapSource, size_t block)
409 assert(block < heapSource->totalBlocks);
411 heapSource->blockQueue[heapSource->queueTail] = block;
413 heapSource->queueHead = block;
415 heapSource->blockQueue[block] = QUEUE_TAIL;
416 heapSource->queueTail = block;
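[Editor's note] Lines 406 through 416 show both branches of the tail append but not the test that selects between them. Filled in, enqueueBlock plausibly reads as follows; the emptiness test against QUEUE_TAIL is an assumption:

    #include <assert.h>

    /* Appends `block` to the list threaded through blockQueue[]. */
    static void enqueueBlock(HeapSource *heapSource, size_t block)
    {
        assert(block < heapSource->totalBlocks);
        if (heapSource->queueHead != QUEUE_TAIL) {
            /* Non-empty queue: link the old tail to the new block. */
            heapSource->blockQueue[heapSource->queueTail] = block;
        } else {
            /* Empty queue: the new block becomes the head. */
            heapSource->queueHead = block;
        }
        heapSource->blockQueue[block] = QUEUE_TAIL;
        heapSource->queueTail = block;
    }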
421 * Grays all objects within the block corresponding to the given
426 size_t block;
428 block = addressToBlock(heapSource, (const u1 *)addr);
429 if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
430 // LOG_PROM("promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
431 heapSource->blockSpace[block] = BLOCK_TO_SPACE;
432 enqueueBlock(heapSource, block);
436 // LOG_PROM("NOT promoting block %zu %d @ %p", block, heapSource->blockSpace[block], obj);
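[Editor's note] Lines 426 through 436 contain nearly the whole promotion path. Assembled, it amounts to the sketch below; the function name is hypothetical, and the listing's else branch only logs:

    /* Marks the block containing `addr` as to-space and queues it for
     * scavenging, unless it is already there. */
    static void promoteBlock(HeapSource *heapSource, const void *addr)
    {
        size_t block = addressToBlock(heapSource, (const u1 *)addr);
        if (heapSource->blockSpace[block] != BLOCK_TO_SPACE) {
            heapSource->blockSpace[block] = BLOCK_TO_SPACE;
            enqueueBlock(heapSource, block);
        }
    }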
474 /* Byte indicating space residence or free status of block. */
575 * allocation cursor points into a block of free storage. If the
576 * given allocation fits in the remaining space of the block, we
578 * the allocation cannot fit in the current block but is smaller than
579 * a block, we request a new block and allocate from it instead. If
580 * the allocation is larger than a block, we must allocate from a span
597 /* Try allocating inside the current block. */
606 /* Try allocating in a new block. */
614 /* TODO(cshapiro): pad out the current block. */
627 /* TODO(cshapiro): pad out free space in the last block. */
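[Editor's note] The paragraph at lines 575 through 580 spells out a three-way policy: bump the cursor inside the current block, fall back to a fresh block for sub-block requests, or carve a contiguous span for anything larger. A sketch of that shape, reusing the helpers sketched earlier; alignment, space-table marking, and the padding TODOs at lines 614 and 627 are elided:

    /* Returns storage for `n` bytes, or NULL on exhaustion.  A sketch. */
    static void *allocate(HeapSource *heapSource, size_t n)
    {
        /* Fast path: the request fits in the current block. */
        if ((size_t)(heapSource->allocLimit - heapSource->allocPtr) >= n) {
            u1 *addr = heapSource->allocPtr;
            heapSource->allocPtr += n;
            return addr;
        }
        /* Middle path: smaller than a block, so take a fresh block and
         * bump-allocate from it (the old block's tail is wasted; see
         * the padding TODO at line 614). */
        if (n <= BLOCK_SIZE) {
            size_t block = findFreeRun(heapSource->blockSpace,
                                       heapSource->totalBlocks, 1);
            if (block == (size_t)-1)
                return NULL;
            heapSource->allocPtr = blockToAddress(heapSource, block);
            heapSource->allocLimit = heapSource->allocPtr + BLOCK_SIZE;
            u1 *addr = heapSource->allocPtr;
            heapSource->allocPtr += n;
            return addr;
        }
        /* Slow path: a span of contiguous blocks for a large object. */
        size_t blocks = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
        size_t first = findFreeRun(heapSource->blockSpace,
                                   heapSource->totalBlocks, blocks);
        return first == (size_t)-1 ? NULL
                                   : blockToAddress(heapSource, first);
    }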
642 size_t block;
648 block = addressToBlock(heapSource, (const u1 *)addr);
651 * Forcibly append the underlying block to the queue. This
655 enqueueBlock(heapSource, block);
656 LOG_PROM("forced promoting block %zu %d @ %p", block, heapSource->blockSpace[block], addr);
767 /* Reset the block queue. */
772 /* TODO(cshapiro): pad the current (prev) block. */
1352 /* The entire block is black. */
1873 * Heap block scavenging.
1877 * Scavenge objects in the current block. Scavenging terminates when
1878 * the pointer reaches the highest address in the block or when a run
1881 static void scavengeBlock(HeapSource *heapSource, size_t block)
1887 LOG_SCAV("scavengeBlock(heapSource=%p,block=%zu)", heapSource, block);
1890 assert(block < heapSource->totalBlocks);
1891 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
1893 cursor = blockToAddress(heapSource, block);
1897 /* Parse and scavenge the current block. */
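[Editor's note] Lines 1877 through 1897 describe scavengeBlock as a cursor walk that parses objects from the block base until it reaches the block limit or a run of free space. A sketch of that loop; Object, isAllocated, objectSize, and scavengeObject are hypothetical helpers standing in for the object model:

    #include <assert.h>

    /* Hypothetical object-model helpers. */
    typedef struct Object Object;
    extern int isAllocated(const HeapSource *heapSource, const u1 *addr);
    extern size_t objectSize(const Object *obj);
    extern void scavengeObject(Object *obj);

    static void scavengeBlock(HeapSource *heapSource, size_t block)
    {
        assert(block < heapSource->totalBlocks);
        assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);

        u1 *cursor = blockToAddress(heapSource, block);
        u1 *end = cursor + BLOCK_SIZE;

        /* Parse objects until the block limit or a run of free space. */
        while (cursor < end && isAllocated(heapSource, cursor)) {
            Object *obj = (Object *)cursor;
            scavengeObject(obj);
            cursor += objectSize(obj);
        }
    }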
1938 static void verifyBlock(HeapSource *heapSource, size_t block)
1944 // LOG_VER("verifyBlock(heapSource=%p,block=%zu)", heapSource, block);
1947 assert(block < heapSource->totalBlocks);
1948 assert(heapSource->blockSpace[block] == BLOCK_TO_SPACE);
1950 cursor = blockToAddress(heapSource, block);
1954 /* Parse and verify the current block. */
1977 size_t block, count;
1980 block = heapSource->queueHead;
1984 while (block != QUEUE_TAIL) {
1985 block = heapSource->blockQueue[block];
1990 block = heapSource->queueHead;
1991 while (block != QUEUE_TAIL) {
1992 space = heapSource->blockSpace[block];
1993 LOG_SCAV("block=%zu@%p,space=%zu", block, blockToAddress(heapSource,block), space);
1994 block = heapSource->blockQueue[block];
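[Editor's note] The traversal at lines 1980 through 1994 walks the queue non-destructively by chasing next-links until the QUEUE_TAIL sentinel. Isolated, the pattern is:

    /* Counts queued blocks without modifying the queue. */
    static size_t countQueuedBlocks(const HeapSource *heapSource)
    {
        size_t count = 0;
        size_t block = heapSource->queueHead;
        while (block != QUEUE_TAIL) {
            ++count;
            block = heapSource->blockQueue[block];
        }
        return count;
    }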
2006 size_t block;
2012 block = heapSource->queueHead;
2013 LOG_SCAV("Dequeueing block %zu", block);
2014 scavengeBlock(heapSource, block);
2015 heapSource->queueHead = heapSource->blockQueue[block];
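[Editor's note] Lines 2012 through 2015 show the destructive counterpart: scavenge the head block, then advance the head past it. Because scavengeBlock can enqueue further blocks, this loop drives the collection to a fixed point. A sketch, with the emptiness test assumed:

    /* Drains the queue in FIFO order.  scavengeBlock may enqueue more
     * blocks, so the loop runs until the queue is empty. */
    static void scavengeBlockQueue(HeapSource *heapSource)
    {
        while (heapSource->queueHead != QUEUE_TAIL) {
            size_t block = heapSource->queueHead;
            scavengeBlock(heapSource, block);
            heapSource->queueHead = heapSource->blockQueue[block];
        }
    }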
2022 * Scan the block list and verify all blocks that are marked as being
2039 LOG_VER("Block Demographics: "
2096 /* Reset allocation to an unallocated block. */
2100 * Hack: promote the empty block allocated above. If the
2102 * objects, the block queue may be empty. We must force a