
Lines Matching refs:POOL

279 /* Pool for small blocks. */
283 block *freeblock; /* pool's free list head */
284 struct pool_header *nextpool; /* next pool of this size class */
285 struct pool_header *prevpool; /* previous pool "" */
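The four matches above are fields of obmalloc's pool header. For orientation, the whole declaration plausibly reads as follows in the source of this vintage (a reconstruction from the matched comments plus the CPython sources, not itself a match):

    /* Pool for small blocks (reconstruction, not a match). */
    struct pool_header {
        union { block *_padding;
                uint count; } ref;      /* number of allocated blocks    */
        block *freeblock;               /* pool's free list head         */
        struct pool_header *nextpool;   /* next pool of this size class  */
        struct pool_header *prevpool;   /* previous pool       ""        */
        uint arenaindex;                /* index into arenas of base adr */
        uint szidx;                     /* block size class index        */
        uint nextoffset;                /* bytes to virgin block         */
        uint maxnextoffset;             /* largest valid nextoffset      */
    };

    typedef struct pool_header *poolp;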
303 /* Pool-aligned pointer to the next pool to be carved off. */
323 * with at least one available pool, both members are used in the
341 /* Round pointer P down to the closest pool-aligned address <= P, as a poolp */
344 /* Return total number of blocks in pool of size index I, as a uint. */
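Both matched comments document one-line macros. Plausible definitions, consistent with the pool-alignment scheme described below (a sketch, not a match):

    /* Pools are POOL_SIZE-aligned, so masking off the low bits of any
     * block address recovers the address of its pool_header. */
    #define POOL_ADDR(P) ((poolp)((uptr)(P) & ~(uptr)POOL_SIZE_MASK))

    /* Usable bytes in a pool divided by the block size for index I. */
    #define NUMBLOCKS(I) ((uint)(POOL_SIZE - POOL_OVERHEAD) / INDEX2SIZE(I))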
359 * Pool table -- headed, circular, doubly-linked lists of partially used pools.
367 member) as needed. Once carved off, a pool is in one of three states forever
371 At least one block in the pool is currently allocated, and at least one
372 block in the pool is not currently allocated (note this implies a pool
374 This is a pool's initial state, as a pool is created only when malloc
376 The pool holds blocks of a fixed size, and is in the circular list headed
383 full == all the pool's blocks are currently allocated
384 On transition to full, a pool is unlinked from its usedpools[] list.
387 A free of a block in a full pool puts the pool back in the used state.
391 empty == all the pool's blocks are currently available for allocation
392 On transition to empty, a pool is unlinked from its usedpools[] list,
396 an empty list in usedpools[], it takes the first pool off of freepools.
397 If the size class needed happens to be the same as the size class the pool
398 last had, some pool initialization can be skipped.
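All three transitions described above are ordinary surgery on circular, doubly-linked lists with a sentinel header. A self-contained toy showing the two moves involved -- unlink on the used-to-full transition, insert-at-front on full-to-used (the names are mine, not obmalloc's):

    #include <assert.h>
    #include <stdio.h>

    struct node { struct node *next, *prev; int id; };

    /* An empty circular list: the header is its own neighbor. */
    static void list_init(struct node *head) { head->next = head->prev = head; }

    /* Unlink n from its list -- what happens when a pool fills up. */
    static void unlink_node(struct node *n)
    {
        n->prev->next = n->next;
        n->next->prev = n->prev;
    }

    /* Insert n right after the header -- a free in a full pool puts
     * the pool back at the front of its usedpools[] list. */
    static void insert_front(struct node *head, struct node *n)
    {
        n->next = head->next;
        n->prev = head;
        head->next->prev = n;
        head->next = n;
    }

    int main(void)
    {
        struct node head, a = { 0, 0, 1 };
        list_init(&head);
        assert(head.next == &head);                 /* empty */
        insert_front(&head, &a);                    /* "used" */
        assert(head.next == &a && head.prev == &a);
        unlink_node(&a);                            /* "full" */
        assert(head.next == &head && head.prev == &head);
        puts("list demo ok");
        return 0;
    }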
403 Blocks within pools are again carved out as needed. pool->freeblock points to
404 the start of a singly-linked list of free blocks within the pool. When a
405 block is freed, it's inserted at the front of its pool's freeblock list. Note
406 that the available blocks in a pool are *not* linked all together when a pool
408 set up, returning the first such block, and setting pool->freeblock to a
410 pymalloc strives at all levels (arena, pool, and block) never to touch a piece
413 So long as a pool is in the used state, we're certain there *is* a block
414 available for allocating, and pool->freeblock is not NULL. If pool->freeblock
415 points to the end of the free list before we've carved the entire pool into
420 pool is initialized. All the blocks in a pool have been passed out at least
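(The truncated sentence continues, in the source, to the effect that all blocks have been passed out at least once exactly when nextoffset > maxnextoffset.) A worked example, assuming a build where POOL_SIZE is 4096 and POOL_OVERHEAD works out to 48 (both are assumptions about the build, not matches): a fresh pool of 16-byte blocks returns the block at offset 48, points freeblock at offset 64 with a NULL link, and sets nextoffset = 48 + 2*16 = 80 and maxnextoffset = 4096 - 16 = 4080. Each time the free list runs dry, one virgin block is carved at nextoffset and nextoffset grows by 16; only after the block at offset 4080 is handed out does nextoffset reach 4096 > maxnextoffset, i.e., every one of the (4096 - 48) / 16 = 253 blocks has been passed out at least once.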
560 uint excess; /* number of bytes above pool alignment */
640 /* pool_address <- first pool-aligned address in the arena
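The computation those two matches come from plausibly looks like this (a reconstruction; `excess` is the variable declared at line 560):

    /* pool_address <- first pool-aligned address in the arena
       nfreepools <- number of whole pools that fit after alignment */
    usable_arenas->pool_address = (block *)usable_arenas->address;
    usable_arenas->nfreepools = ARENA_SIZE / POOL_SIZE;
    excess = (uint)(usable_arenas->address & POOL_SIZE_MASK);
    if (excess != 0) {
        /* The arena base isn't pool-aligned: give up the partial pool. */
        --usable_arenas->nfreepools;
        usable_arenas->pool_address += POOL_SIZE - excess;
    }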
656 Py_ADDRESS_IN_RANGE(P, POOL)
659 POOL must be the pool address associated with P, i.e., POOL = POOL_ADDR(P)
660 (the caller is asked to compute this because the macro expands POOL more than
665 Tricky: Let B be the arena base address associated with the pool, B =
666 arenas[(POOL)->arenaindex].address. Then P belongs to the arena if and only if
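The arithmetic elided between these two matches is the standard unsigned range check: P belongs to the arena iff B <= P < B + ARENA_SIZE, i.e., 0 <= P - B < ARENA_SIZE after subtracting B throughout, and with unsigned (uptr) arithmetic the "0 <=" half comes for free, since P < B wraps the subtraction around to a huge value. That is why the macro below needs only one comparison against ARENA_SIZE and no lower bound.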
679 (POOL)->arenaindex < maxarenas must be false, saving us from trying to index
682 Details: given P and POOL, the arena_object corresponding to P is AO =
683 arenas[(POOL)->arenaindex]. Suppose obmalloc controls P. Then (barring wild
684 stores, etc), POOL is the correct address of P's pool, AO.address is the
685 correct base address of the pool's arena, and P must be within ARENA_SIZE of
691 call to the system malloc() or realloc()). (POOL)->arenaindex may be anything
722 memory when P is not controlled by obmalloc (to get at (POOL)->arenaindex)
730 by Python, it is important that (POOL)->arenaindex is read only once, as
733 (POOL)->arenaindex for the duration of the Py_ADDRESS_IN_RANGE macro's
737 #define Py_ADDRESS_IN_RANGE(P, POOL) \
738 ((arenaindex_temp = (POOL)->arenaindex) < maxarenas && \
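The match cuts the macro off after its first clause. The remaining clauses, reconstructed from the discussion at lines 665-691 (a reconstruction, not a match), are the unsigned range test and the guard against a zero arena address:

    #define Py_ADDRESS_IN_RANGE(P, POOL)                                   \
        ((arenaindex_temp = (POOL)->arenaindex) < maxarenas &&             \
         (uptr)(P) - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE && \
         arenas[arenaindex_temp].address != 0)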
770 int Py_ADDRESS_IN_RANGE(void *P, poolp pool) Py_NO_INLINE;
793 poolp pool;
822 pool = usedpools[size + size];
823 if (pool != pool->nextpool) {
825 * There is a used pool for this size class.
828 ++pool->ref.count;
829 bp = pool->freeblock;
831 if ((pool->freeblock = *(block **)bp) != NULL) {
838 if (pool->nextoffset <= pool->maxnextoffset) {
840 pool->freeblock = (block*)pool +
841 pool->nextoffset;
842 pool->nextoffset += INDEX2SIZE(size);
843 *(block **)(pool->freeblock) = NULL;
847 /* Pool is full, unlink from used pools. */
848 next = pool->nextpool;
849 pool = pool->prevpool;
850 next->prevpool = pool;
851 pool->nextpool = next;
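Stitching matched lines 822-851 back together, the allocation fast path reads roughly as follows (a sketch: declarations, LOCK/UNLOCK, and the gaps between matches are filled from context, not from the matches themselves; `size` is still the size class index here):

    pool = usedpools[size + size];       /* list header for this size class */
    if (pool != pool->nextpool) {
        /* There is a used pool for this size class: pop a block. */
        ++pool->ref.count;
        bp = pool->freeblock;
        if ((pool->freeblock = *(block **)bp) != NULL)
            return (void *)bp;           /* free list still non-empty */

        /* Free list exhausted: carve the next virgin block, if any. */
        if (pool->nextoffset <= pool->maxnextoffset) {
            pool->freeblock = (block *)pool + pool->nextoffset;
            pool->nextoffset += INDEX2SIZE(size);
            *(block **)(pool->freeblock) = NULL;
            return (void *)bp;
        }

        /* Pool is full: unlink it from its usedpools[] list. */
        next = pool->nextpool;
        pool = pool->prevpool;
        next->prevpool = pool;
        pool->nextpool = next;
        return (void *)bp;
    }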
856 /* There isn't a pool of the right size class immediately
857 * available: use a free pool.
860 /* No arena has a free pool: allocate a new arena. */
877 /* Try to get a cached free pool. */
878 pool = usable_arenas->freepools;
879 if (pool != NULL) {
881 usable_arenas->freepools = pool->nextpool;
918 pool->nextpool = next;
919 pool->prevpool = next;
920 next->nextpool = pool;
921 next->prevpool = pool;
922 pool->ref.count = 1;
923 if (pool->szidx == size) {
924 /* Luckily, this pool last contained blocks
928 bp = pool->freeblock;
929 pool->freeblock = *(block **)bp;
934 * Initialize the pool header, set up the free list to
938 pool->szidx = size;
940 bp = (block *)pool + POOL_OVERHEAD;
941 pool->nextoffset = POOL_OVERHEAD + (size << 1);
942 pool->maxnextoffset = POOL_SIZE - size;
943 pool->freeblock = bp + size;
944 *(block **)(pool->freeblock) = NULL;
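A note on the snippet above: the unmatched line 939, between lines 938 and 940, converts `size` from a size class index to a byte count (via INDEX2SIZE, per the surrounding source; stated as context, not a match). Hence `POOL_OVERHEAD + (size << 1)` at line 941 means "two blocks past the header": the first block is returned to the caller and the second becomes the one-element freeblock list, exactly the lazy carving described at lines 403-408.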
949 /* Carve off a new pool. */
952 pool = (poolp)usable_arenas->pool_address;
953 assert((block*)pool <= (block*)usable_arenas->address +
955 pool->arenaindex = usable_arenas - arenas;
956 assert(&arenas[pool->arenaindex] == usable_arenas);
957 pool->szidx = DUMMY_SIZE_IDX;
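For context (not a match): DUMMY_SIZE_IDX is a sentinel size class (0xffff in this vintage) stamped on a freshly carved pool, guaranteeing that the `pool->szidx == size` fast-reuse test at line 923 fails and the new pool's header is fully initialized on first use.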
996 poolp pool;
1012 pool = POOL_ADDR(p);
1013 if (Py_ADDRESS_IN_RANGE(p, pool)) {
1016 /* Link p to the start of the pool's freeblock list. Since
1017 * the pool had at least the p block outstanding, the pool
1022 assert(pool->ref.count > 0); /* else it was empty */
1023 *(block **)p = lastfree = pool->freeblock;
1024 pool->freeblock = (block *)p;
1029 /* freeblock wasn't NULL, so the pool wasn't full,
1030 * and the pool is in a usedpools[] list.
1032 if (--pool->ref.count != 0) {
1033 /* pool isn't empty: leave it in usedpools */
1037 /* Pool is now empty: unlink from usedpools, and
1042 next = pool->nextpool;
1043 prev = pool->prevpool;
1047 /* Link the pool to freepools. This is a singly-linked
1048 * list, and pool->prevpool isn't used there.
1050 ao = &arenas[pool->arenaindex];
1051 pool->nextpool = ao->freepools;
1052 ao->freepools = pool;
1056 * a pool, and there are 4 cases for arena mgmt:
1059 * 2. If this is the only free pool in the arena,
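The match truncates the enumeration; from the surrounding source, the four arena-management cases are approximately (a reconstruction from memory): 1. all the arena's pools are now free, so the arena itself is released; 2. this is the arena's only free pool (the line matched above), so the previously full arena rejoins the usable_arenas list; 3. the arena's nfreepools now exceeds its successor's, so the arena slides toward the tail to keep usable_arenas sorted by nfreepools; 4. otherwise nothing more needs doing.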
1190 /* Pool was full, so doesn't currently live in any list:
1192 * This mimics LRU pool usage for new allocations and
1196 --pool->ref.count;
1197 assert(pool->ref.count > 0); /* else the pool is empty */
1198 size = pool->szidx;
1201 /* insert pool before next: prev <-> pool <-> next */
1202 pool->nextpool = next;
1203 pool->prevpool = prev;
1204 next->prevpool = pool;
1205 prev->nextpool = pool;
1228 poolp pool;
1252 pool = POOL_ADDR(p);
1253 if (Py_ADDRESS_IN_RANGE(p, pool)) {
1255 size = INDEX2SIZE(pool->szidx);
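For context (not a match): in the realloc path, recovering the true block size via INDEX2SIZE(pool->szidx) lets obmalloc return the pointer unchanged whenever the new request still fits the current block (unless, per the surrounding comments, the shrink is large enough that copying to a smaller size class is judged worthwhile); otherwise a fresh block is allocated and the data copied over.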
1837 /* round up to pool alignment */
1844 /* visit every pool in the arena */
1916 total += printone("# bytes lost to pool headers", pool_header_bytes);
1929 Py_ADDRESS_IN_RANGE(void *P, poolp pool)
1931 uint arenaindex_temp = pool->arenaindex;
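The match stops right after the single volatile-style read that the comments at lines 730-733 call for. The full function, reconstructed around matched lines 1929 and 1931 from the macro at lines 737-738 (the return statement is a reconstruction, not a match):

    int
    Py_ADDRESS_IN_RANGE(void *P, poolp pool)
    {
        uint arenaindex_temp = pool->arenaindex;   /* read arenaindex once */

        return arenaindex_temp < maxarenas &&
               (uptr)P - arenas[arenaindex_temp].address < (uptr)ARENA_SIZE &&
               arenas[arenaindex_temp].address != 0;
    }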