
Lines Matching refs:freeptr

109   volatile std::atomic<uint32_t> freeptr;
149 // made to the allocator, notably "freeptr" (see comment in loop for why
155 // "count" was fetched _after_ "freeptr" then it would be possible for
172 // "freeptr" to above this point because there are no explicit dependencies
176 // blocks that could fit before freeptr" will allow.
180 // freeptr). Thus, the scenario above cannot happen.
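
The truncated comments above (source lines 149-180) reason about the order in which an iteration count and "freeptr" are loaded. As a minimal, hypothetical sketch of that kind of argument (the names, sizes, and orderings below are assumptions for illustration, not the allocator's actual code): the writer carves space out of freeptr before publishing a record count with release semantics, so a reader that loads the count first (acquire) and freeptr second always sees a freeptr large enough to cover every counted record; loading them in the opposite order would not give that guarantee.

// Minimal sketch, assuming a fixed record size and separate writer/reader
// threads; not the allocator's implementation.
#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

namespace {
constexpr uint32_t kRecordSize = 16;  // hypothetical fixed record size
std::atomic<uint32_t> freeptr{0};     // bytes handed out so far
std::atomic<uint32_t> count{0};       // records published so far
}  // namespace

int main() {
  std::thread writer([] {
    for (int i = 0; i < 1000; ++i) {
      freeptr.fetch_add(kRecordSize, std::memory_order_relaxed);  // carve space
      count.fetch_add(1, std::memory_order_release);              // then publish
    }
  });
  std::thread reader([] {
    for (int i = 0; i < 1000; ++i) {
      // Load "count" BEFORE "freeptr"; acquire pairs with the writer's release,
      // so the freeptr bump behind every counted record is already visible.
      uint32_t c = count.load(std::memory_order_acquire);
      uint32_t f = freeptr.load(std::memory_order_relaxed);
      assert(c <= f / kRecordSize);  // would not hold if the two loads swapped
    }
  });
  writer.join();
  reader.join();
  return 0;
}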
207 const uint32_t freeptr = std::min(
208 allocator_->shared_meta()->freeptr.load(std::memory_order_relaxed),
211 freeptr / (sizeof(BlockHeader) + kAllocAlignment);
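
Source lines 207-211 derive an upper bound on how many blocks can exist: every block needs at least a header plus one alignment unit, so at most freeptr / (sizeof(BlockHeader) + kAllocAlignment) blocks fit below freeptr. A hedged sketch of that arithmetic, with an assumed 8-byte header and 8-byte alignment (the real BlockHeader differs), follows.

// With freeptr == 4096, an 8-byte header and 8-byte alignment, no more than
// 4096 / (8 + 8) == 256 blocks can lie below freeptr.
#include <algorithm>
#include <cstdint>

struct BlockHeader { uint32_t size; uint32_t cookie; };  // assumed 8-byte header
constexpr uint32_t kAllocAlignment = 8;                  // assumed alignment

uint32_t MaxBlockCount(uint32_t freeptr_value, uint32_t mem_size) {
  // Clamp to the mapping size, then divide by the smallest possible block.
  uint32_t freeptr = std::min(freeptr_value, mem_size);
  return freeptr / (sizeof(BlockHeader) + kAllocAlignment);
}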
276 CHECK(((SharedMetadata*)0)->freeptr.is_lock_free());
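
Source line 276 verifies at run time that the freeptr atomic is lock-free, since a lock cannot live in memory shared across processes. If C++17 is available, the same property can be asserted at compile time; a possible equivalent (not taken from the source) is:

#include <atomic>
#include <cstdint>

// Reject builds where std::atomic<uint32_t> would need a lock, because a lock
// cannot be placed in cross-process shared memory.
static_assert(std::atomic<uint32_t>::is_always_lock_free,
              "shared-memory fields must be lock-free atomics");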
295 shared_meta()->freeptr.load(std::memory_order_relaxed) != 0 ||
316 shared_meta()->freeptr.store(sizeof(SharedMetadata),
336 shared_meta()->freeptr.load(std::memory_order_relaxed) == 0 ||
409 return std::min(shared_meta()->freeptr.load(std::memory_order_relaxed),
488 /* const */ uint32_t freeptr =
489 shared_meta()->freeptr.load(std::memory_order_acquire);
498 if (freeptr + size > mem_size_) {
504 // the load of freeptr above, it is still safe as nothing will be written
506 volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
516 const uint32_t page_free = mem_page_ - freeptr % mem_page_;
522 const uint32_t new_freeptr = freeptr + page_free;
523 if (shared_meta()->freeptr.compare_exchange_strong(freeptr,
537 const uint32_t new_freeptr = freeptr + size;
547 if (!shared_meta()->freeptr.compare_exchange_strong(freeptr, new_freeptr))
567 return freeptr;
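
Source lines 488-567 show the allocation path: load freeptr with acquire, reject requests that would run past mem_size_, skip to the next page boundary when a block would straddle one, and advance freeptr with compare_exchange_strong so concurrent allocators never hand out overlapping ranges. A minimal sketch of that bump-allocation pattern, with assumed names and without the page-boundary handling, follows.

// Not the Chromium implementation; a sketch of the CAS-advance pattern only.
#include <atomic>
#include <cstdint>

constexpr uint32_t kAllocFailure = 0;  // assumed sentinel for "no space"

uint32_t BumpAllocate(std::atomic<uint32_t>& freeptr_atomic,
                      uint32_t size,
                      uint32_t mem_size) {
  uint32_t freeptr = freeptr_atomic.load(std::memory_order_acquire);
  for (;;) {
    // Out of space (or arithmetic overflow): fail rather than wrap.
    if (freeptr + size > mem_size || freeptr + size < freeptr)
      return kAllocFailure;
    uint32_t new_freeptr = freeptr + size;
    // On success, [freeptr, new_freeptr) belongs exclusively to this caller.
    // On failure, "freeptr" is refreshed with the competing value and the
    // loop retries; the real code also burns page-boundary waste here.
    if (freeptr_atomic.compare_exchange_strong(freeptr, new_freeptr))
      return freeptr;
  }
}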
573 mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
686 uint32_t freeptr = std::min(
687 shared_meta()->freeptr.load(std::memory_order_relaxed), mem_size_);
688 if (ref + size > freeptr)
694 if (ref + block->size > freeptr)
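
Source lines 686-694 validate an untrusted reference: both the requested size and the block's recorded size must fit entirely below freeptr, with freeptr itself clamped to mem_size_ in case shared memory is corrupt. A hedged sketch of that bounds check, with assumed names and written to avoid integer overflow, follows.

#include <algorithm>
#include <atomic>
#include <cstdint>

bool RefIsInBounds(uint32_t ref, uint32_t size,
                   const std::atomic<uint32_t>& freeptr_atomic,
                   uint32_t mem_size) {
  // Never trust a freeptr larger than the mapping; a corrupt value could
  // otherwise make out-of-range references look valid.
  uint32_t freeptr =
      std::min(freeptr_atomic.load(std::memory_order_relaxed), mem_size);
  // Equivalent to "ref + size <= freeptr" but immune to uint32_t wraparound.
  return ref <= freeptr && size <= freeptr - ref;
}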