
Lines matching refs: block (excerpts from the FileBlockCache implementation; the number at the start of each line is its line number in the original source file)

24 bool FileBlockCache::BlockNotStale(const std::shared_ptr<Block>& block) {
25 mutex_lock l(block->mu);
26 if (block->state != FetchState::FINISHED) {
30 return env_->NowSeconds() - block->timestamp <= max_staleness_;
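The lines above (source lines 24-30) are the staleness check: a block that is still being fetched is never treated as stale, and a finished block stays usable only while its age is within max_staleness_. Below is a minimal standalone sketch of the same predicate, using std::chrono in place of TensorFlow's env_->NowSeconds(); the simplified Block struct and FetchState enum here are hypothetical stand-ins, not the TensorFlow types.

#include <chrono>
#include <mutex>

enum class FetchState { CREATED, FETCHING, FINISHED, ERROR };

struct Block {
  std::mutex mu;
  FetchState state = FetchState::CREATED;
  // Set when the fetch finishes; the default value doubles as the
  // "evicted" sentinel (timestamp == 0 in the original).
  std::chrono::steady_clock::time_point timestamp{};
};

// Returns true while the block may still be served from the cache.
bool BlockNotStale(Block& block, std::chrono::seconds max_staleness) {
  std::lock_guard<std::mutex> lock(block.mu);
  if (block.state != FetchState::FINISHED) {
    return true;  // Still being fetched, so it cannot be stale yet.
  }
  if (max_staleness.count() == 0) {
    return true;  // A zero max_staleness disables staleness checks.
  }
  return std::chrono::steady_clock::now() - block.timestamp <= max_staleness;
}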
33 std::shared_ptr<FileBlockCache::Block> FileBlockCache::Lookup(const Key& key) {
40 // Remove the stale block and continue.
45 // Insert a new empty block, setting the bookkeeping to sentinel values
47 auto new_entry = std::make_shared<Block>();
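Source lines 33-47 are the lookup-or-insert path: a cache hit is returned only if it passes the staleness check, a stale entry is dropped, and otherwise a new empty block is inserted for the caller to fetch. A rough sketch of that shape follows, assuming a std::map keyed by (filename, block offset); BlockIndex and CacheEntry are hypothetical names, and the real Lookup additionally seeds the LRU/LRA bookkeeping shown in the later fragments.

#include <chrono>
#include <cstddef>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <utility>

// Hypothetical simplified entry; the real Block also carries fetch state,
// the data buffer, and LRU/LRA iterators.
struct CacheEntry {
  std::chrono::steady_clock::time_point timestamp{};  // Default: not fetched.
  std::string data;
};

class BlockIndex {
 public:
  using Key = std::pair<std::string, std::size_t>;  // (filename, block offset)

  explicit BlockIndex(std::chrono::seconds max_staleness)
      : max_staleness_(max_staleness) {}

  // Returns the cached entry for key, dropping and replacing it if it has
  // gone stale; the caller then fetches the contents if they are missing.
  std::shared_ptr<CacheEntry> Lookup(const Key& key) {
    std::lock_guard<std::mutex> lock(mu_);
    auto it = entries_.find(key);
    if (it != entries_.end()) {
      const bool fetched =
          it->second->timestamp != std::chrono::steady_clock::time_point{};
      const bool fresh =
          !fetched ||
          std::chrono::steady_clock::now() - it->second->timestamp <=
              max_staleness_;
      if (fresh) return it->second;
      entries_.erase(it);  // Remove the stale block and continue.
    }
    // Insert a new empty entry; its timestamp and contents are filled in
    // once a fetch completes.
    auto new_entry = std::make_shared<CacheEntry>();
    entries_[key] = new_entry;
    return new_entry;
  }

 private:
  std::mutex mu_;
  std::chrono::seconds max_staleness_;
  std::map<Key, std::shared_ptr<CacheEntry>> entries_;
};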
64 /// Move the block to the front of the LRU list if it isn't already there.
66 const std::shared_ptr<Block>& block) {
68 if (block->timestamp == 0) {
69 // The block was evicted from another thread. Allow it to remain evicted.
72 if (block->lru_iterator != lru_list_.begin()) {
73 lru_list_.erase(block->lru_iterator);
75 block->lru_iterator = lru_list_.begin();
78 // Check for inconsistent state. If there is a block later in the same file
79 // in the cache, and our current block is not block size, this likely means
82 if (block->data.size() < block_size_) {
86 return errors::Internal("Block cache contents are inconsistent.");
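Source lines 64-86 update the recency bookkeeping: a block whose timestamp was zeroed by a concurrent eviction is left alone, otherwise its key is moved to the front of the LRU list through the iterator stored on the block. The sketch below shows the same move-to-front idiom with std::list::splice instead of the original erase-and-push_front; LruIndex, Slot, Insert, and Touch are hypothetical names.

#include <cstddef>
#include <list>
#include <map>
#include <string>
#include <utility>

struct LruIndex {
  using Key = std::pair<std::string, std::size_t>;  // (filename, block offset)

  struct Slot {
    std::list<Key>::iterator lru_iterator;
    bool evicted = false;  // Plays the role of the timestamp == 0 sentinel.
  };

  std::list<Key> lru_list;  // Front is most recently used.
  std::map<Key, Slot> slots;

  // Registers a new key at the front of the LRU list.
  void Insert(const Key& key) {
    lru_list.push_front(key);
    Slot& slot = slots[key];
    slot.lru_iterator = lru_list.begin();
    slot.evicted = false;
  }

  // Moves key to the front of the LRU list if it is still resident.
  void Touch(const Key& key) {
    auto it = slots.find(key);
    if (it == slots.end() || it->second.evicted) {
      return;  // Evicted by another thread; allow it to remain evicted.
    }
    if (it->second.lru_iterator != lru_list.begin()) {
      // splice() relinks the existing node, so the stored iterator stays
      // valid and still points at the same element, now at the front.
      lru_list.splice(lru_list.begin(), lru_list, it->second.lru_iterator);
    }
  }
};

The remaining matched lines (78-86) are a separate sanity check: a cached block shorter than block_size_ can only be the last block of its file, so finding a later block of the same file already in the cache means the cache contents are inconsistent and an internal error is returned.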
96 const std::shared_ptr<Block>& block) {
99 gtl::MakeCleanup([this, &downloaded_block, &key, &block] {
101 // locking block->mu.
104 // Do not update state if the block is already to be evicted.
105 if (block->timestamp != 0) {
106 cache_size_ += block->data.size();
108 lra_list_.erase(block->lra_iterator);
110 block->lra_iterator = lra_list_.begin();
111 block->timestamp = env_->NowSeconds();
115 // Loop until either block content is successfully fetched, or our request
117 mutex_lock l(block->mu);
120 switch (block->state) {
124 block->state = FetchState::FETCHING;
125 block->mu.unlock(); // Release the lock while making the API call.
126 block->data.clear();
127 block->data.resize(block_size_, 0);
130 block->data.data(), &bytes_transferred));
131 block->mu.lock(); // Reacquire the lock immediately afterwards
133 block->data.resize(bytes_transferred, 0);
134 block->data.shrink_to_fit();
136 block->state = FetchState::FINISHED;
138 block->state = FetchState::ERROR;
140 block->cond_var.notify_all();
143 block->cond_var.wait_for(l, std::chrono::seconds(60));
144 if (block->state == FetchState::FINISHED) {
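Source lines 96-144 are the heart of MaybeFetch: a per-block state machine (CREATED, FETCHING, FINISHED, ERROR) guarded by the block's mutex, which guarantees each block is downloaded at most once while other readers wait on the condition variable, with a 60-second timeout so they can recover if the fetching thread stalls or fails. Here is a self-contained sketch of that protocol, assuming a std::function fetcher and exceptions for error reporting instead of tensorflow::Status; Fetcher and the simplified Block are hypothetical, not the TensorFlow API.

#include <chrono>
#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <stdexcept>
#include <vector>

enum class FetchState { CREATED, FETCHING, FINISHED, ERROR };

struct Block {
  std::mutex mu;
  std::condition_variable cond_var;
  FetchState state = FetchState::CREATED;
  std::vector<char> data;
};

// Fills buffer (block_size bytes) and returns the byte count actually read,
// throwing on failure. Hypothetical stand-in for the cache's block fetcher.
using Fetcher =
    std::function<std::size_t(char* buffer, std::size_t block_size)>;

// Ensures the block's contents are present, downloading them at most once
// even when many readers race on the same block.
void MaybeFetch(Block& block, std::size_t block_size, const Fetcher& fetch) {
  std::unique_lock<std::mutex> lock(block.mu);
  while (true) {
    switch (block.state) {
      case FetchState::ERROR:    // A previous attempt failed; retry.
      case FetchState::CREATED: {
        block.state = FetchState::FETCHING;
        block.data.assign(block_size, 0);
        lock.unlock();  // Release the lock while making the API call.
        std::size_t bytes_transferred = 0;
        bool ok = true;
        try {
          bytes_transferred = fetch(block.data.data(), block_size);
        } catch (...) {
          ok = false;
        }
        lock.lock();  // Reacquire the lock immediately afterwards.
        if (ok) {
          block.data.resize(bytes_transferred);  // Trim a short final block.
          block.data.shrink_to_fit();
          block.state = FetchState::FINISHED;
        } else {
          block.state = FetchState::ERROR;
        }
        block.cond_var.notify_all();  // Wake readers waiting on this block.
        if (!ok) throw std::runtime_error("block fetch failed");
        return;
      }
      case FetchState::FETCHING:
        // Another thread is downloading this block; wait for it, with a
        // timeout so a stuck fetch does not wedge every reader forever.
        block.cond_var.wait_for(lock, std::chrono::seconds(60));
        if (block.state == FetchState::FINISHED) return;
        break;  // ERROR or timeout: loop again and retry ourselves.
      case FetchState::FINISHED:
        return;  // Already cached.
    }
  }
}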
168 // Calculate the block-aligned start and end of the read.
178 // Look up the block, fetching and inserting it if necessary, and update the
179 // LRU iterator for the key and block.
180 std::shared_ptr<Block> block = Lookup(key);
181 DCHECK(block) << "No block for key " << key.first << "@" << key.second;
182 TF_RETURN_IF_ERROR(MaybeFetch(key, block));
183 TF_RETURN_IF_ERROR(UpdateLRU(key, block));
184 // Copy the relevant portion of the block into the result buffer.
185 const auto& data = block->data;
188 // happen if `offset` is not block-aligned, and the read returns the last
189 // block in the file, which does not extend all the way out to `offset`.
197 // The block begins before the slice we're reading.
202 // The block extends past the end of the slice we're reading.
211 // The block was a partial block and thus signals EOF at its upper bound.
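Source lines 168-211 implement the read path: round the requested range out to block boundaries, pull each block through the cache, and copy only the slice that overlaps the request, trimming the first block when offset is not block-aligned and the last block when the request ends mid-block; a block shorter than block_size_ marks end of file. Below is a standalone sketch of that arithmetic, where BlockProvider is a hypothetical stand-in for the Lookup/MaybeFetch/UpdateLRU sequence and a short return value stands in for the original's OutOfRange status when the read starts past EOF.

#include <cstddef>
#include <cstring>
#include <functional>
#include <vector>

// Returns the cached contents of the block starting at block_offset; a block
// shorter than block_size marks the end of the file.
using BlockProvider =
    std::function<const std::vector<char>&(std::size_t block_offset)>;

// Copies up to n bytes starting at file offset `offset` into out, one
// block-aligned chunk at a time. The return value is the number of bytes
// copied, which falls short of n only when end of file is reached.
std::size_t Read(std::size_t offset, std::size_t n, char* out,
                 std::size_t block_size, const BlockProvider& get_block) {
  if (n == 0) return 0;
  // Calculate the block-aligned start and end of the read.
  std::size_t start = block_size * (offset / block_size);
  std::size_t finish = block_size * ((offset + n) / block_size);
  if (finish < offset + n) finish += block_size;
  std::size_t total = 0;
  for (std::size_t pos = start; pos < finish; pos += block_size) {
    const std::vector<char>& data = get_block(pos);
    // Copy the relevant portion of the block into the result buffer.
    std::size_t begin = (pos < offset) ? offset - pos : 0;  // First block.
    std::size_t end = data.size();
    if (pos + data.size() > offset + n) end = offset + n - pos;  // Last block.
    if (begin < end) {
      std::memcpy(out + total, data.data() + begin, end - begin);
      total += end - begin;
    }
    if (data.size() < block_size) {
      break;  // A partial block signals EOF at its upper bound.
    }
  }
  return total;
}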
231 // The oldest block is not yet expired. Come back later.
265 // This signals that the block is removed, and should not be inadvertently
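The last two matched lines come from eviction: a pruning pass walks the least-recently-added list from the oldest end and stops at the first block that has not yet exceeded max_staleness_, and removal zeroes the block's timestamp so threads still holding a pointer to it do not revive it. A hypothetical sketch of that pruning step follows; Cache, Prune, and the simplified Block are not the TensorFlow types.

#include <chrono>
#include <cstddef>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <string>
#include <utility>

using Clock = std::chrono::steady_clock;
using Key = std::pair<std::string, std::size_t>;  // (filename, block offset)

struct Block {
  // Set when the block is inserted or refetched; a default-constructed value
  // marks a block that has already been flagged as removed.
  Clock::time_point timestamp{};
};

struct Cache {
  std::mutex mu;
  std::list<Key> lra_list;  // Least recently added blocks at the back.
  std::map<Key, std::shared_ptr<Block>> block_map;
  std::chrono::seconds max_staleness{60};

  // One pruning pass: evict expired blocks, oldest first, and stop early at
  // the first block that is still fresh. Intended to run periodically from a
  // background thread.
  void Prune() {
    std::lock_guard<std::mutex> lock(mu);
    const Clock::time_point now = Clock::now();
    while (!lra_list.empty()) {
      auto it = block_map.find(lra_list.back());
      if (it == block_map.end()) {
        lra_list.pop_back();  // Already removed; drop the dangling list entry.
        continue;
      }
      if (now - it->second->timestamp <= max_staleness) {
        break;  // The oldest block is not yet expired. Come back later.
      }
      // Reset the timestamp before dropping the map entry; this is the signal
      // that tells any thread still holding a pointer to the block (fetch or
      // LRU update) not to revive it.
      it->second->timestamp = Clock::time_point{};
      block_map.erase(it);
      lra_list.pop_back();
    }
  }
};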