
Lines Matching refs:stack

184         SharedBufferStack& stack( *mSharedStack );
185         return stack.status;
190         SharedBufferStack& stack( *mSharedStack );
191         return stack.identity;
199         SharedBufferStack& stack( *mSharedStack );
203                 prefix, stack.head, stack.available, stack.queued,
204                 stack.reallocMask, stack.inUse, stack.identity, stack.status);
212         const SharedBufferStack& stack( *mSharedStack );
219                 (stack.identity == identity) &&
220                 (stack.status == NO_ERROR))
229                     "shouldn't happen." , condition.name(), stack.identity);
235                     stack.identity, stack.status);
244         return (stack.identity != mIdentity) ? status_t(BAD_INDEX) : stack.status;
254         return stack.available > 0;
261         // NOTE: if stack.head is messed up, we could crash the client
264         return (buf != stack.index[stack.head] ||
265                 (stack.queued > 0 && stack.inUse != buf));
274         android_atomic_inc(&stack.queued);
282         if (android_atomic_dec(&stack.available) == 0) {
293         stack.index[tail] = buf;
294         android_atomic_inc(&stack.available);
303         if (stack.inUse != lockedBuffer) {
306                 lockedBuffer, stack.inUse,
307                 stack.identity, stack.token);
310         android_atomic_write(-1, &stack.inUse);
319         int32_t head = stack.head;
324         android_atomic_write(stack.headBuf, &stack.inUse);
329             queued = stack.queued;
333         } while (android_atomic_cmpxchg(queued, queued-1, &stack.queued));
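Lines 329-333 are a lock-free decrement: the server re-reads stack.queued and retries until the compare-and-swap lands (android_atomic_cmpxchg returns nonzero on failure, so the loop spins while the swap fails). A minimal self-contained sketch of the same decrement-if-nonzero pattern, using std::atomic in place of the cutils atomics; all names below are illustrative, not from this file:

    #include <atomic>
    #include <cstdint>

    std::atomic<std::int32_t> queued_count{0};

    // Decrement queued_count only if it is nonzero, retrying on contention.
    // Mirrors the do/while cmpxchg loop at lines 329-333.
    bool retire_one_queued() {
        std::int32_t queued = queued_count.load();
        do {
            if (queued == 0)
                return false;  // nothing queued; the caller bails out here
            // On failure, compare_exchange_weak reloads 'queued' with the
            // current value, just like re-reading stack.queued and retrying.
        } while (!queued_count.compare_exchange_weak(queued, queued - 1));
        return true;
    }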
339         const int8_t headBuf = stack.index[head];
340         stack.headBuf = headBuf;
341         android_atomic_write(headBuf, &stack.inUse);
344         android_atomic_write(head, &stack.head);
347         android_atomic_inc(&stack.available);
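Lines 324-347 appear to encode the retire hand-off order on the server side: the current head buffer is preventively marked in-use (line 324), queued is decremented, the new head buffer is marked in-use (line 341) before the new head index is published (line 344), and only then is available incremented (line 347), so a client unblocked by available should never observe a head that is still being retired. A hedged illustration of that publish order, with std::atomic stand-ins and hypothetical names:

    #include <atomic>
    #include <cstdint>

    std::atomic<std::int32_t> in_use{-1};     // stand-in for stack.inUse
    std::atomic<std::int32_t> head_index{0};  // stand-in for stack.head
    std::atomic<std::int32_t> available{0};   // stand-in for stack.available

    // Publish a retired head: lock the new head buffer first, then move
    // head, then make one more slot visible to the dequeueing client.
    void publish_retired_head(std::int32_t new_head, std::int8_t new_head_buf) {
        in_use.store(new_head_buf);   // cf. android_atomic_write(headBuf, &stack.inUse)
        head_index.store(new_head);   // cf. android_atomic_write(head, &stack.head)
        available.fetch_add(1);       // cf. android_atomic_inc(&stack.available)
    }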
357         android_atomic_write(status, &stack.status);
368         SharedBufferStack& stack( *mSharedStack );
370         queued_head = stack.head;
375         SharedBufferStack& stack( *mSharedStack );
376         return (mNumBuffers + stack.head - stack.available + 1) % mNumBuffers;
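For illustration of the tail computation at line 376: with mNumBuffers = 4, head = 2 and available = 3, the result is (4 + 2 - 3 + 1) % 4 = 0, i.e. the tail trails the head by available - 1 slots, modulo the ring size.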
381         SharedBufferStack& stack( *mSharedStack );
383         if (stack.head == tail && stack.available == mNumBuffers) {
385                 tail, stack.head, stack.available, stack.queued);
402         int dequeued = stack.index[tail];
435         SharedBufferStack& stack( *mSharedStack );
445         SharedBufferStack& stack( *mSharedStack );
448         stack.index[queued_head] = buf;
455         stack.stats.totalTime = ns2us(now - mDequeueTime[buf]);
461         SharedBufferStack& stack( *mSharedStack );
463         return (android_atomic_and(~mask, &stack.reallocMask) & mask) != 0;
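Line 463 is an atomic test-and-clear: android_atomic_and returns the previous value of the word, so the expression clears this buffer's bits in reallocMask and reports whether any were set; the server side sets them with android_atomic_or (lines 566 and 578 below). A self-contained sketch of the set / test-and-clear pair using std::atomic; the names are illustrative:

    #include <atomic>
    #include <cstdint>

    std::atomic<std::uint32_t> realloc_mask{0};

    // Server side: flag buffer 'buf' as needing reallocation,
    // cf. android_atomic_or(mask, &stack.reallocMask).
    void mark_needs_realloc(int buf) {
        realloc_mask.fetch_or(1u << buf);
    }

    // Client side: atomically clear the buffer's bit and report whether
    // it was set, cf. (android_atomic_and(~mask, &stack.reallocMask) & mask) != 0.
    bool need_new_buffer(int buf) {
        const std::uint32_t mask = 1u << buf;
        return (realloc_mask.fetch_and(~mask) & mask) != 0;
    }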
468         SharedBufferStack& stack( *mSharedStack );
469         return stack.setCrop(buf, crop);
474         SharedBufferStack& stack( *mSharedStack );
475         return stack.setTransform(buf, uint8_t(transform));
480         SharedBufferStack& stack( *mSharedStack );
481         return stack.setDirtyRegion(buf, reg);
487         SharedBufferStack& stack( *mSharedStack );
499         queued_head = (stack.head + stack.queued) % mNumBuffers;
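For illustration of line 499: the next queue position is recomputed from the shared state, wrapping around the ring; with mNumBuffers = 4, head = 2 and queued = 3, queued_head = (2 + 3) % 4 = 1.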
537         SharedBufferStack& stack( *mSharedStack );
538         buf = stack.index[buf];
564         stack( *mSharedStack );
566         android_atomic_or(mask, &stack.reallocMask);
574         SharedBufferStack& stack( *mSharedStack );
578         android_atomic_or(mask, &stack.reallocMask);
584         SharedBufferStack& stack( *mSharedStack );
585         return stack.queued;
590         SharedBufferStack& stack( *mSharedStack );
591         return stack.getDirtyRegion(buf);
596         SharedBufferStack& stack( *mSharedStack );
597         return stack.getCrop(buf);
602         SharedBufferStack& stack( *mSharedStack );
603         return stack.getTransform(buf);
624         SharedBufferStack& stack( *mSharedStack );
628         int32_t head = stack.head;
633         int32_t avail = stack.available;
637         int8_t* const index = const_cast<int8_t*>(stack.index);
641         // move head 'extra' ahead, this doesn't impact stack.index[head];
642         stack.head = head + extra;
644         stack.available += extra;
649             stack.index[base+i] = *curr;
660         SharedBufferStack& stack( *mSharedStack );
661         return stack.stats;