/**************************************************************************
 *
 * Copyright 2007-2015 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * \file
 * Implementation of fenced buffers.
 *
 * \author Jose Fonseca <jfonseca-at-vmware-dot-com>
 * \author Thomas Hellström <thellstrom-at-vmware-dot-com>
 */


#include "pipe/p_config.h"

#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
#include <unistd.h>
#include <sched.h>
#endif
#include <inttypes.h>

#include "pipe/p_compiler.h"
#include "pipe/p_defines.h"
#include "util/u_debug.h"
#include "os/os_thread.h"
#include "util/u_memory.h"
#include "util/list.h"

#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_bufmgr.h"
#include "pipebuffer/pb_buffer_fenced.h"
#include "vmw_screen.h"


/**
 * Convenience macro (type safe) for getting a pointer to the base class
 * of a derived object.
 */
#define SUPER(__derived) (&(__derived)->base)


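/**
 * Fenced buffer manager.
 *
 * Wraps a provider pb_manager and keeps track of which of its buffers are
 * still referenced by unsignalled fences (i.e., still in use by the GPU),
 * so that allocations can wait for fences to expire when necessary.
 */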
struct fenced_manager
{
   struct pb_manager base;
   struct pb_manager *provider;
   struct pb_fence_ops *ops;

   /**
    * The following members are mutable and protected by this mutex.
    */
   pipe_mutex mutex;

   /**
    * Fenced buffer list.
    *
    * All fenced buffers are placed in this list, ordered from the oldest
    * fence to the newest fence.
    */
   struct list_head fenced;
   pb_size num_fenced;

   struct list_head unfenced;
   pb_size num_unfenced;
};


/**
 * Fenced buffer.
 *
 * Wrapper around a pipe buffer which adds fencing and reference counting.
 */
struct fenced_buffer
{
   /*
    * Immutable members.
    */

   struct pb_buffer base;
   struct fenced_manager *mgr;

   /*
    * The following members are mutable and protected by
    * fenced_manager::mutex.
    */

   struct list_head head;

   /**
    * Buffer with storage.
    */
   struct pb_buffer *buffer;
   pb_size size;

   /**
    * A bitmask of PB_USAGE_CPU/GPU_READ/WRITE describing the current
    * buffer usage.
    */
   unsigned flags;

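   /**
    * Number of outstanding CPU mappings of the underlying buffer.
    */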
   unsigned mapcount;

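   /**
    * Validation list this buffer is currently queued on, if any, and the
    * GPU usage flags it is being validated with.
    */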
   struct pb_validate *vl;
   unsigned validation_flags;

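   /**
    * Fence for the last GPU commands that referenced this buffer, or NULL
    * when the buffer is idle.
    */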
   struct pipe_fence_handle *fence;
};


static inline struct fenced_manager *
fenced_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct fenced_manager *)mgr;
}


static inline struct fenced_buffer *
fenced_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct fenced_buffer *)buf;
}


static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf);

static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        const struct pb_desc *desc,
                                        boolean wait);


/**
 * Dump the fenced buffer list.
 *
 * Useful for understanding why buffer allocations fail.
 */
static void
fenced_manager_dump_locked(struct fenced_manager *fenced_mgr)
{
#ifdef DEBUG
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;

   debug_printf("%10s %7s %8s %7s %10s %s\n",
                "buffer", "size", "refcount", "storage", "fence", "signalled");

   curr = fenced_mgr->unfenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->unfenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(!fenced_buf->fence);
      debug_printf("%10p %"PRIu64" %8u %7s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   fenced_buf->buffer ? "gpu" : "none");
      curr = next;
      next = curr->next;
   }

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      int signaled;
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);
      assert(fenced_buf->buffer);
      signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
      debug_printf("%10p %"PRIu64" %8u %7s %10p %s\n",
                   (void *) fenced_buf,
                   fenced_buf->base.size,
                   p_atomic_read(&fenced_buf->base.reference.count),
                   "gpu",
                   (void *) fenced_buf->fence,
                   signaled == 0 ? "y" : "n");
      curr = next;
      next = curr->next;
   }
#else
   (void)fenced_mgr;
#endif
}


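/**
 * Destroy a fenced buffer.
 *
 * The manager mutex must be held. The buffer must be unreferenced, have no
 * fence, and be on the unfenced list.
 */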
static inline void
fenced_buffer_destroy_locked(struct fenced_manager *fenced_mgr,
                             struct fenced_buffer *fenced_buf)
{
   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   assert(!fenced_buf->fence);
   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);
   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;

   fenced_buffer_destroy_gpu_storage_locked(fenced_buf);

   FREE(fenced_buf);
}


/**
 * Add the buffer to the fenced list.
 *
 * The buffer's reference count must already have been incremented before
 * calling this function; the function takes an additional reference on
 * behalf of the fenced list.
 */
static inline void
fenced_buffer_add_locked(struct fenced_manager *fenced_mgr,
                         struct fenced_buffer *fenced_buf)
{
   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->flags & PB_USAGE_GPU_READ_WRITE);
   assert(fenced_buf->fence);

   p_atomic_inc(&fenced_buf->base.reference.count);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_unfenced);
   --fenced_mgr->num_unfenced;
   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->fenced);
   ++fenced_mgr->num_fenced;
}


/**
 * Remove the buffer from the fenced list, and potentially destroy the buffer
 * if the reference count reaches zero.
 *
 * Returns TRUE if the buffer was destroyed.
 */
static inline boolean
fenced_buffer_remove_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;

   assert(fenced_buf->fence);
   assert(fenced_buf->mgr == fenced_mgr);

   ops->fence_reference(ops, &fenced_buf->fence, NULL);
   fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

   assert(fenced_buf->head.prev);
   assert(fenced_buf->head.next);

   LIST_DEL(&fenced_buf->head);
   assert(fenced_mgr->num_fenced);
   --fenced_mgr->num_fenced;

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;

   if (p_atomic_dec_zero(&fenced_buf->base.reference.count)) {
      fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);
      return TRUE;
   }

   return FALSE;
}


/**
 * Wait for the fence to expire, and remove it from the fenced list.
 *
 * This function will release and re-acquire the mutex, so any copy of mutable
 * state must be discarded after calling it.
 */
static inline enum pipe_error
fenced_buffer_finish_locked(struct fenced_manager *fenced_mgr,
                            struct fenced_buffer *fenced_buf)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   enum pipe_error ret = PIPE_ERROR;

#if 0
   debug_warning("waiting for GPU");
#endif

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->fence);

   if(fenced_buf->fence) {
      struct pipe_fence_handle *fence = NULL;
      int finished;
      boolean proceed;

      ops->fence_reference(ops, &fence, fenced_buf->fence);

      pipe_mutex_unlock(fenced_mgr->mutex);

      finished = ops->fence_finish(ops, fenced_buf->fence, 0);

      pipe_mutex_lock(fenced_mgr->mutex);

      assert(pipe_is_referenced(&fenced_buf->base.reference));

      /*
       * Only proceed if the fence object didn't change in the meantime.
       * Otherwise, assume the work has already been carried out by another
       * thread that re-acquired the lock before us.
       */
      proceed = fence == fenced_buf->fence ? TRUE : FALSE;

      ops->fence_reference(ops, &fence, NULL);

      if(proceed && finished == 0) {
         /*
          * Remove from the fenced list.
          */

         boolean destroyed;

         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

         /* TODO: Remove subsequent buffers with the same fence? */

         assert(!destroyed);
         (void) destroyed;

         fenced_buf->flags &= ~PB_USAGE_GPU_READ_WRITE;

         ret = PIPE_OK;
      }
   }

   return ret;
}


/**
 * Remove as many fenced buffers from the fenced list as possible.
 *
 * Returns TRUE if at least one buffer was removed.
 */
static boolean
fenced_manager_check_signalled_locked(struct fenced_manager *fenced_mgr,
                                      boolean wait)
{
   struct pb_fence_ops *ops = fenced_mgr->ops;
   struct list_head *curr, *next;
   struct fenced_buffer *fenced_buf;
   struct pipe_fence_handle *prev_fence = NULL;
   boolean ret = FALSE;

   curr = fenced_mgr->fenced.next;
   next = curr->next;
   while(curr != &fenced_mgr->fenced) {
      fenced_buf = LIST_ENTRY(struct fenced_buffer, curr, head);

      if(fenced_buf->fence != prev_fence) {
         int signaled;

         if (wait) {
            signaled = ops->fence_finish(ops, fenced_buf->fence, 0);

            /*
             * Don't return just yet. Instead, preemptively check whether the
             * following buffers' fences have already expired, without any
             * further waiting.
             */
            wait = FALSE;
         }
         else {
            signaled = ops->fence_signalled(ops, fenced_buf->fence, 0);
         }

         if (signaled != 0) {
            return ret;
         }

         prev_fence = fenced_buf->fence;
      }
      else {
         /* This buffer's fence object is identical to the previous buffer's
          * fence object, so no need to check the fence again.
          */
         assert(ops->fence_signalled(ops, fenced_buf->fence, 0) == 0);
      }

      fenced_buffer_remove_locked(fenced_mgr, fenced_buf);

      ret = TRUE;

      curr = next;
      next = curr->next;
   }

   return ret;
}


/**
 * Destroy the GPU storage.
 */
static void
fenced_buffer_destroy_gpu_storage_locked(struct fenced_buffer *fenced_buf)
{
   if(fenced_buf->buffer) {
      pb_reference(&fenced_buf->buffer, NULL);
   }
}


/**
 * Try to create GPU storage for this buffer.
 *
 * This function is a shorthand around pb_manager::create_buffer for
 * fenced_buffer_create_gpu_storage_locked()'s benefit.
 */
static inline boolean
fenced_buffer_try_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                            struct fenced_buffer *fenced_buf,
                                            const struct pb_desc *desc)
{
   struct pb_manager *provider = fenced_mgr->provider;

   assert(!fenced_buf->buffer);

   fenced_buf->buffer = provider->create_buffer(fenced_mgr->provider,
                                                fenced_buf->size, desc);
   return fenced_buf->buffer ? TRUE : FALSE;
}


/**
 * Create GPU storage for this buffer.
 */
static enum pipe_error
fenced_buffer_create_gpu_storage_locked(struct fenced_manager *fenced_mgr,
                                        struct fenced_buffer *fenced_buf,
                                        const struct pb_desc *desc,
                                        boolean wait)
{
   assert(!fenced_buf->buffer);

   /*
    * Check for signaled buffers before trying to allocate.
    */
   fenced_manager_check_signalled_locked(fenced_mgr, FALSE);

   fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf, desc);

   /*
    * Keep trying while there is some sort of progress:
    * - fences are expiring,
    * - or buffers are being swapped out from GPU memory into CPU memory.
    */
   while(!fenced_buf->buffer &&
         (fenced_manager_check_signalled_locked(fenced_mgr, FALSE))) {
     fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                 desc);
   }

   if(!fenced_buf->buffer && wait) {
      /*
       * Same as before, but this time around, wait to free buffers if
       * necessary.
       */
      while(!fenced_buf->buffer &&
            (fenced_manager_check_signalled_locked(fenced_mgr, TRUE))) {
        fenced_buffer_try_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                    desc);
      }
   }

   if(!fenced_buf->buffer) {
      if(0)
         fenced_manager_dump_locked(fenced_mgr);

      /* Give up. */
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   return PIPE_OK;
}


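/**
 * pb_vtbl::destroy callback: called once the last reference to the buffer
 * has been released.
 */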
static void
fenced_buffer_destroy(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   assert(!pipe_is_referenced(&fenced_buf->base.reference));

   pipe_mutex_lock(fenced_mgr->mutex);

   fenced_buffer_destroy_locked(fenced_mgr, fenced_buf);

   pipe_mutex_unlock(fenced_mgr->mutex);
}


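/**
 * pb_vtbl::map callback.
 *
 * Waits for any conflicting GPU access to finish before mapping, unless
 * PB_USAGE_DONTBLOCK (fail instead of waiting) or PB_USAGE_UNSYNCHRONIZED
 * (map regardless) is given. A minimal caller-side sketch; buf, src and
 * size here are hypothetical caller state:
 *
 *    void *data = pb_map(buf, PB_USAGE_CPU_WRITE, NULL);
 *    if (data) {
 *       memcpy(data, src, size);
 *       pb_unmap(buf);
 *    }
 */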
static void *
fenced_buffer_map(struct pb_buffer *buf,
                  unsigned flags, void *flush_ctx)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;
   void *map = NULL;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(!(flags & PB_USAGE_GPU_READ_WRITE));

   /*
    * Serialize writes.
    */
   while((fenced_buf->flags & PB_USAGE_GPU_WRITE) ||
         ((fenced_buf->flags & PB_USAGE_GPU_READ) &&
          (flags & PB_USAGE_CPU_WRITE))) {

      /*
       * If blocking is forbidden, don't wait for the GPU to finish
       * accessing the buffer; fail the map instead.
       */
      if((flags & PB_USAGE_DONTBLOCK) &&
          ops->fence_signalled(ops, fenced_buf->fence, 0) != 0) {
         goto done;
      }

      if (flags & PB_USAGE_UNSYNCHRONIZED) {
         break;
      }

      /*
       * Wait for the GPU to finish accessing. This will release and re-acquire
       * the mutex, so all copies of mutable state must be discarded.
       */
      fenced_buffer_finish_locked(fenced_mgr, fenced_buf);
   }

   map = pb_map(fenced_buf->buffer, flags, flush_ctx);

   if(map) {
      ++fenced_buf->mapcount;
      fenced_buf->flags |= flags & PB_USAGE_CPU_READ_WRITE;
   }

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return map;
}


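/**
 * pb_vtbl::unmap callback: release one CPU mapping; the CPU usage flags
 * are cleared when the last mapping goes away.
 */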
static void
fenced_buffer_unmap(struct pb_buffer *buf)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->mapcount);
   if(fenced_buf->mapcount) {
      if (fenced_buf->buffer)
         pb_unmap(fenced_buf->buffer);
      --fenced_buf->mapcount;
      if(!fenced_buf->mapcount)
         fenced_buf->flags &= ~PB_USAGE_CPU_READ_WRITE;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


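/**
 * pb_vtbl::validate callback: queue the buffer on a validation list with
 * the given GPU usage flags, or clear the validation state when vl is NULL.
 */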
static enum pipe_error
fenced_buffer_validate(struct pb_buffer *buf,
                       struct pb_validate *vl,
                       unsigned flags)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   enum pipe_error ret;

   pipe_mutex_lock(fenced_mgr->mutex);

   if(!vl) {
      /* Invalidate. */
      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
      ret = PIPE_OK;
      goto done;
   }

   assert(flags & PB_USAGE_GPU_READ_WRITE);
   assert(!(flags & ~PB_USAGE_GPU_READ_WRITE));
   flags &= PB_USAGE_GPU_READ_WRITE;

   /* Buffer cannot be validated in two different lists. */
   if(fenced_buf->vl && fenced_buf->vl != vl) {
      ret = PIPE_ERROR_RETRY;
      goto done;
   }

   if(fenced_buf->vl == vl &&
      (fenced_buf->validation_flags & flags) == flags) {
      /* Nothing to do -- buffer already validated. */
      ret = PIPE_OK;
      goto done;
   }

   ret = pb_validate(fenced_buf->buffer, vl, flags);
   if (ret != PIPE_OK)
      goto done;

   fenced_buf->vl = vl;
   fenced_buf->validation_flags |= flags;

done:
   pipe_mutex_unlock(fenced_mgr->mutex);

   return ret;
}


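/**
 * pb_vtbl::fence callback: associate a new fence with the buffer, moving it
 * between the unfenced and fenced lists as needed.
 */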
static void
fenced_buffer_fence(struct pb_buffer *buf,
                    struct pipe_fence_handle *fence)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;
   struct pb_fence_ops *ops = fenced_mgr->ops;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(pipe_is_referenced(&fenced_buf->base.reference));
   assert(fenced_buf->buffer);

   if(fence != fenced_buf->fence) {
      assert(fenced_buf->vl);
      assert(fenced_buf->validation_flags);

      if (fenced_buf->fence) {
         boolean destroyed;
         destroyed = fenced_buffer_remove_locked(fenced_mgr, fenced_buf);
         assert(!destroyed);
         (void) destroyed;
      }
      if (fence) {
         ops->fence_reference(ops, &fenced_buf->fence, fence);
         fenced_buf->flags |= fenced_buf->validation_flags;
         fenced_buffer_add_locked(fenced_mgr, fenced_buf);
      }

      pb_fence(fenced_buf->buffer, fence);

      fenced_buf->vl = NULL;
      fenced_buf->validation_flags = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static void
fenced_buffer_get_base_buffer(struct pb_buffer *buf,
                              struct pb_buffer **base_buf,
                              pb_size *offset)
{
   struct fenced_buffer *fenced_buf = fenced_buffer(buf);
   struct fenced_manager *fenced_mgr = fenced_buf->mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   assert(fenced_buf->buffer);

   if(fenced_buf->buffer)
      pb_get_base_buffer(fenced_buf->buffer, base_buf, offset);
   else {
      *base_buf = buf;
      *offset = 0;
   }

   pipe_mutex_unlock(fenced_mgr->mutex);
}


static const struct pb_vtbl
fenced_buffer_vtbl = {
      fenced_buffer_destroy,
      fenced_buffer_map,
      fenced_buffer_unmap,
      fenced_buffer_validate,
      fenced_buffer_fence,
      fenced_buffer_get_base_buffer
};


/**
 * Wrap a buffer in a fenced buffer.
 */
static struct pb_buffer *
fenced_bufmgr_create_buffer(struct pb_manager *mgr,
                            pb_size size,
                            const struct pb_desc *desc)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);
   struct fenced_buffer *fenced_buf;
   enum pipe_error ret;

   fenced_buf = CALLOC_STRUCT(fenced_buffer);
   if(!fenced_buf)
      goto no_buffer;

   pipe_reference_init(&fenced_buf->base.reference, 1);
   fenced_buf->base.alignment = desc->alignment;
   fenced_buf->base.usage = desc->usage;
   fenced_buf->base.size = size;
   fenced_buf->size = size;

   fenced_buf->base.vtbl = &fenced_buffer_vtbl;
   fenced_buf->mgr = fenced_mgr;

   pipe_mutex_lock(fenced_mgr->mutex);

   /*
    * Try to create GPU storage, waiting for fences to expire if necessary.
    */
   ret = fenced_buffer_create_gpu_storage_locked(fenced_mgr, fenced_buf,
                                                 desc, TRUE);

   /*
    * Give up.
    */
   if(ret != PIPE_OK) {
      goto no_storage;
   }

   assert(fenced_buf->buffer);

   LIST_ADDTAIL(&fenced_buf->head, &fenced_mgr->unfenced);
   ++fenced_mgr->num_unfenced;
   pipe_mutex_unlock(fenced_mgr->mutex);

   return &fenced_buf->base;

no_storage:
   pipe_mutex_unlock(fenced_mgr->mutex);
   FREE(fenced_buf);
no_buffer:
   return NULL;
}


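/**
 * pb_manager::flush callback: wait for all outstanding fences to expire,
 * then flush the provider.
 */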
static void
fenced_bufmgr_flush(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);
   while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
      ;
   pipe_mutex_unlock(fenced_mgr->mutex);

   assert(fenced_mgr->provider->flush);
   if(fenced_mgr->provider->flush)
      fenced_mgr->provider->flush(fenced_mgr->provider);
}


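/**
 * pb_manager::destroy callback: wait for all fenced buffers to expire,
 * then tear the manager down.
 */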
static void
fenced_bufmgr_destroy(struct pb_manager *mgr)
{
   struct fenced_manager *fenced_mgr = fenced_manager(mgr);

   pipe_mutex_lock(fenced_mgr->mutex);

   /* Wait on outstanding fences. */
   while (fenced_mgr->num_fenced) {
      pipe_mutex_unlock(fenced_mgr->mutex);
#if defined(PIPE_OS_LINUX) || defined(PIPE_OS_BSD) || defined(PIPE_OS_SOLARIS)
      sched_yield();
#endif
      pipe_mutex_lock(fenced_mgr->mutex);
      while(fenced_manager_check_signalled_locked(fenced_mgr, TRUE))
         ;
   }

#ifdef DEBUG
   /*assert(!fenced_mgr->num_unfenced);*/
#endif

   pipe_mutex_unlock(fenced_mgr->mutex);
   pipe_mutex_destroy(fenced_mgr->mutex);

   FREE(fenced_mgr);
}


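/**
 * Create a fenced buffer manager on top of a provider manager.
 *
 * A minimal usage sketch; how the provider and fence ops are obtained is
 * hypothetical and depends on the winsys:
 *
 *    struct pb_manager *provider = ...;   // e.g. the kernel buffer manager
 *    struct pb_fence_ops *ops = ...;      // the driver's fence callbacks
 *    struct pb_manager *mgr = simple_fenced_bufmgr_create(provider, ops);
 *    if (mgr) {
 *       // allocate buffers through mgr->create_buffer(), then eventually:
 *       mgr->destroy(mgr);
 *    }
 */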
struct pb_manager *
simple_fenced_bufmgr_create(struct pb_manager *provider,
                            struct pb_fence_ops *ops)
{
   struct fenced_manager *fenced_mgr;

   if(!provider)
      return NULL;

   fenced_mgr = CALLOC_STRUCT(fenced_manager);
   if (!fenced_mgr)
      return NULL;

   fenced_mgr->base.destroy = fenced_bufmgr_destroy;
   fenced_mgr->base.create_buffer = fenced_bufmgr_create_buffer;
   fenced_mgr->base.flush = fenced_bufmgr_flush;

   fenced_mgr->provider = provider;
   fenced_mgr->ops = ops;

   LIST_INITHEAD(&fenced_mgr->fenced);
   fenced_mgr->num_fenced = 0;

   LIST_INITHEAD(&fenced_mgr->unfenced);
   fenced_mgr->num_unfenced = 0;

   pipe_mutex_init(fenced_mgr->mutex);

   return &fenced_mgr->base;
}