Home | History | Annotate | Download | only in drm
      1 /*
      2  * Copyright 2014, 2015 Red Hat.
      3  *
      4  * Permission is hereby granted, free of charge, to any person obtaining a
      5  * copy of this software and associated documentation files (the "Software"),
      6  * to deal in the Software without restriction, including without limitation
      7  * on the rights to use, copy, modify, merge, publish, distribute, sub
      8  * license, and/or sell copies of the Software, and to permit persons to whom
      9  * the Software is furnished to do so, subject to the following conditions:
     10  *
     11  * The above copyright notice and this permission notice (including the next
     12  * paragraph) shall be included in all copies or substantial portions of the
     13  * Software.
     14  *
     15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
     19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     22  */
     23 
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
     29 
     30 #include "os/os_mman.h"
     31 #include "os/os_time.h"
     32 #include "util/u_memory.h"
     33 #include "util/u_format.h"
     34 #include "util/u_hash_table.h"
     35 #include "util/u_inlines.h"
     36 #include "state_tracker/drm_driver.h"
     37 #include "virgl/virgl_screen.h"
     38 #include "virgl/virgl_public.h"
     39 
     40 #include <xf86drm.h>
     41 #include "virtgpu_drm.h"
     42 
     43 #include "virgl_drm_winsys.h"
     44 #include "virgl_drm_public.h"
     45 
     46 static inline boolean can_cache_resource(struct virgl_hw_res *res)
     47 {
     48    return res->cacheable == TRUE;
     49 }
     50 
     51 static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
     52                                  struct virgl_hw_res *res)
     53 {
     54       struct drm_gem_close args;
     55 
     56       if (res->flinked) {
     57          pipe_mutex_lock(qdws->bo_handles_mutex);
     58          util_hash_table_remove(qdws->bo_names,
     59                                 (void *)(uintptr_t)res->flink);
     60          pipe_mutex_unlock(qdws->bo_handles_mutex);
     61       }
     62 
     63       if (res->bo_handle) {
     64          pipe_mutex_lock(qdws->bo_handles_mutex);
     65          util_hash_table_remove(qdws->bo_handles,
     66                                 (void *)(uintptr_t)res->bo_handle);
     67          pipe_mutex_unlock(qdws->bo_handles_mutex);
     68       }
     69 
     70       if (res->ptr)
     71          os_munmap(res->ptr, res->size);
     72 
     73       memset(&args, 0, sizeof(args));
     74       args.handle = res->bo_handle;
     75       drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
     76       FREE(res);
     77 }
     78 
     79 static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws,
     80                                           struct virgl_hw_res *res)
     81 {
     82    struct drm_virtgpu_3d_wait waitcmd;
     83    int ret;
     84 
     85    memset(&waitcmd, 0, sizeof(waitcmd));
     86    waitcmd.handle = res->bo_handle;
     87    waitcmd.flags = VIRTGPU_WAIT_NOWAIT;
     88 
     89    ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
     90    if (ret && errno == EBUSY)
     91       return TRUE;
     92    return FALSE;
     93 }
     94 
     95 static void
     96 virgl_cache_flush(struct virgl_drm_winsys *qdws)
     97 {
     98    struct list_head *curr, *next;
     99    struct virgl_hw_res *res;
    100 
    101    pipe_mutex_lock(qdws->mutex);
    102    curr = qdws->delayed.next;
    103    next = curr->next;
    104 
    105    while (curr != &qdws->delayed) {
    106       res = LIST_ENTRY(struct virgl_hw_res, curr, head);
    107       LIST_DEL(&res->head);
    108       virgl_hw_res_destroy(qdws, res);
    109       curr = next;
    110       next = curr->next;
    111    }
    112    pipe_mutex_unlock(qdws->mutex);
    113 }
/*
 * Destroy the whole winsys: flush the reuse cache, then release the hash
 * tables, mutexes, and the winsys struct itself.  Note the file descriptor
 * is not closed here (see virgl_drm_screen_create/destroy for fd ownership).
 */
static void
virgl_drm_winsys_destroy(struct virgl_winsys *qws)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);

   /* Destroys every cached resource; must precede the table teardown. */
   virgl_cache_flush(qdws);

   util_hash_table_destroy(qdws->bo_handles);
   util_hash_table_destroy(qdws->bo_names);
   pipe_mutex_destroy(qdws->bo_handles_mutex);
   pipe_mutex_destroy(qdws->mutex);

   FREE(qdws);
}
    128 
    129 static void
    130 virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
    131 {
    132    struct list_head *curr, *next;
    133    struct virgl_hw_res *res;
    134    int64_t now;
    135 
    136    now = os_time_get();
    137    curr = qdws->delayed.next;
    138    next = curr->next;
    139    while (curr != &qdws->delayed) {
    140       res = LIST_ENTRY(struct virgl_hw_res, curr, head);
    141       if (!os_time_timeout(res->start, res->end, now))
    142          break;
    143 
    144       LIST_DEL(&res->head);
    145       virgl_hw_res_destroy(qdws, res);
    146       curr = next;
    147       next = curr->next;
    148    }
    149 }
    150 
/*
 * Repoint *dres at sres with reference counting.  When *dres drops its last
 * reference it is either destroyed immediately or, if cacheable, parked on
 * the delayed list for reuse by resource_cache_create().
 */
static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
                                       struct virgl_hw_res **dres,
                                       struct virgl_hw_res *sres)
{
   struct virgl_hw_res *old = *dres;
   /* pipe_reference() returns TRUE when the old object must be released. */
   if (pipe_reference(&(*dres)->reference, &sres->reference)) {

      if (!can_cache_resource(old)) {
         virgl_hw_res_destroy(qdws, old);
      } else {
         pipe_mutex_lock(qdws->mutex);
         /* Opportunistically evict cache entries that have timed out. */
         virgl_cache_list_check_free(qdws);

         /* Park 'old' with an eviction deadline of now + qdws->usecs. */
         old->start = os_time_get();
         old->end = old->start + qdws->usecs;
         LIST_ADDTAIL(&old->head, &qdws->delayed);
         qdws->num_delayed++;
         pipe_mutex_unlock(qdws->mutex);
      }
   }
   *dres = sres;
}
    173 
    174 static struct virgl_hw_res *
    175 virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
    176                                  enum pipe_texture_target target,
    177                                  uint32_t format,
    178                                  uint32_t bind,
    179                                  uint32_t width,
    180                                  uint32_t height,
    181                                  uint32_t depth,
    182                                  uint32_t array_size,
    183                                  uint32_t last_level,
    184                                  uint32_t nr_samples,
    185                                  uint32_t size)
    186 {
    187    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
    188    struct drm_virtgpu_resource_create createcmd;
    189    int ret;
    190    struct virgl_hw_res *res;
    191    uint32_t stride = width * util_format_get_blocksize(format);
    192 
    193    res = CALLOC_STRUCT(virgl_hw_res);
    194    if (!res)
    195       return NULL;
    196 
    197    memset(&createcmd, 0, sizeof(createcmd));
    198    createcmd.target = target;
    199    createcmd.format = format;
    200    createcmd.bind = bind;
    201    createcmd.width = width;
    202    createcmd.height = height;
    203    createcmd.depth = depth;
    204    createcmd.array_size = array_size;
    205    createcmd.last_level = last_level;
    206    createcmd.nr_samples = nr_samples;
    207    createcmd.stride = stride;
    208    createcmd.size = size;
    209 
    210    ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
    211    if (ret != 0) {
    212       FREE(res);
    213       return NULL;
    214    }
    215 
    216    res->bind = bind;
    217    res->format = format;
    218 
    219    res->res_handle = createcmd.res_handle;
    220    res->bo_handle = createcmd.bo_handle;
    221    res->size = size;
    222    res->stride = stride;
    223    pipe_reference_init(&res->reference, 1);
    224    res->num_cs_references = 0;
    225    return res;
    226 }
    227 
    228 static inline int virgl_is_res_compat(struct virgl_drm_winsys *qdws,
    229                                       struct virgl_hw_res *res,
    230                                       uint32_t size, uint32_t bind,
    231                                       uint32_t format)
    232 {
    233    if (res->bind != bind)
    234       return 0;
    235    if (res->format != format)
    236       return 0;
    237    if (res->size < size)
    238       return 0;
    239    if (res->size > size * 2)
    240       return 0;
    241 
    242    if (virgl_drm_resource_is_busy(qdws, res)) {
    243       return -1;
    244    }
    245 
    246    return 1;
    247 }
    248 
    249 static int
    250 virgl_bo_transfer_put(struct virgl_winsys *vws,
    251                       struct virgl_hw_res *res,
    252                       const struct pipe_box *box,
    253                       uint32_t stride, uint32_t layer_stride,
    254                       uint32_t buf_offset, uint32_t level)
    255 {
    256    struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
    257    struct drm_virtgpu_3d_transfer_to_host tohostcmd;
    258 
    259    memset(&tohostcmd, 0, sizeof(tohostcmd));
    260    tohostcmd.bo_handle = res->bo_handle;
    261    tohostcmd.box = *(struct drm_virtgpu_3d_box *)box;
    262    tohostcmd.offset = buf_offset;
    263    tohostcmd.level = level;
    264   // tohostcmd.stride = stride;
    265   // tohostcmd.layer_stride = stride;
    266    return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
    267 }
    268 
    269 static int
    270 virgl_bo_transfer_get(struct virgl_winsys *vws,
    271                       struct virgl_hw_res *res,
    272                       const struct pipe_box *box,
    273                       uint32_t stride, uint32_t layer_stride,
    274                       uint32_t buf_offset, uint32_t level)
    275 {
    276    struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
    277    struct drm_virtgpu_3d_transfer_from_host fromhostcmd;
    278 
    279    memset(&fromhostcmd, 0, sizeof(fromhostcmd));
    280    fromhostcmd.bo_handle = res->bo_handle;
    281    fromhostcmd.level = level;
    282    fromhostcmd.offset = buf_offset;
    283   // fromhostcmd.stride = stride;
    284   // fromhostcmd.layer_stride = layer_stride;
    285    fromhostcmd.box = *(struct drm_virtgpu_3d_box *)box;
    286    return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
    287 }
    288 
    289 static struct virgl_hw_res *
    290 virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
    291                                        enum pipe_texture_target target,
    292                                        uint32_t format,
    293                                        uint32_t bind,
    294                                        uint32_t width,
    295                                        uint32_t height,
    296                                        uint32_t depth,
    297                                        uint32_t array_size,
    298                                        uint32_t last_level,
    299                                        uint32_t nr_samples,
    300                                        uint32_t size)
    301 {
    302    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
    303    struct virgl_hw_res *res, *curr_res;
    304    struct list_head *curr, *next;
    305    int64_t now;
    306    int ret;
    307 
    308    /* only store binds for vertex/index/const buffers */
    309    if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
    310        bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
    311       goto alloc;
    312 
    313    pipe_mutex_lock(qdws->mutex);
    314 
    315    res = NULL;
    316    curr = qdws->delayed.next;
    317    next = curr->next;
    318 
    319    now = os_time_get();
    320    while (curr != &qdws->delayed) {
    321       curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
    322 
    323       if (!res && ((ret = virgl_is_res_compat(qdws, curr_res, size, bind, format)) > 0))
    324          res = curr_res;
    325       else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
    326          LIST_DEL(&curr_res->head);
    327          virgl_hw_res_destroy(qdws, curr_res);
    328       } else
    329          break;
    330 
    331       if (ret == -1)
    332          break;
    333 
    334       curr = next;
    335       next = curr->next;
    336    }
    337 
    338    if (!res && ret != -1) {
    339       while (curr != &qdws->delayed) {
    340          curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
    341          ret = virgl_is_res_compat(qdws, curr_res, size, bind, format);
    342          if (ret > 0) {
    343             res = curr_res;
    344             break;
    345          }
    346          if (ret == -1)
    347             break;
    348          curr = next;
    349          next = curr->next;
    350       }
    351    }
    352 
    353    if (res) {
    354       LIST_DEL(&res->head);
    355       --qdws->num_delayed;
    356       pipe_mutex_unlock(qdws->mutex);
    357       pipe_reference_init(&res->reference, 1);
    358       return res;
    359    }
    360 
    361    pipe_mutex_unlock(qdws->mutex);
    362 
    363 alloc:
    364    res = virgl_drm_winsys_resource_create(qws, target, format, bind,
    365                                            width, height, depth, array_size,
    366                                            last_level, nr_samples, size);
    367    if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
    368        bind == VIRGL_BIND_VERTEX_BUFFER)
    369       res->cacheable = TRUE;
    370    return res;
    371 }
    372 
    373 static struct virgl_hw_res *
    374 virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
    375                                         struct winsys_handle *whandle)
    376 {
    377    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
    378    struct drm_gem_open open_arg = {};
    379    struct drm_virtgpu_resource_info info_arg = {};
    380    struct virgl_hw_res *res;
    381    uint32_t handle = whandle->handle;
    382 
    383    if (whandle->offset != 0) {
    384       fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
    385               whandle->offset);
    386       return NULL;
    387    }
    388 
    389    pipe_mutex_lock(qdws->bo_handles_mutex);
    390 
    391    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
    392       res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
    393       if (res) {
    394          struct virgl_hw_res *r = NULL;
    395          virgl_drm_resource_reference(qdws, &r, res);
    396          goto done;
    397       }
    398    }
    399 
    400    if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
    401       int r;
    402       r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
    403       if (r) {
    404          res = NULL;
    405          goto done;
    406       }
    407    }
    408 
    409    res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
    410    fprintf(stderr, "resource %p for handle %d, pfd=%d\n", res, handle, whandle->handle);
    411    if (res) {
    412       struct virgl_hw_res *r = NULL;
    413       virgl_drm_resource_reference(qdws, &r, res);
    414       goto done;
    415    }
    416 
    417    res = CALLOC_STRUCT(virgl_hw_res);
    418    if (!res)
    419       goto done;
    420 
    421    if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
    422       res->bo_handle = handle;
    423    } else {
    424       fprintf(stderr, "gem open handle %d\n", handle);
    425       memset(&open_arg, 0, sizeof(open_arg));
    426       open_arg.name = whandle->handle;
    427       if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
    428          FREE(res);
    429          res = NULL;
    430          goto done;
    431       }
    432       res->bo_handle = open_arg.handle;
    433    }
    434    res->name = handle;
    435 
    436    memset(&info_arg, 0, sizeof(info_arg));
    437    info_arg.bo_handle = res->bo_handle;
    438 
    439    if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
    440       /* close */
    441       FREE(res);
    442       res = NULL;
    443       goto done;
    444    }
    445 
    446    res->res_handle = info_arg.res_handle;
    447 
    448    res->size = info_arg.size;
    449    res->stride = info_arg.stride;
    450    pipe_reference_init(&res->reference, 1);
    451    res->num_cs_references = 0;
    452 
    453    util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);
    454 
    455 done:
    456    pipe_mutex_unlock(qdws->bo_handles_mutex);
    457    return res;
    458 }
    459 
    460 static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
    461                                                     struct virgl_hw_res *res,
    462                                                     uint32_t stride,
    463                                                     struct winsys_handle *whandle)
    464  {
    465    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
    466    struct drm_gem_flink flink;
    467 
    468    if (!res)
    469        return FALSE;
    470 
    471    if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
    472       if (!res->flinked) {
    473          memset(&flink, 0, sizeof(flink));
    474          flink.handle = res->bo_handle;
    475 
    476          if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
    477             return FALSE;
    478          }
    479          res->flinked = TRUE;
    480          res->flink = flink.name;
    481 
    482          pipe_mutex_lock(qdws->bo_handles_mutex);
    483          util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
    484          pipe_mutex_unlock(qdws->bo_handles_mutex);
    485       }
    486       whandle->handle = res->flink;
    487    } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
    488       whandle->handle = res->bo_handle;
    489    } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
    490       if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
    491             return FALSE;
    492       pipe_mutex_lock(qdws->bo_handles_mutex);
    493       util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)res->bo_handle, res);
    494       pipe_mutex_unlock(qdws->bo_handles_mutex);
    495    }
    496    whandle->stride = stride;
    497    return TRUE;
    498 }
    499 
    500 static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
    501                                             struct virgl_hw_res *hres)
    502 {
    503    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
    504 
    505    virgl_drm_resource_reference(qdws, &hres, NULL);
    506 }
    507 
    508 static void *virgl_drm_resource_map(struct virgl_winsys *qws,
    509                                     struct virgl_hw_res *res)
    510 {
    511    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
    512    struct drm_virtgpu_map mmap_arg;
    513    void *ptr;
    514 
    515    if (res->ptr)
    516       return res->ptr;
    517 
    518    memset(&mmap_arg, 0, sizeof(mmap_arg));
    519    mmap_arg.handle = res->bo_handle;
    520    if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
    521       return NULL;
    522 
    523    ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
    524                  qdws->fd, mmap_arg.offset);
    525    if (ptr == MAP_FAILED)
    526       return NULL;
    527 
    528    res->ptr = ptr;
    529    return ptr;
    530 
    531 }
    532 
/*
 * Block until the host has finished with @res (flags == 0 requests a
 * blocking wait, unlike the NOWAIT poll in virgl_drm_resource_is_busy()).
 *
 * NOTE(review): drmIoctl() reports failure as -1 with errno set, and libdrm
 * already restarts on EINTR/EAGAIN internally, so the `ret == -EAGAIN`
 * comparison below looks like it can never be true — confirm the intended
 * retry condition before relying on this loop.
 */
static void virgl_drm_resource_wait(struct virgl_winsys *qws,
                                    struct virgl_hw_res *res)
{
   struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
   struct drm_virtgpu_3d_wait waitcmd;
   int ret;

   memset(&waitcmd, 0, sizeof(waitcmd));
   waitcmd.handle = res->bo_handle;
 again:
   ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
   if (ret == -EAGAIN)
      goto again;
}
    547 
    548 static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws)
    549 {
    550    struct virgl_drm_cmd_buf *cbuf;
    551 
    552    cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
    553    if (!cbuf)
    554       return NULL;
    555 
    556    cbuf->ws = qws;
    557 
    558    cbuf->nres = 512;
    559    cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
    560    if (!cbuf->res_bo) {
    561       FREE(cbuf);
    562       return NULL;
    563    }
    564    cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
    565    if (!cbuf->res_hlist) {
    566       FREE(cbuf->res_bo);
    567       FREE(cbuf);
    568       return NULL;
    569    }
    570 
    571    cbuf->base.buf = cbuf->buf;
    572    return &cbuf->base;
    573 }
    574 
    575 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
    576 {
    577    struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
    578 
    579    FREE(cbuf->res_hlist);
    580    FREE(cbuf->res_bo);
    581    FREE(cbuf);
    582 
    583 }
    584 
    585 static boolean virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
    586                                     struct virgl_hw_res *res)
    587 {
    588    unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
    589    int i;
    590 
    591    if (cbuf->is_handle_added[hash]) {
    592       i = cbuf->reloc_indices_hashlist[hash];
    593       if (cbuf->res_bo[i] == res)
    594          return true;
    595 
    596       for (i = 0; i < cbuf->cres; i++) {
    597          if (cbuf->res_bo[i] == res) {
    598             cbuf->reloc_indices_hashlist[hash] = i;
    599             return true;
    600          }
    601       }
    602    }
    603    return false;
    604 }
    605 
    606 static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
    607                               struct virgl_drm_cmd_buf *cbuf,
    608                               struct virgl_hw_res *res)
    609 {
    610    unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
    611 
    612    if (cbuf->cres > cbuf->nres) {
    613       fprintf(stderr,"failure to add relocation\n");
    614       return;
    615    }
    616 
    617    cbuf->res_bo[cbuf->cres] = NULL;
    618    virgl_drm_resource_reference(qdws, &cbuf->res_bo[cbuf->cres], res);
    619    cbuf->res_hlist[cbuf->cres] = res->bo_handle;
    620    cbuf->is_handle_added[hash] = TRUE;
    621 
    622    cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
    623    p_atomic_inc(&res->num_cs_references);
    624    cbuf->cres++;
    625 }
    626 
    627 static void virgl_drm_release_all_res(struct virgl_drm_winsys *qdws,
    628                                       struct virgl_drm_cmd_buf *cbuf)
    629 {
    630    int i;
    631 
    632    for (i = 0; i < cbuf->cres; i++) {
    633       p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
    634       virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
    635    }
    636    cbuf->cres = 0;
    637 }
    638 
    639 static void virgl_drm_emit_res(struct virgl_winsys *qws,
    640                                struct virgl_cmd_buf *_cbuf,
    641                                struct virgl_hw_res *res, boolean write_buf)
    642 {
    643    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
    644    struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
    645    boolean already_in_list = virgl_drm_lookup_res(cbuf, res);
    646 
    647    if (write_buf)
    648       cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;
    649 
    650    if (!already_in_list)
    651       virgl_drm_add_res(qdws, cbuf, res);
    652 }
    653 
    654 static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
    655                                     struct virgl_cmd_buf *_cbuf,
    656                                     struct virgl_hw_res *res)
    657 {
    658    if (!res->num_cs_references)
    659       return FALSE;
    660 
    661    return TRUE;
    662 }
    663 
    664 static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
    665                                        struct virgl_cmd_buf *_cbuf)
    666 {
    667    struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
    668    struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
    669    struct drm_virtgpu_execbuffer eb;
    670    int ret;
    671 
    672    if (cbuf->base.cdw == 0)
    673       return 0;
    674 
    675    memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
    676    eb.command = (unsigned long)(void*)cbuf->buf;
    677    eb.size = cbuf->base.cdw * 4;
    678    eb.num_bo_handles = cbuf->cres;
    679    eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;
    680 
    681    ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
    682    if (ret == -1)
    683       fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
    684    cbuf->base.cdw = 0;
    685 
    686    virgl_drm_release_all_res(qdws, cbuf);
    687 
    688    memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
    689    return ret;
    690 }
    691 
    692 static int virgl_drm_get_caps(struct virgl_winsys *vws,
    693                               struct virgl_drm_caps *caps)
    694 {
    695    struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
    696    struct drm_virtgpu_get_caps args;
    697 
    698    memset(&args, 0, sizeof(args));
    699 
    700    args.cap_set_id = 1;
    701    args.addr = (unsigned long)&caps->caps;
    702    args.size = sizeof(union virgl_caps);
    703    return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
    704 }
    705 
    706 #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
    707 
    708 static unsigned handle_hash(void *key)
    709 {
    710     return PTR_TO_UINT(key);
    711 }
    712 
    713 static int handle_compare(void *key1, void *key2)
    714 {
    715     return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
    716 }
    717 
    718 static struct pipe_fence_handle *
    719 virgl_cs_create_fence(struct virgl_winsys *vws)
    720 {
    721    struct virgl_hw_res *res;
    722 
    723    res = virgl_drm_winsys_resource_cache_create(vws,
    724                                                 PIPE_BUFFER,
    725                                                 PIPE_FORMAT_R8_UNORM,
    726                                                 VIRGL_BIND_CUSTOM,
    727                                                 8, 1, 1, 0, 0, 0, 8);
    728 
    729    return (struct pipe_fence_handle *)res;
    730 }
    731 
    732 static bool virgl_fence_wait(struct virgl_winsys *vws,
    733                              struct pipe_fence_handle *fence,
    734                              uint64_t timeout)
    735 {
    736    struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
    737    struct virgl_hw_res *res = virgl_hw_res(fence);
    738 
    739    if (timeout == 0)
    740       return !virgl_drm_resource_is_busy(vdws, res);
    741 
    742    if (timeout != PIPE_TIMEOUT_INFINITE) {
    743       int64_t start_time = os_time_get();
    744       timeout /= 1000;
    745       while (virgl_drm_resource_is_busy(vdws, res)) {
    746          if (os_time_get() - start_time >= timeout)
    747             return FALSE;
    748          os_time_sleep(10);
    749       }
    750       return TRUE;
    751    }
    752    virgl_drm_resource_wait(vws, res);
    753    return TRUE;
    754 }
    755 
    756 static void virgl_fence_reference(struct virgl_winsys *vws,
    757                                   struct pipe_fence_handle **dst,
    758                                   struct pipe_fence_handle *src)
    759 {
    760    struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
    761    virgl_drm_resource_reference(vdws, (struct virgl_hw_res **)dst,
    762                                 virgl_hw_res(src));
    763 }
    764 
    765 
    766 static struct virgl_winsys *
    767 virgl_drm_winsys_create(int drmFD)
    768 {
    769    struct virgl_drm_winsys *qdws;
    770 
    771    qdws = CALLOC_STRUCT(virgl_drm_winsys);
    772    if (!qdws)
    773       return NULL;
    774 
    775    qdws->fd = drmFD;
    776    qdws->num_delayed = 0;
    777    qdws->usecs = 1000000;
    778    LIST_INITHEAD(&qdws->delayed);
    779    pipe_mutex_init(qdws->mutex);
    780    pipe_mutex_init(qdws->bo_handles_mutex);
    781    qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
    782    qdws->bo_names = util_hash_table_create(handle_hash, handle_compare);
    783    qdws->base.destroy = virgl_drm_winsys_destroy;
    784 
    785    qdws->base.transfer_put = virgl_bo_transfer_put;
    786    qdws->base.transfer_get = virgl_bo_transfer_get;
    787    qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
    788    qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
    789    qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
    790    qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
    791    qdws->base.resource_map = virgl_drm_resource_map;
    792    qdws->base.resource_wait = virgl_drm_resource_wait;
    793    qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
    794    qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
    795    qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
    796    qdws->base.emit_res = virgl_drm_emit_res;
    797    qdws->base.res_is_referenced = virgl_drm_res_is_ref;
    798 
    799    qdws->base.cs_create_fence = virgl_cs_create_fence;
    800    qdws->base.fence_wait = virgl_fence_wait;
    801    qdws->base.fence_reference = virgl_fence_reference;
    802 
    803    qdws->base.get_caps = virgl_drm_get_caps;
    804    return &qdws->base;
    805 
    806 }
    807 
    808 static struct util_hash_table *fd_tab = NULL;
    809 pipe_static_mutex(virgl_screen_mutex);
    810 
/*
 * Screen destroy hook installed by virgl_drm_screen_create().  Drops one
 * sharing refcount; on the last drop it removes the screen from fd_tab,
 * restores the pipe driver's original destroy callback (stashed in
 * winsys_priv), and invokes it to do the real teardown.
 */
static void
virgl_drm_screen_destroy(struct pipe_screen *pscreen)
{
   struct virgl_screen *screen = virgl_screen(pscreen);
   boolean destroy;

   pipe_mutex_lock(virgl_screen_mutex);
   destroy = --screen->refcnt == 0;
   if (destroy) {
      int fd = virgl_drm_winsys(screen->vws)->fd;
      util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
   }
   pipe_mutex_unlock(virgl_screen_mutex);

   /* Real destruction happens outside the lock via the saved callback. */
   if (destroy) {
      pscreen->destroy = screen->winsys_priv;
      pscreen->destroy(pscreen);
   }
}
    830 
    831 static unsigned hash_fd(void *key)
    832 {
    833    int fd = pointer_to_intptr(key);
    834    struct stat stat;
    835    fstat(fd, &stat);
    836 
    837    return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
    838 }
    839 
    840 static int compare_fd(void *key1, void *key2)
    841 {
    842    int fd1 = pointer_to_intptr(key1);
    843    int fd2 = pointer_to_intptr(key2);
    844    struct stat stat1, stat2;
    845    fstat(fd1, &stat1);
    846    fstat(fd2, &stat2);
    847 
    848    return stat1.st_dev != stat2.st_dev ||
    849          stat1.st_ino != stat2.st_ino ||
    850          stat1.st_rdev != stat2.st_rdev;
    851 }
    852 
    853 struct pipe_screen *
    854 virgl_drm_screen_create(int fd)
    855 {
    856    struct pipe_screen *pscreen = NULL;
    857 
    858    pipe_mutex_lock(virgl_screen_mutex);
    859    if (!fd_tab) {
    860       fd_tab = util_hash_table_create(hash_fd, compare_fd);
    861       if (!fd_tab)
    862          goto unlock;
    863    }
    864 
    865    pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    866    if (pscreen) {
    867       virgl_screen(pscreen)->refcnt++;
    868    } else {
    869       struct virgl_winsys *vws;
    870       int dup_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
    871 
    872       vws = virgl_drm_winsys_create(dup_fd);
    873 
    874       pscreen = virgl_create_screen(vws);
    875       if (pscreen) {
    876          util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen);
    877 
    878          /* Bit of a hack, to avoid circular linkage dependency,
    879           * ie. pipe driver having to call in to winsys, we
    880           * override the pipe drivers screen->destroy():
    881           */
    882          virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
    883          pscreen->destroy = virgl_drm_screen_destroy;
    884       }
    885    }
    886 
    887 unlock:
    888    pipe_mutex_unlock(virgl_screen_mutex);
    889    return pscreen;
    890 }
    891