/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "util/macros.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <poll.h>
#include <xf86drm.h>
#include "util/hash_table.h"

#include "vk_util.h"
#include "wsi_common_private.h"
#include "wsi_common_x11.h"
#include "wsi_common_queue.h"

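/* A size-checked memcpy: the STATIC_ASSERT rejects, at compile time, copies
 * between arrays whose elements have different sizes.  For example,
 * typed_memcpy(pPresentModes, present_modes, n) in
 * x11_surface_get_present_modes() compiles only because both sides are
 * VkPresentModeKHR arrays.
 */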
#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

struct wsi_x11_connection {
   bool has_dri3;
   bool has_present;
   bool is_proprietary_x11;
};

struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t                              mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};


/** wsi_dri3_open
 *
 * Wrapper around xcb_dri3_open
 */
static int
wsi_dri3_open(xcb_connection_t *conn,
              xcb_window_t root,
              uint32_t provider)
{
   xcb_dri3_open_cookie_t       cookie;
   xcb_dri3_open_reply_t        *reply;
   int                          fd;

   cookie = xcb_dri3_open(conn,
                          root,
                          provider);

   reply = xcb_dri3_open_reply(conn, cookie, NULL);
   if (!reply)
      return -1;

   if (reply->nfd != 1) {
      free(reply);
      return -1;
   }

   fd = xcb_dri3_open_reply_fds(conn, reply)[0];
   free(reply);
   fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC);

   return fd;
}

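/* Check whether the X server's DRI3 device and the caller's fd refer to the
 * same GPU by resolving both to their kernel render-node paths (e.g.
 * /dev/dri/renderD128) with drmGetRenderDeviceNameFromFd() and comparing
 * the strings.  If the DRI3 open itself fails, we optimistically report
 * compatibility; on a mismatch, callers either fall back to a prime blit
 * (x11_surface_create_swapchain) or report no presentation support.
 */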
static bool
wsi_x11_check_dri3_compatible(xcb_connection_t *conn, int local_fd)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));
   xcb_screen_t *screen = screen_iter.data;

   int dri3_fd = wsi_dri3_open(conn, screen->root, None);
   if (dri3_fd != -1) {
      char *local_dev = drmGetRenderDeviceNameFromFd(local_fd);
      char *dri3_dev = drmGetRenderDeviceNameFromFd(dri3_fd);
      /* drmGetRenderDeviceNameFromFd() may return NULL; treat a missing
       * render node name as a mismatch rather than crashing in strcmp().
       */
      int ret = -1;

      close(dri3_fd);

      if (local_dev && dri3_dev)
         ret = strcmp(local_dev, dri3_dev);

      free(local_dev);
      free(dri3_dev);

      if (ret != 0)
         return false;
   }
   return true;
}

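/* Create the per-connection state for an xcb_connection_t.  All four
 * extension queries are sent before any reply is read, so the four
 * round-trips to the X server overlap into (roughly) one.
 */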
static struct wsi_x11_connection *
wsi_x11_connection_create(const VkAllocationCallbacks *alloc,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *amd_reply, *nv_reply;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   /* Extension names are case-sensitive; the Present extension registers
    * itself as "Present", not "PRESENT".
    */
   pres_cookie = xcb_query_extension(conn, 7, "Present");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled.  However, this
    * ends up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers, and
    * is running on the discrete card with the proprietary DDX.  In this
    * case, we really don't want to print the warning because it just
    * confuses users.  As a heuristic to detect this case, we check for a
    * couple of proprietary X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   if (!dri3_reply || !pres_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(amd_reply);
      free(nv_reply);
      vk_free(alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
   wsi_conn->has_present = pres_reply->present != 0;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   free(dri3_reply);
   free(pres_reply);
   free(amd_reply);
   free(nv_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(const VkAllocationCallbacks *alloc,
                           struct wsi_x11_connection *conn)
{
   vk_free(alloc, conn);
}

static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
  if (wsi_conn->has_dri3)
    return true;
  if (!wsi_conn->is_proprietary_x11) {
    fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                    "Note: you can probably enable DRI3 in your Xorg config\n");
  }
  return false;
}

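/* Look up (or lazily create) the wsi_x11_connection for an XCB connection.
 * The hash table is guarded by wsi->mutex, but the mutex is dropped around
 * wsi_x11_connection_create() since it performs blocking X round-trips; the
 * table is re-checked after re-acquiring the lock in case another thread
 * won the race and inserted an entry first.
 */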
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       const VkAllocationCallbacks *alloc,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(alloc, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(alloc, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

static const VkFormat formats[] = {
   VK_FORMAT_B8G8R8A8_SRGB,
   VK_FORMAT_B8G8R8A8_UNORM,
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next (&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator (depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next (&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next (&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

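/* Does the visual reserve bits beyond RGB for an alpha channel?  Worked
 * example: a depth-32 visual with 8-bit red/green/blue masks covers only
 * 24 of the 32 bits, so (all_mask & ~rgb_mask) leaves the 8 alpha bits
 * non-zero and we report alpha support; a depth-24 visual with the same
 * masks has no bits left over.
 */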
static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

VkBool32 wsi_get_physical_device_xcb_presentation_support(
    struct wsi_device *wsi_device,
    VkAllocationCallbacks *alloc,
    uint32_t                                    queueFamilyIndex,
    int fd,
    bool can_handle_different_gpu,
    xcb_connection_t*                           connection,
    xcb_visualid_t                              visual_id)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, alloc, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_x11_check_for_dri3(wsi_conn))
      return false;

   if (!can_handle_different_gpu)
      if (!wsi_x11_check_dri3_compatible(connection, fd))
         return false;

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        const VkAllocationCallbacks *alloc,
                        uint32_t queueFamilyIndex,
                        int local_fd,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, alloc, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_x11_check_for_dri3(wsi_conn)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(conn, window, &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

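/* Fill out VkSurfaceCapabilitiesKHR for the window.  Note the fallback
 * below: when the geometry request fails, currentExtent is set to
 * (0xffffffff, 0xffffffff) (written as -1 casts), which the Vulkan spec
 * defines to mean "the surface size will be determined by the extent of
 * the swapchain".
 */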
static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   if (!visual)
      return VK_ERROR_SURFACE_LOST_KHR;

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      /* This is the maximum supported size on Intel */
      caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
   }
   free(err);
   free(geom);

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the X server
    *  4) One to render to
    */
   caps->minImageCount = 2;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities2(VkIcdSurfaceBase *icd_surface,
                              const void *info_next,
                              VkSurfaceCapabilities2KHR *caps)
{
   assert(caps->sType == VK_STRUCTURE_TYPE_SURFACE_CAPABILITIES_2_KHR);

   return x11_surface_get_capabilities(icd_surface, &caps->surfaceCapabilities);
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      vk_outarray_append(&out, f) {
         f->format = formats[i];
         f->colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_formats2(VkIcdSurfaceBase *surface,
                         struct wsi_device *wsi_device,
                         const void *info_next,
                         uint32_t *pSurfaceFormatCount,
                         VkSurfaceFormat2KHR *pSurfaceFormats)
{
   VK_OUTARRAY_MAKE(out, pSurfaceFormats, pSurfaceFormatCount);

   for (unsigned i = 0; i < ARRAY_SIZE(formats); i++) {
      vk_outarray_append(&out, f) {
         assert(f->sType == VK_STRUCTURE_TYPE_SURFACE_FORMAT_2_KHR);
         f->surfaceFormat.format = formats[i];
         f->surfaceFormat.colorSpace = VK_COLORSPACE_SRGB_NONLINEAR_KHR;
      }
   }

   return vk_outarray_status(&out);
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}

VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
                                const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXcb *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
                                 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                 VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXlib *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

struct x11_image {
   struct wsi_image                             base;
   xcb_pixmap_t                                 pixmap;
   bool                                         busy;
   struct xshmfence *                           shm_fence;
   uint32_t                                     sync_fence;
};

struct x11_swapchain {
   struct wsi_swapchain                         base;

   xcb_connection_t *                           conn;
   xcb_window_t                                 window;
   xcb_gc_t                                     gc;
   uint32_t                                     depth;
   VkExtent2D                                   extent;

   xcb_present_event_t                          event_id;
   xcb_special_event_t *                        special_event;
   uint64_t                                     send_sbc;
   uint64_t                                     last_present_msc;
   uint32_t                                     stamp;

   bool                                         threaded;
   VkResult                                     status;
   struct wsi_queue                             present_queue;
   struct wsi_queue                             acquire_queue;
   pthread_t                                    queue_manager;

   struct x11_image                             images[0];
};

static struct wsi_image *
x11_get_wsi_image(struct wsi_swapchain *wsi_chain, uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)wsi_chain;
   return &chain->images[image_index].base;
}

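/* Process one Present extension event for the swapchain.  CONFIGURE_NOTIFY
 * reports window resizes (a size mismatch returns OUT_OF_DATE so the app
 * recreates the swapchain), IDLE_NOTIFY tells us the server is done with a
 * pixmap (the image becomes acquirable again), and COMPLETE_NOTIFY records
 * the MSC (media stream counter, i.e. vblank count) of the last completed
 * present, which the FIFO thread uses for pacing.
 */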
static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_ERROR_OUT_OF_DATE_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->base.image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            if (chain->threaded)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
         chain->last_present_msc = complete->msc;
      break;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}


static uint64_t wsi_get_current_time(void)
{
   uint64_t current_time;
   struct timespec tv;

   clock_gettime(CLOCK_MONOTONIC, &tv);
   current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;
   return current_time;
}

static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = wsi_get_current_time();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}

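/* Acquire an image by polling X11 directly (non-threaded swapchains).
 * Each pass scans for an image the server has marked idle; failing that,
 * it waits on the swapchain's special event queue, using poll(2) on the
 * XCB fd to honor the caller's timeout.  Vulkan timeouts are nanoseconds
 * while poll takes milliseconds, hence the /1000/1000 below.
 */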
static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;
   while (1) {
      for (uint32_t i = 0; i < chain->base.image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return VK_ERROR_OUT_OF_DATE_KHR;
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return VK_NOT_READY;

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return VK_TIMEOUT;
            if (ret == -1)
               return VK_ERROR_OUT_OF_DATE_KHR;
            /* The fd may become readable for an event other than our
             * special event, so recompute the remaining timeout before
             * looping back around.
             */
            uint64_t current_time = wsi_get_current_time();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result != VK_SUCCESS)
         return result;
   }
}

static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->threaded);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result != VK_SUCCESS) {
      return result;
   } else if (chain->status != VK_SUCCESS) {
      return chain->status;
   }

   assert(image_index < chain->base.image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return VK_SUCCESS;
}

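/* Submit one image to the X server with xcb_present_pixmap().  send_sbc is
 * this swapchain's monotonically increasing presentation serial.  A
 * target_msc of 0 means "present at the next opportunity", while FIFO mode
 * passes last_present_msc + 1 to pace one frame per vblank.  IMMEDIATE
 * mode adds XCB_PRESENT_OPTION_ASYNC so the copy may happen without
 * waiting for vblank (tearing allowed).
 */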
static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint32_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->base.image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,                                    /* valid */
                         0,                                    /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         XCB_NONE,                             /* target_crtc */
                         XCB_NONE,
                         image->sync_fence,
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return VK_SUCCESS;
}

static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       uint64_t timeout,
                       VkSemaphore semaphore,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (chain->threaded) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}

static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index,
                  const VkPresentRegionKHR *damage)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (chain->threaded) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      return x11_present_to_x11(chain, image_index, 0);
   }
}

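/* Worker thread for FIFO swapchains.  vkQueuePresentKHR only pushes the
 * image index onto present_queue; this thread pops it, presents at
 * last_present_msc + 1, then drains Present events until that frame's
 * COMPLETE_NOTIFY arrives, so presentation throttles to one image per
 * vblank without blocking the application's queue.  Idle images flow back
 * to the application through acquire_queue.
 */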
static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   VkResult result;

   assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);

   while (chain->status == VK_SUCCESS) {
      /* It should be safe to unconditionally block here.  Later in the loop
       * we block until the previous present has landed on-screen.  At that
       * point, we should have received IDLE_NOTIFY on all images presented
       * before that point, so the client should be able to acquire any image
       * other than the currently presented one.
       */
      uint32_t image_index;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      if (result != VK_SUCCESS) {
         goto fail;
      } else if (chain->status != VK_SUCCESS) {
         return NULL;
      }

      uint64_t target_msc = chain->last_present_msc + 1;
      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result != VK_SUCCESS)
         goto fail;

      while (chain->last_present_msc < target_msc) {
         xcb_generic_event_t *event =
            xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }

         result = x11_handle_dri3_present_event(chain, (void *)event);
         free(event);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

fail:
   chain->status = result;
   wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}

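/* Back one swapchain image with an X11 pixmap.  The image's dma-buf fd is
 * handed to the server with DRI3 PixmapFromBuffer (which takes ownership
 * of the fd), and the pixmap is paired with an xshmfence: the server
 * triggers the fence when it is done reading, and the acquire paths wait
 * on it before handing the image back to the application.
 */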
static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t bpp = 32;

   if (chain->base.use_prime_blit) {
      result = wsi_create_prime_image(&chain->base, pCreateInfo, &image->base);
   } else {
      result = wsi_create_native_image(&chain->base, pCreateInfo, &image->base);
   }
   if (result != VK_SUCCESS)
      return result;

   image->pixmap = xcb_generate_id(chain->conn);

   cookie =
      xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                          image->pixmap,
                                          chain->window,
                                          image->base.size,
                                          pCreateInfo->imageExtent.width,
                                          pCreateInfo->imageExtent.height,
                                          image->base.row_pitch,
                                          chain->depth, bpp,
                                          image->base.fd);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->base.fd = -1; /* XCB has now taken ownership of the FD */

   /* Set result on the failure paths below; otherwise they would return
    * the VK_SUCCESS left over from image creation.
    */
   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_pixmap;
   }

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_shmfence_alloc;
   }

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);

   return result;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_destroy_image(&chain->base, &image->base);
}

static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   for (uint32_t i = 0; i < chain->base.image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   if (chain->threaded) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);
      wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   wsi_swapchain_finish(&chain->base);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             int local_fd,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   const unsigned num_images = pCreateInfo->minImageCount;

   /* Check for whether or not we have a window up-front */
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);
   if (geometry == NULL)
      return VK_ERROR_SURFACE_LOST_KHR;
   const uint32_t bit_depth = geometry->depth;
   free(geometry);

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   result = wsi_swapchain_init(wsi_device, &chain->base, device,
                               pCreateInfo, pAllocator);
   if (result != VK_SUCCESS)
      goto fail_alloc;

   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_wsi_image = x11_get_wsi_image;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.present_mode = pCreateInfo->presentMode;
   chain->base.image_count = num_images;
   chain->conn = conn;
   chain->window = window;
   chain->depth = bit_depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->send_sbc = 0;
   chain->last_present_msc = 0;
   chain->threaded = false;
   chain->status = VK_SUCCESS;

   if (!wsi_x11_check_dri3_compatible(conn, local_fd))
      chain->base.use_prime_blit = true;

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint32_t image = 0;
   for (; image < chain->base.image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      chain->threaded = true;

      /* Initialize our queues.  We make them base.image_count + 1 because we
       * will occasionally use UINT32_MAX to signal the other thread that an
       * error has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->acquire_queue, chain->base.image_count + 1);
      if (ret) {
         /* Set result so the failure paths don't return VK_SUCCESS. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail_init_images;
      }

      ret = wsi_queue_init(&chain->present_queue, chain->base.image_count + 1);
      if (ret) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }

      for (unsigned i = 0; i < chain->base.image_count; i++)
         wsi_queue_push(&chain->acquire_queue, i);

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         wsi_queue_destroy(&chain->present_queue);
         wsi_queue_destroy(&chain->acquire_queue);
         goto fail_init_images;
      }
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   wsi_swapchain_finish(&chain->base);

fail_alloc:
   vk_free(pAllocator, chain);

   return result;
}

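/* Entry point for drivers: allocates the struct wsi_x11, its connection
 * hash table and mutex, fills in the surface callbacks, and installs the
 * backend for both the XCB and Xlib platforms (they share all state).
 *
 * A minimal usage sketch (the driver struct and field names here are
 * illustrative, not part of this file):
 *
 *    VkResult result = wsi_x11_init_wsi(&pdevice->wsi_device, alloc);
 *    ...
 *    wsi_x11_finish_wsi(&pdevice->wsi_device, alloc);
 */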
VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities = x11_surface_get_capabilities;
   wsi->base.get_capabilities2 = x11_surface_get_capabilities2;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_formats2 = x11_surface_get_formats2;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}

void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      struct hash_entry *entry;
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(alloc, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}