/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <X11/Xlib-xcb.h>
#include <X11/xshmfence.h>
#include <xcb/xcb.h>
#include <xcb/dri3.h>
#include <xcb/present.h>

#include "util/macros.h"
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <time.h> /* for clock_gettime() and struct timespec */

#include <poll.h>
#include "util/hash_table.h"

#include "wsi_common.h"
#include "wsi_common_x11.h"
#include "wsi_common_queue.h"

#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

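/* Usage sketch for typed_memcpy (dst is a hypothetical buffer, not a name
 * used in this file): the STATIC_ASSERT rejects mismatched element types at
 * compile time, so
 *
 *    VkSurfaceFormatKHR dst[2];
 *    typed_memcpy(dst, formats, 2);        // compiles: element sizes match
 *    typed_memcpy(dst, present_modes, 2);  // rejected: element sizes differ
 *
 * can never silently copy the wrong number of bytes.
 */
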
struct wsi_x11_connection {
   bool has_dri3;
   bool has_present;
   bool is_proprietary_x11;
};

struct wsi_x11 {
   struct wsi_interface base;

   pthread_mutex_t                              mutex;
   /* Hash table of xcb_connection -> wsi_x11_connection mappings */
   struct hash_table *connections;
};

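/* Creates the per-connection state by querying the server for the DRI3,
 * Present, and proprietary-driver extensions.  All four cookies are issued
 * before any reply is read, so the queries share one round trip to the
 * server instead of paying for four.
 */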
static struct wsi_x11_connection *
wsi_x11_connection_create(const VkAllocationCallbacks *alloc,
                          xcb_connection_t *conn)
{
   xcb_query_extension_cookie_t dri3_cookie, pres_cookie, amd_cookie, nv_cookie;
   xcb_query_extension_reply_t *dri3_reply, *pres_reply, *amd_reply, *nv_reply;

   struct wsi_x11_connection *wsi_conn =
      vk_alloc(alloc, sizeof(*wsi_conn), 8,
               VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi_conn)
      return NULL;

   dri3_cookie = xcb_query_extension(conn, 4, "DRI3");
   pres_cookie = xcb_query_extension(conn, 7, "PRESENT");

   /* We try to be nice to users and emit a warning if they try to use a
    * Vulkan application on a system without DRI3 enabled.  However, this
    * ends up spewing the warning when a user has, for example, both Intel
    * integrated graphics and a discrete card with proprietary drivers and
    * is running on the discrete card with the proprietary DDX.  In this
    * case, we really don't want to print the warning because it just
    * confuses users.  As a heuristic to detect this case, we check for a
    * couple of proprietary X11 extensions.
    */
   amd_cookie = xcb_query_extension(conn, 11, "ATIFGLRXDRI");
   nv_cookie = xcb_query_extension(conn, 10, "NV-CONTROL");

   dri3_reply = xcb_query_extension_reply(conn, dri3_cookie, NULL);
   pres_reply = xcb_query_extension_reply(conn, pres_cookie, NULL);
   amd_reply = xcb_query_extension_reply(conn, amd_cookie, NULL);
   nv_reply = xcb_query_extension_reply(conn, nv_cookie, NULL);
   if (!dri3_reply || !pres_reply) {
      free(dri3_reply);
      free(pres_reply);
      free(amd_reply);
      free(nv_reply);
      vk_free(alloc, wsi_conn);
      return NULL;
   }

   wsi_conn->has_dri3 = dri3_reply->present != 0;
   wsi_conn->has_present = pres_reply->present != 0;
   wsi_conn->is_proprietary_x11 = false;
   if (amd_reply && amd_reply->present)
      wsi_conn->is_proprietary_x11 = true;
   if (nv_reply && nv_reply->present)
      wsi_conn->is_proprietary_x11 = true;

   free(dri3_reply);
   free(pres_reply);
   free(amd_reply);
   free(nv_reply);

   return wsi_conn;
}

static void
wsi_x11_connection_destroy(const VkAllocationCallbacks *alloc,
                           struct wsi_x11_connection *conn)
{
   vk_free(alloc, conn);
}

static bool
wsi_x11_check_for_dri3(struct wsi_x11_connection *wsi_conn)
{
   if (wsi_conn->has_dri3)
      return true;
   if (!wsi_conn->is_proprietary_x11) {
      fprintf(stderr, "vulkan: No DRI3 support detected - required for presentation\n"
                      "Note: you can probably enable DRI3 in your Xorg config\n");
   }
   return false;
}

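/* Returns the wsi_x11_connection for an xcb_connection_t, creating it on
 * first use.  Note the double-checked pattern: the mutex is dropped around
 * the blocking extension queries, the table is re-checked under the lock,
 * and our freshly created copy is discarded if another thread won the race.
 */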
static struct wsi_x11_connection *
wsi_x11_get_connection(struct wsi_device *wsi_dev,
                       const VkAllocationCallbacks *alloc,
                       xcb_connection_t *conn)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_dev->wsi[VK_ICD_WSI_PLATFORM_XCB];

   pthread_mutex_lock(&wsi->mutex);

   struct hash_entry *entry = _mesa_hash_table_search(wsi->connections, conn);
   if (!entry) {
      /* We're about to make a bunch of blocking calls.  Let's drop the
       * mutex for now so we don't block up too badly.
       */
      pthread_mutex_unlock(&wsi->mutex);

      struct wsi_x11_connection *wsi_conn =
         wsi_x11_connection_create(alloc, conn);
      if (!wsi_conn)
         return NULL;

      pthread_mutex_lock(&wsi->mutex);

      entry = _mesa_hash_table_search(wsi->connections, conn);
      if (entry) {
         /* Oops, someone raced us to it */
         wsi_x11_connection_destroy(alloc, wsi_conn);
      } else {
         entry = _mesa_hash_table_insert(wsi->connections, conn, wsi_conn);
      }
   }

   pthread_mutex_unlock(&wsi->mutex);

   return entry->data;
}

static const VkSurfaceFormatKHR formats[] = {
   { .format = VK_FORMAT_B8G8R8A8_SRGB, },
   { .format = VK_FORMAT_B8G8R8A8_UNORM, },
};

static const VkPresentModeKHR present_modes[] = {
   VK_PRESENT_MODE_IMMEDIATE_KHR,
   VK_PRESENT_MODE_MAILBOX_KHR,
   VK_PRESENT_MODE_FIFO_KHR,
};

static xcb_screen_t *
get_screen_for_root(xcb_connection_t *conn, xcb_window_t root)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      if (screen_iter.data->root == root)
         return screen_iter.data;
   }

   return NULL;
}

static xcb_visualtype_t *
screen_get_visualtype(xcb_screen_t *screen, xcb_visualid_t visual_id,
                      unsigned *depth)
{
   xcb_depth_iterator_t depth_iter =
      xcb_screen_allowed_depths_iterator(screen);

   for (; depth_iter.rem; xcb_depth_next(&depth_iter)) {
      xcb_visualtype_iterator_t visual_iter =
         xcb_depth_visuals_iterator(depth_iter.data);

      for (; visual_iter.rem; xcb_visualtype_next(&visual_iter)) {
         if (visual_iter.data->visual_id == visual_id) {
            if (depth)
               *depth = depth_iter.data->depth;
            return visual_iter.data;
         }
      }
   }

   return NULL;
}

static xcb_visualtype_t *
connection_get_visualtype(xcb_connection_t *conn, xcb_visualid_t visual_id,
                          unsigned *depth)
{
   xcb_screen_iterator_t screen_iter =
      xcb_setup_roots_iterator(xcb_get_setup(conn));

   /* For this we have to iterate over all of the screens which is rather
    * annoying.  Fortunately, there is probably only 1.
    */
   for (; screen_iter.rem; xcb_screen_next(&screen_iter)) {
      xcb_visualtype_t *visual = screen_get_visualtype(screen_iter.data,
                                                       visual_id, depth);
      if (visual)
         return visual;
   }

   return NULL;
}

static xcb_visualtype_t *
get_visualtype_for_window(xcb_connection_t *conn, xcb_window_t window,
                          unsigned *depth)
{
   xcb_query_tree_cookie_t tree_cookie;
   xcb_get_window_attributes_cookie_t attrib_cookie;
   xcb_query_tree_reply_t *tree;
   xcb_get_window_attributes_reply_t *attrib;

   tree_cookie = xcb_query_tree(conn, window);
   attrib_cookie = xcb_get_window_attributes(conn, window);

   tree = xcb_query_tree_reply(conn, tree_cookie, NULL);
   attrib = xcb_get_window_attributes_reply(conn, attrib_cookie, NULL);
   if (attrib == NULL || tree == NULL) {
      free(attrib);
      free(tree);
      return NULL;
   }

   xcb_window_t root = tree->root;
   xcb_visualid_t visual_id = attrib->visual;
   free(attrib);
   free(tree);

   xcb_screen_t *screen = get_screen_for_root(conn, root);
   if (screen == NULL)
      return NULL;

   return screen_get_visualtype(screen, visual_id, depth);
}

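/* Worked example: a typical depth-32 visual has red/green/blue masks of
 * 0x00ff0000, 0x0000ff00, and 0x000000ff, so rgb_mask is 0x00ffffff while
 * all_mask is 0xffffffff; the leftover 0xff000000 bits are alpha and we
 * return true.  A depth-24 visual with the same RGB masks yields
 * all_mask == 0x00ffffff, leaving nothing over.
 */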
static bool
visual_has_alpha(xcb_visualtype_t *visual, unsigned depth)
{
   uint32_t rgb_mask = visual->red_mask |
                       visual->green_mask |
                       visual->blue_mask;

   uint32_t all_mask = 0xffffffff >> (32 - depth);

   /* Do we have bits left over after RGB? */
   return (all_mask & ~rgb_mask) != 0;
}

VkBool32 wsi_get_physical_device_xcb_presentation_support(
    struct wsi_device *wsi_device,
    VkAllocationCallbacks *alloc,
    uint32_t                                    queueFamilyIndex,
    xcb_connection_t*                           connection,
    xcb_visualid_t                              visual_id)
{
   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, alloc, connection);

   if (!wsi_conn)
      return false;

   if (!wsi_x11_check_for_dri3(wsi_conn))
      return false;

   unsigned visual_depth;
   if (!connection_get_visualtype(connection, visual_id, &visual_depth))
      return false;

   if (visual_depth != 24 && visual_depth != 32)
      return false;

   return true;
}

static xcb_connection_t*
x11_surface_get_connection(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return XGetXCBConnection(((VkIcdSurfaceXlib *)icd_surface)->dpy);
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->connection;
}

static xcb_window_t
x11_surface_get_window(VkIcdSurfaceBase *icd_surface)
{
   if (icd_surface->platform == VK_ICD_WSI_PLATFORM_XLIB)
      return ((VkIcdSurfaceXlib *)icd_surface)->window;
   else
      return ((VkIcdSurfaceXcb *)icd_surface)->window;
}

static VkResult
x11_surface_get_support(VkIcdSurfaceBase *icd_surface,
                        struct wsi_device *wsi_device,
                        const VkAllocationCallbacks *alloc,
                        uint32_t queueFamilyIndex,
                        VkBool32* pSupported)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);

   struct wsi_x11_connection *wsi_conn =
      wsi_x11_get_connection(wsi_device, alloc, conn);
   if (!wsi_conn)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   if (!wsi_x11_check_for_dri3(wsi_conn)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   unsigned visual_depth;
   if (!get_visualtype_for_window(conn, window, &visual_depth)) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   if (visual_depth != 24 && visual_depth != 32) {
      *pSupported = false;
      return VK_SUCCESS;
   }

   *pSupported = true;
   return VK_SUCCESS;
}

static VkResult
x11_surface_get_capabilities(VkIcdSurfaceBase *icd_surface,
                             VkSurfaceCapabilitiesKHR *caps)
{
   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_cookie_t geom_cookie;
   xcb_generic_error_t *err;
   xcb_get_geometry_reply_t *geom;
   unsigned visual_depth;

   geom_cookie = xcb_get_geometry(conn, window);

   /* This does a round-trip.  This is why we do get_geometry first and
    * wait to read the reply until after we have a visual.
    */
   xcb_visualtype_t *visual =
      get_visualtype_for_window(conn, window, &visual_depth);

   if (!visual)
      return VK_ERROR_SURFACE_LOST_KHR;

   geom = xcb_get_geometry_reply(conn, geom_cookie, &err);
   if (geom) {
      VkExtent2D extent = { geom->width, geom->height };
      caps->currentExtent = extent;
      caps->minImageExtent = extent;
      caps->maxImageExtent = extent;
   } else {
      /* This can happen if the client didn't wait for the configure event
       * to come back from the compositor.  In that case, we don't know the
       * size of the window so we just return valid "I don't know" stuff.
       */
      caps->currentExtent = (VkExtent2D) { -1, -1 };
      caps->minImageExtent = (VkExtent2D) { 1, 1 };
      /* This is the maximum supported size on Intel */
      caps->maxImageExtent = (VkExtent2D) { 1 << 14, 1 << 14 };
   }
   free(err);
   free(geom);

   if (visual_has_alpha(visual, visual_depth)) {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_PRE_MULTIPLIED_BIT_KHR;
   } else {
      caps->supportedCompositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR |
                                      VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR;
   }

   /* For true mailbox mode, we need at least 4 images:
    *  1) One to scan out from
    *  2) One to have queued for scan-out
    *  3) One to be currently held by the X server
    *  4) One to render to
    */
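   /* Presumably 2 rather than 4 because double-buffered FIFO is the common
    * case; mailbox clients are free to request more images.
    */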
   caps->minImageCount = 2;
   /* There is no real maximum */
   caps->maxImageCount = 0;

   caps->supportedTransforms = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->currentTransform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
   caps->maxImageArrayLayers = 1;
   caps->supportedUsageFlags =
      VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
      VK_IMAGE_USAGE_SAMPLED_BIT |
      VK_IMAGE_USAGE_TRANSFER_DST_BIT |
      VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT;

   return VK_SUCCESS;
}

static VkResult
x11_surface_get_formats(VkIcdSurfaceBase *surface,
                        struct wsi_device *wsi_device,
                        uint32_t *pSurfaceFormatCount,
                        VkSurfaceFormatKHR *pSurfaceFormats)
{
   if (pSurfaceFormats == NULL) {
      *pSurfaceFormatCount = ARRAY_SIZE(formats);
      return VK_SUCCESS;
   }

   *pSurfaceFormatCount = MIN2(*pSurfaceFormatCount, ARRAY_SIZE(formats));
   typed_memcpy(pSurfaceFormats, formats, *pSurfaceFormatCount);

   return *pSurfaceFormatCount < ARRAY_SIZE(formats) ?
      VK_INCOMPLETE : VK_SUCCESS;
}

static VkResult
x11_surface_get_present_modes(VkIcdSurfaceBase *surface,
                              uint32_t *pPresentModeCount,
                              VkPresentModeKHR *pPresentModes)
{
   if (pPresentModes == NULL) {
      *pPresentModeCount = ARRAY_SIZE(present_modes);
      return VK_SUCCESS;
   }

   *pPresentModeCount = MIN2(*pPresentModeCount, ARRAY_SIZE(present_modes));
   typed_memcpy(pPresentModes, present_modes, *pPresentModeCount);

   return *pPresentModeCount < ARRAY_SIZE(present_modes) ?
      VK_INCOMPLETE : VK_SUCCESS;
}

VkResult wsi_create_xcb_surface(const VkAllocationCallbacks *pAllocator,
                                const VkXcbSurfaceCreateInfoKHR *pCreateInfo,
                                VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXcb *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XCB;
   surface->connection = pCreateInfo->connection;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

VkResult wsi_create_xlib_surface(const VkAllocationCallbacks *pAllocator,
                                 const VkXlibSurfaceCreateInfoKHR *pCreateInfo,
                                 VkSurfaceKHR *pSurface)
{
   VkIcdSurfaceXlib *surface;

   surface = vk_alloc(pAllocator, sizeof *surface, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (surface == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   surface->base.platform = VK_ICD_WSI_PLATFORM_XLIB;
   surface->dpy = pCreateInfo->dpy;
   surface->window = pCreateInfo->window;

   *pSurface = VkIcdSurfaceBase_to_handle(&surface->base);
   return VK_SUCCESS;
}

struct x11_image {
   VkImage                                   image;
   VkDeviceMemory                            memory;
   xcb_pixmap_t                              pixmap;
   bool                                      busy;
   struct xshmfence *                        shm_fence;
   uint32_t                                  sync_fence;
};

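/* Terminology below comes from the Present extension: "msc" is the Media
 * Stream Counter (a vblank counter) and "sbc" is the Swap Buffer Count,
 * i.e. the number of xcb_present_pixmap() requests sent on this window.
 */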
struct x11_swapchain {
   struct wsi_swapchain                         base;

   xcb_connection_t *                           conn;
   xcb_window_t                                 window;
   xcb_gc_t                                     gc;
   uint32_t                                     depth;
   VkExtent2D                                   extent;
   uint32_t                                     image_count;

   xcb_present_event_t                          event_id;
   xcb_special_event_t *                        special_event;
   uint64_t                                     send_sbc;
   uint64_t                                     last_present_msc;
   uint32_t                                     stamp;

   bool                                         threaded;
   VkResult                                     status;
   struct wsi_queue                             present_queue;
   struct wsi_queue                             acquire_queue;
   pthread_t                                    queue_manager;

   struct x11_image                             images[0];
};

static VkResult
x11_get_images(struct wsi_swapchain *anv_chain,
               uint32_t* pCount, VkImage *pSwapchainImages)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   uint32_t ret_count;
   VkResult result;

   if (pSwapchainImages == NULL) {
      *pCount = chain->image_count;
      return VK_SUCCESS;
   }

   result = VK_SUCCESS;
   ret_count = chain->image_count;
   if (chain->image_count > *pCount) {
      ret_count = *pCount;
      result = VK_INCOMPLETE;
   }

   for (uint32_t i = 0; i < ret_count; i++)
      pSwapchainImages[i] = chain->images[i].image;

   return result;
}

static VkResult
x11_handle_dri3_present_event(struct x11_swapchain *chain,
                              xcb_present_generic_event_t *event)
{
   switch (event->evtype) {
   case XCB_PRESENT_CONFIGURE_NOTIFY: {
      xcb_present_configure_notify_event_t *config = (void *) event;

      if (config->width != chain->extent.width ||
          config->height != chain->extent.height)
         return VK_ERROR_OUT_OF_DATE_KHR;

      break;
   }

   case XCB_PRESENT_EVENT_IDLE_NOTIFY: {
      xcb_present_idle_notify_event_t *idle = (void *) event;

      for (unsigned i = 0; i < chain->image_count; i++) {
         if (chain->images[i].pixmap == idle->pixmap) {
            chain->images[i].busy = false;
            if (chain->threaded)
               wsi_queue_push(&chain->acquire_queue, i);
            break;
         }
      }

      break;
   }

   case XCB_PRESENT_EVENT_COMPLETE_NOTIFY: {
      xcb_present_complete_notify_event_t *complete = (void *) event;
      if (complete->kind == XCB_PRESENT_COMPLETE_KIND_PIXMAP)
         chain->last_present_msc = complete->msc;
      break;
   }

   default:
      break;
   }

   return VK_SUCCESS;
}

static uint64_t wsi_get_current_time(void)
{
   uint64_t current_time;
   struct timespec tv;

   clock_gettime(CLOCK_MONOTONIC, &tv);
   current_time = tv.tv_nsec + tv.tv_sec*1000000000ull;
   return current_time;
}

static uint64_t wsi_get_absolute_timeout(uint64_t timeout)
{
   uint64_t current_time = wsi_get_current_time();

   timeout = MIN2(UINT64_MAX - current_time, timeout);

   return current_time + timeout;
}

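/* Single-threaded acquire path: scan for an idle image and, failing that,
 * pump the special event queue until an IDLE_NOTIFY frees one.  With a
 * finite timeout we cannot sit in xcb_wait_for_special_event(), so we
 * poll() the connection's fd and recheck after every wakeup.
 */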
static VkResult
x11_acquire_next_image_poll_x11(struct x11_swapchain *chain,
                                uint32_t *image_index, uint64_t timeout)
{
   xcb_generic_event_t *event;
   struct pollfd pfds;
   uint64_t atimeout;
   while (1) {
      for (uint32_t i = 0; i < chain->image_count; i++) {
         if (!chain->images[i].busy) {
            /* We found a non-busy image */
            xshmfence_await(chain->images[i].shm_fence);
            *image_index = i;
            chain->images[i].busy = true;
            return VK_SUCCESS;
         }
      }

      xcb_flush(chain->conn);

      if (timeout == UINT64_MAX) {
         event = xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event)
            return VK_ERROR_OUT_OF_DATE_KHR;
      } else {
         event = xcb_poll_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            int ret;
            if (timeout == 0)
               return VK_NOT_READY;

            atimeout = wsi_get_absolute_timeout(timeout);

            pfds.fd = xcb_get_file_descriptor(chain->conn);
            pfds.events = POLLIN;
            ret = poll(&pfds, 1, timeout / 1000 / 1000);
            if (ret == 0)
               return VK_TIMEOUT;
            if (ret == -1)
               return VK_ERROR_OUT_OF_DATE_KHR;

            /* A non-special event can also make the fd readable, in which
             * case poll() wakes us without producing a special event, so
             * recompute the remaining timeout before looping.
             */
            uint64_t current_time = wsi_get_current_time();
            if (atimeout > current_time)
               timeout = atimeout - current_time;
            else
               timeout = 0;
            continue;
         }
      }

      VkResult result = x11_handle_dri3_present_event(chain, (void *)event);
      free(event);
      if (result != VK_SUCCESS)
         return result;
   }
}

static VkResult
x11_acquire_next_image_from_queue(struct x11_swapchain *chain,
                                  uint32_t *image_index_out, uint64_t timeout)
{
   assert(chain->threaded);

   uint32_t image_index;
   VkResult result = wsi_queue_pull(&chain->acquire_queue,
                                    &image_index, timeout);
   if (result != VK_SUCCESS) {
      return result;
   } else if (chain->status != VK_SUCCESS) {
      return chain->status;
   }

   assert(image_index < chain->image_count);
   xshmfence_await(chain->images[image_index].shm_fence);

   *image_index_out = image_index;

   return VK_SUCCESS;
}

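/* Submits the actual PresentPixmap request.  The shm fence is reset first
 * so that a later acquire of this image blocks until the server marks it
 * idle again; IMMEDIATE mode adds OPTION_ASYNC so the present need not
 * wait for vblank.
 */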
static VkResult
x11_present_to_x11(struct x11_swapchain *chain, uint32_t image_index,
                   uint32_t target_msc)
{
   struct x11_image *image = &chain->images[image_index];

   assert(image_index < chain->image_count);

   uint32_t options = XCB_PRESENT_OPTION_NONE;

   int64_t divisor = 0;
   int64_t remainder = 0;

   if (chain->base.present_mode == VK_PRESENT_MODE_IMMEDIATE_KHR)
      options |= XCB_PRESENT_OPTION_ASYNC;

   xshmfence_reset(image->shm_fence);

   ++chain->send_sbc;
   xcb_void_cookie_t cookie =
      xcb_present_pixmap(chain->conn,
                         chain->window,
                         image->pixmap,
                         (uint32_t) chain->send_sbc,
                         0,                                    /* valid */
                         0,                                    /* update */
                         0,                                    /* x_off */
                         0,                                    /* y_off */
                         XCB_NONE,                             /* target_crtc */
                         XCB_NONE,                             /* wait_fence */
                         image->sync_fence,                    /* idle_fence */
                         options,
                         target_msc,
                         divisor,
                         remainder, 0, NULL);
   xcb_discard_reply(chain->conn, cookie.sequence);
   image->busy = true;

   xcb_flush(chain->conn);

   return VK_SUCCESS;
}

static VkResult
x11_acquire_next_image(struct wsi_swapchain *anv_chain,
                       uint64_t timeout,
                       VkSemaphore semaphore,
                       uint32_t *image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (chain->threaded) {
      return x11_acquire_next_image_from_queue(chain, image_index, timeout);
   } else {
      return x11_acquire_next_image_poll_x11(chain, image_index, timeout);
   }
}

static VkResult
x11_queue_present(struct wsi_swapchain *anv_chain,
                  uint32_t image_index)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;

   if (chain->threaded) {
      wsi_queue_push(&chain->present_queue, image_index);
      return chain->status;
   } else {
      return x11_present_to_x11(chain, image_index, 0);
   }
}

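/* Body of the queue-manager thread used in FIFO mode.  Presents are
 * serialized here: each one targets last_present_msc + 1, and the thread
 * blocks on the special event queue until that present completes, which
 * is what gives FIFO its one-present-per-vblank pacing.
 */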
static void *
x11_manage_fifo_queues(void *state)
{
   struct x11_swapchain *chain = state;
   VkResult result;

   assert(chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR);

   while (chain->status == VK_SUCCESS) {
      /* It should be safe to unconditionally block here.  Later in the
       * loop we block until the previous present has landed on-screen.
       * At that point, we should have received IDLE_NOTIFY on all images
       * presented before that point, so the client should be able to
       * acquire any image other than the currently presented one.
       */
      uint32_t image_index;
      result = wsi_queue_pull(&chain->present_queue, &image_index, INT64_MAX);
      if (result != VK_SUCCESS) {
         goto fail;
      } else if (chain->status != VK_SUCCESS) {
         return NULL;
      }

      uint64_t target_msc = chain->last_present_msc + 1;
      result = x11_present_to_x11(chain, image_index, target_msc);
      if (result != VK_SUCCESS)
         goto fail;

      while (chain->last_present_msc < target_msc) {
         xcb_generic_event_t *event =
            xcb_wait_for_special_event(chain->conn, chain->special_event);
         if (!event) {
            /* Don't leave chain->status as VK_SUCCESS on this error path. */
            result = VK_ERROR_OUT_OF_DATE_KHR;
            goto fail;
         }

         result = x11_handle_dri3_present_event(chain, (void *)event);
         if (result != VK_SUCCESS)
            goto fail;
      }
   }

fail:
   chain->status = result;
   wsi_queue_push(&chain->acquire_queue, UINT32_MAX);

   return NULL;
}

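/* Creates one swapchain image: the driver allocates the memory and exports
 * a dma-buf fd, which DRI3 turns into a server-side pixmap.  Each image
 * also gets a shared-memory fence (xshmfence) that is imported as the DRI3
 * sync fence, so both client and server can wait on and trigger it.
 */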
static VkResult
x11_image_init(VkDevice device_h, struct x11_swapchain *chain,
               const VkSwapchainCreateInfoKHR *pCreateInfo,
               const VkAllocationCallbacks* pAllocator,
               struct x11_image *image)
{
   xcb_void_cookie_t cookie;
   VkResult result;
   uint32_t row_pitch;
   uint32_t offset;
   uint32_t bpp = 32;
   int fd;
   uint32_t size;

   result = chain->base.image_fns->create_wsi_image(device_h,
                                                    pCreateInfo,
                                                    pAllocator,
                                                    &image->image,
                                                    &image->memory,
                                                    &size,
                                                    &offset,
                                                    &row_pitch,
                                                    &fd);
   if (result != VK_SUCCESS)
      return result;

   image->pixmap = xcb_generate_id(chain->conn);

   cookie =
      xcb_dri3_pixmap_from_buffer_checked(chain->conn,
                                          image->pixmap,
                                          chain->window,
                                          size,
                                          pCreateInfo->imageExtent.width,
                                          pCreateInfo->imageExtent.height,
                                          row_pitch,
                                          chain->depth, bpp, fd);
   xcb_discard_reply(chain->conn, cookie.sequence);

   int fence_fd = xshmfence_alloc_shm();
   if (fence_fd < 0) {
      /* Set an error so we don't return VK_SUCCESS from the fail path. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_pixmap;
   }

   image->shm_fence = xshmfence_map_shm(fence_fd);
   if (image->shm_fence == NULL) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_shmfence_alloc;
   }

   image->sync_fence = xcb_generate_id(chain->conn);
   xcb_dri3_fence_from_fd(chain->conn,
                          image->pixmap,
                          image->sync_fence,
                          false,
                          fence_fd);

   image->busy = false;
   xshmfence_trigger(image->shm_fence);

   return VK_SUCCESS;

fail_shmfence_alloc:
   close(fence_fd);

fail_pixmap:
   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   chain->base.image_fns->free_wsi_image(device_h, pAllocator,
                                         image->image, image->memory);

   return result;
}

static void
x11_image_finish(struct x11_swapchain *chain,
                 const VkAllocationCallbacks* pAllocator,
                 struct x11_image *image)
{
   xcb_void_cookie_t cookie;

   cookie = xcb_sync_destroy_fence(chain->conn, image->sync_fence);
   xcb_discard_reply(chain->conn, cookie.sequence);
   xshmfence_unmap_shm(image->shm_fence);

   cookie = xcb_free_pixmap(chain->conn, image->pixmap);
   xcb_discard_reply(chain->conn, cookie.sequence);

   chain->base.image_fns->free_wsi_image(chain->base.device, pAllocator,
                                         image->image, image->memory);
}

static VkResult
x11_swapchain_destroy(struct wsi_swapchain *anv_chain,
                      const VkAllocationCallbacks *pAllocator)
{
   struct x11_swapchain *chain = (struct x11_swapchain *)anv_chain;
   xcb_void_cookie_t cookie;

   for (uint32_t i = 0; i < chain->image_count; i++)
      x11_image_finish(chain, pAllocator, &chain->images[i]);

   if (chain->threaded) {
      chain->status = VK_ERROR_OUT_OF_DATE_KHR;
      /* Push a UINT32_MAX to wake up the manager */
      wsi_queue_push(&chain->present_queue, UINT32_MAX);
      pthread_join(chain->queue_manager, NULL);
      wsi_queue_destroy(&chain->acquire_queue);
      wsi_queue_destroy(&chain->present_queue);
   }

   xcb_unregister_for_special_event(chain->conn, chain->special_event);
   cookie = xcb_present_select_input_checked(chain->conn, chain->event_id,
                                             chain->window,
                                             XCB_PRESENT_EVENT_MASK_NO_EVENT);
   xcb_discard_reply(chain->conn, cookie.sequence);

   vk_free(pAllocator, chain);

   return VK_SUCCESS;
}

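/* Builds the swapchain: registers for Present special events on the
 * window, allocates a GC, initializes every image, and, in FIFO mode,
 * spins up the queue-manager thread along with its acquire and present
 * queues.
 */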
static VkResult
x11_surface_create_swapchain(VkIcdSurfaceBase *icd_surface,
                             VkDevice device,
                             struct wsi_device *wsi_device,
                             const VkSwapchainCreateInfoKHR *pCreateInfo,
                             const VkAllocationCallbacks* pAllocator,
                             const struct wsi_image_fns *image_fns,
                             struct wsi_swapchain **swapchain_out)
{
   struct x11_swapchain *chain;
   xcb_void_cookie_t cookie;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR);

   const unsigned num_images = pCreateInfo->minImageCount;

   size_t size = sizeof(*chain) + num_images * sizeof(chain->images[0]);
   chain = vk_alloc(pAllocator, size, 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (chain == NULL)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   xcb_connection_t *conn = x11_surface_get_connection(icd_surface);
   xcb_window_t window = x11_surface_get_window(icd_surface);
   xcb_get_geometry_reply_t *geometry =
      xcb_get_geometry_reply(conn, xcb_get_geometry(conn, window), NULL);

   if (geometry == NULL) {
      /* Don't leak the swapchain allocation on this early exit. */
      vk_free(pAllocator, chain);
      return VK_ERROR_SURFACE_LOST_KHR;
   }

   chain->base.device = device;
   chain->base.destroy = x11_swapchain_destroy;
   chain->base.get_images = x11_get_images;
   chain->base.acquire_next_image = x11_acquire_next_image;
   chain->base.queue_present = x11_queue_present;
   chain->base.image_fns = image_fns;
   chain->base.present_mode = pCreateInfo->presentMode;
   chain->conn = conn;
   chain->window = window;
   chain->depth = geometry->depth;
   chain->extent = pCreateInfo->imageExtent;
   chain->image_count = num_images;
   chain->send_sbc = 0;
   chain->last_present_msc = 0;
   chain->threaded = false;
   chain->status = VK_SUCCESS;

   free(geometry);

   chain->event_id = xcb_generate_id(chain->conn);
   xcb_present_select_input(chain->conn, chain->event_id, chain->window,
                            XCB_PRESENT_EVENT_MASK_CONFIGURE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_COMPLETE_NOTIFY |
                            XCB_PRESENT_EVENT_MASK_IDLE_NOTIFY);

   /* Create an XCB event queue to hold present events outside of the usual
    * application event queue
    */
   chain->special_event =
      xcb_register_for_special_xge(chain->conn, &xcb_present_id,
                                   chain->event_id, NULL);

   chain->gc = xcb_generate_id(chain->conn);
   if (!chain->gc) {
      /* FINISHME: Choose a better error. */
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_register;
   }

   cookie = xcb_create_gc(chain->conn,
                          chain->gc,
                          chain->window,
                          XCB_GC_GRAPHICS_EXPOSURES,
                          (uint32_t []) { 0 });
   xcb_discard_reply(chain->conn, cookie.sequence);

   uint32_t image = 0;
   for (; image < chain->image_count; image++) {
      result = x11_image_init(device, chain, pCreateInfo, pAllocator,
                              &chain->images[image]);
      if (result != VK_SUCCESS)
         goto fail_init_images;
   }

   if (chain->base.present_mode == VK_PRESENT_MODE_FIFO_KHR) {
      chain->threaded = true;

      /* Initialize our queues.  We make them image_count + 1 because we will
       * occasionally use UINT32_MAX to signal the other thread that an error
       * has occurred and we don't want an overflow.
       */
      int ret;
      ret = wsi_queue_init(&chain->acquire_queue, chain->image_count + 1);
      if (ret) {
         /* Set result so the fail path doesn't return VK_SUCCESS. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail_init_images;
      }

      ret = wsi_queue_init(&chain->present_queue, chain->image_count + 1);
      if (ret) {
         wsi_queue_destroy(&chain->acquire_queue);
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail_init_images;
      }

      for (unsigned i = 0; i < chain->image_count; i++)
         wsi_queue_push(&chain->acquire_queue, i);

      ret = pthread_create(&chain->queue_manager, NULL,
                           x11_manage_fifo_queues, chain);
      if (ret) {
         wsi_queue_destroy(&chain->present_queue);
         wsi_queue_destroy(&chain->acquire_queue);
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
         goto fail_init_images;
      }
   }

   *swapchain_out = &chain->base;

   return VK_SUCCESS;

fail_init_images:
   for (uint32_t j = 0; j < image; j++)
      x11_image_finish(chain, pAllocator, &chain->images[j]);

fail_register:
   xcb_unregister_for_special_event(chain->conn, chain->special_event);

   vk_free(pAllocator, chain);

   return result;
}

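/* Called at instance creation.  A single wsi_x11 instance backs both the
 * XCB and Xlib platforms; Xlib surfaces are handled by translating their
 * Display to an xcb_connection_t via XGetXCBConnection() above.
 */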
VkResult
wsi_x11_init_wsi(struct wsi_device *wsi_device,
                 const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi;
   VkResult result;

   wsi = vk_alloc(alloc, sizeof(*wsi), 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!wsi) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail;
   }

   int ret = pthread_mutex_init(&wsi->mutex, NULL);
   if (ret != 0) {
      if (ret == ENOMEM) {
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      } else {
         /* FINISHME: Choose a better error. */
         result = VK_ERROR_OUT_OF_HOST_MEMORY;
      }

      goto fail_alloc;
   }

   wsi->connections = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                              _mesa_key_pointer_equal);
   if (!wsi->connections) {
      result = VK_ERROR_OUT_OF_HOST_MEMORY;
      goto fail_mutex;
   }

   wsi->base.get_support = x11_surface_get_support;
   wsi->base.get_capabilities = x11_surface_get_capabilities;
   wsi->base.get_formats = x11_surface_get_formats;
   wsi->base.get_present_modes = x11_surface_get_present_modes;
   wsi->base.create_swapchain = x11_surface_create_swapchain;

   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = &wsi->base;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = &wsi->base;

   return VK_SUCCESS;

fail_mutex:
   pthread_mutex_destroy(&wsi->mutex);
fail_alloc:
   vk_free(alloc, wsi);
fail:
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB] = NULL;
   wsi_device->wsi[VK_ICD_WSI_PLATFORM_XLIB] = NULL;

   return result;
}

void
wsi_x11_finish_wsi(struct wsi_device *wsi_device,
                   const VkAllocationCallbacks *alloc)
{
   struct wsi_x11 *wsi =
      (struct wsi_x11 *)wsi_device->wsi[VK_ICD_WSI_PLATFORM_XCB];

   if (wsi) {
      struct hash_entry *entry;
      hash_table_foreach(wsi->connections, entry)
         wsi_x11_connection_destroy(alloc, entry->data);

      _mesa_hash_table_destroy(wsi->connections, NULL);

      pthread_mutex_destroy(&wsi->mutex);

      vk_free(alloc, wsi);
   }
}