      1 /**********************************************************
      2  * Copyright 2008-2015 VMware, Inc.  All rights reserved.
      3  *
      4  * Permission is hereby granted, free of charge, to any person
      5  * obtaining a copy of this software and associated documentation
      6  * files (the "Software"), to deal in the Software without
      7  * restriction, including without limitation the rights to use, copy,
      8  * modify, merge, publish, distribute, sublicense, and/or sell copies
      9  * of the Software, and to permit persons to whom the Software is
     10  * furnished to do so, subject to the following conditions:
     11  *
     12  * The above copyright notice and this permission notice shall be
     13  * included in all copies or substantial portions of the Software.
     14  *
     15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
     18  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
     19  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
     20  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
     21  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
     22  * SOFTWARE.
     23  *
     24  **********************************************************/
     25 
     26 #include "pipe/p_state.h"
     27 #include "pipe/p_context.h"
     28 
     29 #include "util/u_bitmask.h"
     30 #include "util/u_memory.h"
     31 
     32 #include "svga_cmd.h"
     33 #include "svga_context.h"
     34 #include "svga_screen.h"
     35 #include "svga_resource_buffer.h"
     36 #include "svga_winsys.h"
     37 #include "svga_debug.h"
     38 
     39 
/* FIXME: we want a public base class for all pipe structs, even if there
 * isn't much in them.
 */
     43 struct pipe_query {
     44    int dummy;
     45 };
     46 
     47 struct svga_query {
     48    struct pipe_query base;
     49    unsigned type;                  /**< PIPE_QUERY_x or SVGA_QUERY_x */
     50    SVGA3dQueryType svga_type;      /**< SVGA3D_QUERYTYPE_x or unused */
     51 
   unsigned id;                    /**< Per-context query identifier */
     53 
     54    struct pipe_fence_handle *fence;
     55 
     56    /** For PIPE_QUERY_OCCLUSION_COUNTER / SVGA3D_QUERYTYPE_OCCLUSION */
     57 
     58    /* For VGPU9 */
     59    struct svga_winsys_buffer *hwbuf;
     60    volatile SVGA3dQueryResult *queryResult;
     61 
     62    /** For VGPU10 */
     63    struct svga_winsys_gb_query *gb_query;
     64    SVGA3dDXQueryFlags flags;
     65    unsigned offset;                /**< offset to the gb_query memory */
   struct pipe_query *predicate;   /**< Associated occlusion predicate query used for predication */
     67 
     68    /** For non-GPU SVGA_QUERY_x queries */
     69    uint64_t begin_count, end_count;
     70 };
     71 
     72 
     73 /** cast wrapper */
     74 static inline struct svga_query *
     75 svga_query(struct pipe_query *q)
     76 {
     77    return (struct svga_query *)q;
     78 }
     79 
     80 /**
     81  * VGPU9
     82  */
     83 
     84 static boolean
     85 svga_get_query_result(struct pipe_context *pipe,
     86                       struct pipe_query *q,
     87                       boolean wait,
     88                       union pipe_query_result *result);
     89 
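/**
 * VGPU9: allocate the pinned winsys buffer that the host will fill in
 * with the SVGA3dQueryResult for this query.
 */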
     90 static enum pipe_error
     91 define_query_vgpu9(struct svga_context *svga,
     92                    struct svga_query *sq)
     93 {
     94    struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
     95 
     96    sq->hwbuf = svga_winsys_buffer_create(svga, 1,
     97                                          SVGA_BUFFER_USAGE_PINNED,
     98                                          sizeof *sq->queryResult);
     99    if (!sq->hwbuf)
    100       return PIPE_ERROR_OUT_OF_MEMORY;
    101 
    102    sq->queryResult = (SVGA3dQueryResult *)
    103                      sws->buffer_map(sws, sq->hwbuf, PIPE_TRANSFER_WRITE);
    104    if (!sq->queryResult) {
    105       sws->buffer_destroy(sws, sq->hwbuf);
    106       return PIPE_ERROR_OUT_OF_MEMORY;
    107    }
    108 
    109    sq->queryResult->totalSize = sizeof *sq->queryResult;
    110    sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
    111 
    112    /* We request the buffer to be pinned and assume it is always mapped.
    113     * The reason is that we don't want to wait for fences when checking the
    114     * query status.
    115     */
    116    sws->buffer_unmap(sws, sq->hwbuf);
    117 
    118    return PIPE_OK;
    119 }
    120 
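/**
 * VGPU9: begin the query.  If a previous result is still pending, wait
 * for it first, since the host may still write to the result buffer.
 */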
    121 static enum pipe_error
    122 begin_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
    123 {
    124    struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
    125    enum pipe_error ret = PIPE_OK;
    126 
    127    if (sq->queryResult->state == SVGA3D_QUERYSTATE_PENDING) {
      /* The application doesn't care about the pending query result.
       * We cannot let go of the existing buffer and just get a new one
       * because its storage may be reused for other purposes and clobbered
       * by the host when it determines the query result.  So the only
       * option here is to wait for the existing query's result -- not a
       * big deal, given that no sane application would do this.
       */
      uint64_t result;
      svga_get_query_result(&svga->pipe, &sq->base, TRUE, (void*)&result);
      assert(sq->queryResult->state != SVGA3D_QUERYSTATE_PENDING);
    138    }
    139 
    140    sq->queryResult->state = SVGA3D_QUERYSTATE_NEW;
    141    sws->fence_reference(sws, &sq->fence, NULL);
    142 
    143    ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
    144    if (ret != PIPE_OK) {
    145       svga_context_flush(svga, NULL);
    146       ret = SVGA3D_BeginQuery(svga->swc, sq->svga_type);
    147    }
    148    return ret;
    149 }
    150 
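/**
 * VGPU9: mark the query result as pending and emit the EndQuery command.
 */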
    151 static enum pipe_error
    152 end_query_vgpu9(struct svga_context *svga, struct svga_query *sq)
    153 {
    154    enum pipe_error ret = PIPE_OK;
    155 
    156    /* Set to PENDING before sending EndQuery. */
    157    sq->queryResult->state = SVGA3D_QUERYSTATE_PENDING;
    158 
    159    ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
    160    if (ret != PIPE_OK) {
    161       svga_context_flush(svga, NULL);
    162       ret = SVGA3D_EndQuery(svga->swc, sq->svga_type, sq->hwbuf);
    163    }
    164    return ret;
    165 }
    166 
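/**
 * VGPU9: fetch the query result from the pinned buffer.
 * Returns FALSE if the result is not ready yet and wait is FALSE.
 */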
    167 static boolean
    168 get_query_result_vgpu9(struct svga_context *svga, struct svga_query *sq,
    169                        boolean wait, uint64_t *result)
    170 {
    171    struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
    172    enum pipe_error ret;
    173    SVGA3dQueryState state;
    174 
    175    if (!sq->fence) {
    176       /* The query status won't be updated by the host unless
    177        * SVGA_3D_CMD_WAIT_FOR_QUERY is emitted. Unfortunately this will cause
    178        * a synchronous wait on the host.
    179        */
    180       ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
    181       if (ret != PIPE_OK) {
    182          svga_context_flush(svga, NULL);
    183          ret = SVGA3D_WaitForQuery(svga->swc, sq->svga_type, sq->hwbuf);
    184       }
      assert(ret == PIPE_OK);
    186       svga_context_flush(svga, &sq->fence);
    187       assert(sq->fence);
    188    }
    189 
    190    state = sq->queryResult->state;
    191    if (state == SVGA3D_QUERYSTATE_PENDING) {
    192       if (!wait)
    193          return FALSE;
    194       sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
    195       state = sq->queryResult->state;
    196    }
    197 
    198    assert(state == SVGA3D_QUERYSTATE_SUCCEEDED ||
    199           state == SVGA3D_QUERYSTATE_FAILED);
    200 
    201    *result = (uint64_t)sq->queryResult->result32;
    202    return TRUE;
    203 }
    204 
    205 
/**
 * VGPU10
 *
 * One query mob is allocated per context and shared by all query types.
 * The mob holds each query's state and result. Since each query result
 * type has a different length, the mob is divided into fixed-size memory
 * blocks to ease allocation management. Each memory block holds queries
 * of the same type, and multiple memory blocks can be allocated for a
 * particular query type.
 *
 * Currently each memory block is 184 bytes and we support up to 128
 * memory blocks; the query memory size is arbitrary right now.
 * Each occlusion query takes about 8 bytes, so one memory block can
 * accommodate 23 occlusion queries and 128 blocks can support up to
 * 2944 occlusion queries. That seems reasonable for now. If this limit
 * turns out to be too small, we can increase it or grow the mob at runtime.
 * Note that the SVGA device does not impose a one-mob-per-context limit
 * for queries; we could allocate multiple mobs for queries, but the WDDM
 * KMD does not currently support that.
 *
 * Also note that the GL guest driver does not issue any of the
 * following commands: DXMoveQuery, DXBindAllQuery & DXReadbackAllQuery.
 */
    229 #define SVGA_QUERY_MEM_BLOCK_SIZE    (sizeof(SVGADXQueryResultUnion) * 2)
    230 #define SVGA_QUERY_MEM_SIZE          (128 * SVGA_QUERY_MEM_BLOCK_SIZE)
    231 
    232 struct svga_qmem_alloc_entry
    233 {
    234    unsigned start_offset;               /* start offset of the memory block */
    235    unsigned block_index;                /* block index of the memory block */
    236    unsigned query_size;                 /* query size in this memory block */
    237    unsigned nquery;                     /* number of queries allocated */
    238    struct util_bitmask *alloc_mask;     /* allocation mask */
    239    struct svga_qmem_alloc_entry *next;  /* next memory block */
    240 };
    241 
    242 
    243 /**
    244  * Allocate a memory block from the query object memory
    245  * \return -1 if out of memory, else index of the query memory block
    246  */
    247 static int
    248 allocate_query_block(struct svga_context *svga)
    249 {
    250    int index;
    251    unsigned offset;
    252 
    253    /* Find the next available query block */
    254    index = util_bitmask_add(svga->gb_query_alloc_mask);
    255 
    256    if (index == UTIL_BITMASK_INVALID_INDEX)
    257       return -1;
    258 
    259    offset = index * SVGA_QUERY_MEM_BLOCK_SIZE;
    260    if (offset >= svga->gb_query_len) {
    261       unsigned i;
    262 
      /**
       * All the memory blocks have been allocated; let's see if there is
       * an empty memory block around that can be recycled.
       */
    267       index = -1;
    268       for (i = 0; i < SVGA3D_QUERYTYPE_MAX && index == -1; i++) {
    269          struct svga_qmem_alloc_entry *alloc_entry;
    270          struct svga_qmem_alloc_entry *prev_alloc_entry = NULL;
    271 
    272          alloc_entry = svga->gb_query_map[i];
    273          while (alloc_entry && index == -1) {
    274             if (alloc_entry->nquery == 0) {
    275                /* This memory block is empty, it can be recycled. */
    276                if (prev_alloc_entry) {
    277                   prev_alloc_entry->next = alloc_entry->next;
    278                } else {
    279                   svga->gb_query_map[i] = alloc_entry->next;
    280                }
    281                index = alloc_entry->block_index;
    282             } else {
    283                prev_alloc_entry = alloc_entry;
    284                alloc_entry = alloc_entry->next;
    285             }
    286          }
    287       }
    288    }
    289 
    290    return index;
    291 }
    292 
    293 /**
    294  * Allocate a slot in the specified memory block.
    295  * All slots in this memory block are of the same size.
    296  *
    297  * \return -1 if out of memory, else index of the query slot
    298  */
    299 static int
    300 allocate_query_slot(struct svga_context *svga,
    301                     struct svga_qmem_alloc_entry *alloc)
    302 {
    303    int index;
    304    unsigned offset;
    305 
    306    /* Find the next available slot */
    307    index = util_bitmask_add(alloc->alloc_mask);
    308 
    309    if (index == UTIL_BITMASK_INVALID_INDEX)
    310       return -1;
    311 
    312    offset = index * alloc->query_size;
    313    if (offset >= SVGA_QUERY_MEM_BLOCK_SIZE)
    314       return -1;
    315 
    316    alloc->nquery++;
    317 
    318    return index;
    319 }
    320 
/**
 * Deallocate the specified slot in the memory block.
 * If all slots are freed, the now-empty memory block is left in place;
 * it will be recycled by allocate_query_block() when no more blocks
 * can be allocated.
 */
    326 static void
    327 deallocate_query_slot(struct svga_context *svga,
    328                       struct svga_qmem_alloc_entry *alloc,
    329                       unsigned index)
    330 {
    331    assert(index != UTIL_BITMASK_INVALID_INDEX);
    332 
    333    util_bitmask_clear(alloc->alloc_mask, index);
    334    alloc->nquery--;
    335 
   /**
    * Don't worry about deallocating the empty memory block here.
    * The empty memory block will be recycled when no more memory blocks
    * can be allocated.
    */
    341 }
    342 
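/**
 * Allocate a new memory block from the query mob and create an allocation
 * entry for it, to manage slots of the given query size.
 */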
    343 static struct svga_qmem_alloc_entry *
    344 allocate_query_block_entry(struct svga_context *svga,
    345                            unsigned len)
    346 {
    347    struct svga_qmem_alloc_entry *alloc_entry;
    348    int block_index = -1;
    349 
    350    block_index = allocate_query_block(svga);
    351    if (block_index == -1)
    352       return NULL;
    353    alloc_entry = CALLOC_STRUCT(svga_qmem_alloc_entry);
    354    if (!alloc_entry)
    355       return NULL;
    356 
    357    alloc_entry->block_index = block_index;
    358    alloc_entry->start_offset = block_index * SVGA_QUERY_MEM_BLOCK_SIZE;
    359    alloc_entry->nquery = 0;
    360    alloc_entry->alloc_mask = util_bitmask_create();
    361    alloc_entry->next = NULL;
    362    alloc_entry->query_size = len;
    363 
    364    return alloc_entry;
    365 }
    366 
    367 /**
    368  * Allocate a memory slot for a query of the specified type.
    369  * It will first search through the memory blocks that are allocated
    370  * for the query type. If no memory slot is available, it will try
    371  * to allocate another memory block within the query object memory for
    372  * this query type.
    373  */
    374 static int
    375 allocate_query(struct svga_context *svga,
    376                SVGA3dQueryType type,
    377                unsigned len)
    378 {
    379    struct svga_qmem_alloc_entry *alloc_entry;
    380    int slot_index = -1;
    381    unsigned offset;
    382 
    383    assert(type < SVGA3D_QUERYTYPE_MAX);
    384 
    385    alloc_entry = svga->gb_query_map[type];
    386 
    387    if (!alloc_entry) {
    388       /**
    389        * No query memory block has been allocated for this query type,
    390        * allocate one now
    391        */
    392       alloc_entry = allocate_query_block_entry(svga, len);
    393       if (!alloc_entry)
    394          return -1;
    395       svga->gb_query_map[type] = alloc_entry;
    396    }
    397 
    398    /* Allocate a slot within the memory block allocated for this query type */
    399    slot_index = allocate_query_slot(svga, alloc_entry);
    400 
    401    if (slot_index == -1) {
    402       /* This query memory block is full, allocate another one */
    403       alloc_entry = allocate_query_block_entry(svga, len);
    404       if (!alloc_entry)
    405          return -1;
    406       alloc_entry->next = svga->gb_query_map[type];
    407       svga->gb_query_map[type] = alloc_entry;
    408       slot_index = allocate_query_slot(svga, alloc_entry);
    409    }
    410 
    411    assert(slot_index != -1);
    412    offset = slot_index * len + alloc_entry->start_offset;
    413 
    414    return offset;
    415 }
    416 
    417 
/**
 * Deallocate the memory slot allocated for the specified query.
 */
    421 static void
    422 deallocate_query(struct svga_context *svga,
    423                  struct svga_query *sq)
    424 {
    425    struct svga_qmem_alloc_entry *alloc_entry;
    426    unsigned slot_index;
    427    unsigned offset = sq->offset;
    428 
    429    alloc_entry = svga->gb_query_map[sq->svga_type];
    430 
    431    while (alloc_entry) {
    432       if (offset >= alloc_entry->start_offset &&
    433           offset < alloc_entry->start_offset + SVGA_QUERY_MEM_BLOCK_SIZE) {
    434 
    435          /* The slot belongs to this memory block, deallocate it */
    436          slot_index = (offset - alloc_entry->start_offset) /
    437                       alloc_entry->query_size;
    438          deallocate_query_slot(svga, alloc_entry, slot_index);
    439          alloc_entry = NULL;
    440       } else {
    441          alloc_entry = alloc_entry->next;
    442       }
    443    }
    444 }
    445 
    446 
    447 /**
    448  * Destroy the gb query object and all the related query structures
    449  */
    450 static void
    451 destroy_gb_query_obj(struct svga_context *svga)
    452 {
    453    struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
    454    unsigned i;
    455 
    456    for (i = 0; i < SVGA3D_QUERYTYPE_MAX; i++) {
    457       struct svga_qmem_alloc_entry *alloc_entry, *next;
    458       alloc_entry = svga->gb_query_map[i];
    459       while (alloc_entry) {
    460          next = alloc_entry->next;
    461          util_bitmask_destroy(alloc_entry->alloc_mask);
    462          FREE(alloc_entry);
    463          alloc_entry = next;
    464       }
    465       svga->gb_query_map[i] = NULL;
    466    }
    467 
    468    if (svga->gb_query)
    469       sws->query_destroy(sws, svga->gb_query);
    470    svga->gb_query = NULL;
    471 
    472    util_bitmask_destroy(svga->gb_query_alloc_mask);
    473 }
    474 
/**
 * Define a query and create the gb query object if it has not already been
 * created. There is only one gb query object per context, and it is shared
 * by queries of all types.
 */
    480 static enum pipe_error
    481 define_query_vgpu10(struct svga_context *svga,
    482                     struct svga_query *sq, int resultLen)
    483 {
    484    struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
    485    int qlen;
    486    enum pipe_error ret = PIPE_OK;
    487 
    488    SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);
    489 
    490    if (svga->gb_query == NULL) {
    491       /* Create a gb query object */
    492       svga->gb_query = sws->query_create(sws, SVGA_QUERY_MEM_SIZE);
    493       if (!svga->gb_query)
    494          return PIPE_ERROR_OUT_OF_MEMORY;
    495       svga->gb_query_len = SVGA_QUERY_MEM_SIZE;
      memset(svga->gb_query_map, 0, sizeof(svga->gb_query_map));
    497       svga->gb_query_alloc_mask = util_bitmask_create();
    498 
    499       /* Bind the query object to the context */
    500       if (svga->swc->query_bind(svga->swc, svga->gb_query,
    501                                 SVGA_QUERY_FLAG_SET) != PIPE_OK) {
    502          svga_context_flush(svga, NULL);
    503          svga->swc->query_bind(svga->swc, svga->gb_query,
    504                                SVGA_QUERY_FLAG_SET);
    505       }
    506    }
    507 
    508    sq->gb_query = svga->gb_query;
    509 
    510    /* Allocate an integer ID for this query */
    511    sq->id = util_bitmask_add(svga->query_id_bm);
    512    if (sq->id == UTIL_BITMASK_INVALID_INDEX)
    513       return PIPE_ERROR_OUT_OF_MEMORY;
    514 
    515    /* Find a slot for this query in the gb object */
    516    qlen = resultLen + sizeof(SVGA3dQueryState);
    517    sq->offset = allocate_query(svga, sq->svga_type, qlen);
    518    if (sq->offset == -1)
    519       return PIPE_ERROR_OUT_OF_MEMORY;
    520 
    521    SVGA_DBG(DEBUG_QUERY, "   query type=%d qid=0x%x offset=%d\n",
    522             sq->svga_type, sq->id, sq->offset);
    523 
    524    /**
    525     * Send SVGA3D commands to define the query
    526     */
    527    ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
    528    if (ret != PIPE_OK) {
    529       svga_context_flush(svga, NULL);
    530       ret = SVGA3D_vgpu10_DefineQuery(svga->swc, sq->id, sq->svga_type, sq->flags);
    531    }
    532    if (ret != PIPE_OK)
    533       return PIPE_ERROR_OUT_OF_MEMORY;
    534 
    535    ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
    536    if (ret != PIPE_OK) {
    537       svga_context_flush(svga, NULL);
    538       ret = SVGA3D_vgpu10_BindQuery(svga->swc, sq->gb_query, sq->id);
    539    }
    540    assert(ret == PIPE_OK);
    541 
    542    ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
    543    if (ret != PIPE_OK) {
    544       svga_context_flush(svga, NULL);
    545       ret = SVGA3D_vgpu10_SetQueryOffset(svga->swc, sq->id, sq->offset);
    546    }
    547    assert(ret == PIPE_OK);
    548 
    549    return PIPE_OK;
    550 }
    551 
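/**
 * VGPU10: destroy the query and release its slot in the gb query memory.
 */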
    552 static enum pipe_error
    553 destroy_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
    554 {
    555    enum pipe_error ret;
    556 
    557    ret = SVGA3D_vgpu10_DestroyQuery(svga->swc, sq->id);
    558 
    559    /* Deallocate the memory slot allocated for this query */
    560    deallocate_query(svga, sq);
    561 
    562    return ret;
    563 }
    564 
    565 
/**
 * Rebind queries to the context.
 */
    569 static void
    570 rebind_vgpu10_query(struct svga_context *svga)
    571 {
    572    if (svga->swc->query_bind(svga->swc, svga->gb_query,
    573                              SVGA_QUERY_FLAG_REF) != PIPE_OK) {
    574       svga_context_flush(svga, NULL);
    575       svga->swc->query_bind(svga->swc, svga->gb_query,
    576                             SVGA_QUERY_FLAG_REF);
    577    }
    578 
    579    svga->rebind.flags.query = FALSE;
    580 }
    581 
    582 
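/**
 * VGPU10: reset the query state to NEW and emit the BeginQuery command.
 */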
    583 static enum pipe_error
    584 begin_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
    585 {
    586    struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
    587    enum pipe_error ret = PIPE_OK;
    588    int status = 0;
    589 
    590    sws->fence_reference(sws, &sq->fence, NULL);
    591 
    592    /* Initialize the query state to NEW */
    593    status = sws->query_init(sws, sq->gb_query, sq->offset, SVGA3D_QUERYSTATE_NEW);
    594    if (status)
    595       return PIPE_ERROR;
    596 
    597    if (svga->rebind.flags.query) {
    598       rebind_vgpu10_query(svga);
    599    }
    600 
    601    /* Send the BeginQuery command to the device */
    602    ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
    603    if (ret != PIPE_OK) {
    604       svga_context_flush(svga, NULL);
    605       ret = SVGA3D_vgpu10_BeginQuery(svga->swc, sq->id);
    606    }
    607    return ret;
    608 }
    609 
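/**
 * VGPU10: emit the EndQuery command, rebinding the gb query object to the
 * context first if needed.
 */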
    610 static enum pipe_error
    611 end_query_vgpu10(struct svga_context *svga, struct svga_query *sq)
    612 {
    613    enum pipe_error ret = PIPE_OK;
    614 
    615    if (svga->rebind.flags.query) {
    616       rebind_vgpu10_query(svga);
    617    }
    618 
    619    ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
    620    if (ret != PIPE_OK) {
    621       svga_context_flush(svga, NULL);
    622       ret = SVGA3D_vgpu10_EndQuery(svga->swc, sq->id);
    623    }
    624 
    625    return ret;
    626 }
    627 
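/**
 * VGPU10: read the query state and result back from the gb query object.
 * Returns FALSE if the result is not ready yet and wait is FALSE.
 */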
    628 static boolean
    629 get_query_result_vgpu10(struct svga_context *svga, struct svga_query *sq,
    630                         boolean wait, void *result, int resultLen)
    631 {
    632    struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
    633    SVGA3dQueryState queryState;
    634 
    635    if (svga->rebind.flags.query) {
    636       rebind_vgpu10_query(svga);
    637    }
    638 
    639    sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
    640 
    641    if (queryState != SVGA3D_QUERYSTATE_SUCCEEDED && !sq->fence) {
    642       /* We don't have the query result yet, and the query hasn't been
    643        * submitted.  We need to submit it now since the GL spec says
    644        * "Querying the state for a given occlusion query forces that
    645        * occlusion query to complete within a finite amount of time."
    646        */
    647       svga_context_flush(svga, &sq->fence);
    648    }
    649 
    650    if (queryState == SVGA3D_QUERYSTATE_PENDING ||
    651        queryState == SVGA3D_QUERYSTATE_NEW) {
    652       if (!wait)
    653          return FALSE;
    654       sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
    655       sws->query_get_result(sws, sq->gb_query, sq->offset, &queryState, result, resultLen);
    656    }
    657 
    658    assert(queryState == SVGA3D_QUERYSTATE_SUCCEEDED ||
    659           queryState == SVGA3D_QUERYSTATE_FAILED);
    660 
    661    return TRUE;
    662 }
    663 
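/**
 * Create a query object.  GPU-backed queries go through the VGPU9 or
 * VGPU10 path; the SVGA_QUERY_x queries are serviced from software
 * counters kept by the driver.
 */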
    664 static struct pipe_query *
    665 svga_create_query(struct pipe_context *pipe,
    666                   unsigned query_type,
    667                   unsigned index)
    668 {
    669    struct svga_context *svga = svga_context(pipe);
    670    struct svga_query *sq;
    671 
    672    assert(query_type < SVGA_QUERY_MAX);
    673 
    674    sq = CALLOC_STRUCT(svga_query);
    675    if (!sq)
    676       goto fail;
    677 
    678    /* Allocate an integer ID for the query */
    679    sq->id = util_bitmask_add(svga->query_id_bm);
    680    if (sq->id == UTIL_BITMASK_INVALID_INDEX)
    681       goto fail;
    682 
    683    SVGA_DBG(DEBUG_QUERY, "%s type=%d sq=0x%x id=%d\n", __FUNCTION__,
    684             query_type, sq, sq->id);
    685 
    686    switch (query_type) {
    687    case PIPE_QUERY_OCCLUSION_COUNTER:
    688       sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
    689       if (svga_have_vgpu10(svga)) {
    690          define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionQueryResult));
    691 
         /**
          * In OpenGL, an occlusion counter query can be used for conditional
          * rendering, but in DX10 only an OCCLUSION_PREDICATE query can be
          * used for predication. Hence, we create an occlusion predicate
          * query along with the occlusion counter query. When the occlusion
          * counter query is used for predication, the associated occlusion
          * predicate query is the one used in the SetPredication command.
          */
    701          sq->predicate = svga_create_query(pipe, PIPE_QUERY_OCCLUSION_PREDICATE, index);
    702 
    703       } else {
    704          define_query_vgpu9(svga, sq);
    705       }
    706       break;
    707    case PIPE_QUERY_OCCLUSION_PREDICATE:
    708       if (svga_have_vgpu10(svga)) {
    709          sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE;
    710          define_query_vgpu10(svga, sq, sizeof(SVGADXOcclusionPredicateQueryResult));
    711       } else {
    712          sq->svga_type = SVGA3D_QUERYTYPE_OCCLUSION;
    713          define_query_vgpu9(svga, sq);
    714       }
    715       break;
    716    case PIPE_QUERY_PRIMITIVES_GENERATED:
    717    case PIPE_QUERY_PRIMITIVES_EMITTED:
    718    case PIPE_QUERY_SO_STATISTICS:
    719       assert(svga_have_vgpu10(svga));
    720       sq->svga_type = SVGA3D_QUERYTYPE_STREAMOUTPUTSTATS;
    721       define_query_vgpu10(svga, sq,
    722                           sizeof(SVGADXStreamOutStatisticsQueryResult));
    723       break;
    724    case PIPE_QUERY_TIMESTAMP:
    725       assert(svga_have_vgpu10(svga));
    726       sq->svga_type = SVGA3D_QUERYTYPE_TIMESTAMP;
    727       define_query_vgpu10(svga, sq,
    728                           sizeof(SVGADXTimestampQueryResult));
    729       break;
    730    case SVGA_QUERY_NUM_DRAW_CALLS:
    731    case SVGA_QUERY_NUM_FALLBACKS:
    732    case SVGA_QUERY_NUM_FLUSHES:
    733    case SVGA_QUERY_NUM_VALIDATIONS:
    734    case SVGA_QUERY_NUM_BUFFERS_MAPPED:
    735    case SVGA_QUERY_NUM_TEXTURES_MAPPED:
    736    case SVGA_QUERY_NUM_BYTES_UPLOADED:
    737    case SVGA_QUERY_COMMAND_BUFFER_SIZE:
    738    case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
    739    case SVGA_QUERY_MEMORY_USED:
    740    case SVGA_QUERY_NUM_SHADERS:
    741    case SVGA_QUERY_NUM_RESOURCES:
    742    case SVGA_QUERY_NUM_STATE_OBJECTS:
    743    case SVGA_QUERY_NUM_SURFACE_VIEWS:
    744    case SVGA_QUERY_NUM_GENERATE_MIPMAP:
    745    case SVGA_QUERY_NUM_READBACKS:
    746    case SVGA_QUERY_NUM_RESOURCE_UPDATES:
    747    case SVGA_QUERY_NUM_BUFFER_UPLOADS:
    748    case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
    749    case SVGA_QUERY_NUM_CONST_UPDATES:
    750       break;
    751    case SVGA_QUERY_FLUSH_TIME:
    752    case SVGA_QUERY_MAP_BUFFER_TIME:
    753       /* These queries need os_time_get() */
    754       svga->hud.uses_time = TRUE;
    755       break;
    756    default:
    757       assert(!"unexpected query type in svga_create_query()");
    758    }
    759 
    760    sq->type = query_type;
    761 
    762    return &sq->base;
    763 
    764 fail:
    765    FREE(sq);
    766    return NULL;
    767 }
    768 
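/**
 * Destroy a query.  If q is NULL, the shared gb query object for the
 * context is destroyed instead.
 */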
    769 static void
    770 svga_destroy_query(struct pipe_context *pipe, struct pipe_query *q)
    771 {
    772    struct svga_context *svga = svga_context(pipe);
    773    struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
    774    struct svga_query *sq;
    775 
    776    if (!q) {
    777       destroy_gb_query_obj(svga);
    778       return;
    779    }
    780 
    781    sq = svga_query(q);
    782 
    783    SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
    784             sq, sq->id);
    785 
    786    switch (sq->type) {
    787    case PIPE_QUERY_OCCLUSION_COUNTER:
    788    case PIPE_QUERY_OCCLUSION_PREDICATE:
    789       if (svga_have_vgpu10(svga)) {
    790          /* make sure to also destroy any associated predicate query */
    791          if (sq->predicate)
    792             svga_destroy_query(pipe, sq->predicate);
    793          destroy_query_vgpu10(svga, sq);
    794       } else {
    795          sws->buffer_destroy(sws, sq->hwbuf);
    796       }
    797       sws->fence_reference(sws, &sq->fence, NULL);
    798       break;
    799    case PIPE_QUERY_PRIMITIVES_GENERATED:
    800    case PIPE_QUERY_PRIMITIVES_EMITTED:
    801    case PIPE_QUERY_SO_STATISTICS:
    802    case PIPE_QUERY_TIMESTAMP:
    803       assert(svga_have_vgpu10(svga));
    804       destroy_query_vgpu10(svga, sq);
    805       sws->fence_reference(sws, &sq->fence, NULL);
    806       break;
    807    case SVGA_QUERY_NUM_DRAW_CALLS:
    808    case SVGA_QUERY_NUM_FALLBACKS:
    809    case SVGA_QUERY_NUM_FLUSHES:
    810    case SVGA_QUERY_NUM_VALIDATIONS:
    811    case SVGA_QUERY_MAP_BUFFER_TIME:
    812    case SVGA_QUERY_NUM_BUFFERS_MAPPED:
    813    case SVGA_QUERY_NUM_TEXTURES_MAPPED:
    814    case SVGA_QUERY_NUM_BYTES_UPLOADED:
    815    case SVGA_QUERY_COMMAND_BUFFER_SIZE:
    816    case SVGA_QUERY_FLUSH_TIME:
    817    case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
    818    case SVGA_QUERY_MEMORY_USED:
    819    case SVGA_QUERY_NUM_SHADERS:
    820    case SVGA_QUERY_NUM_RESOURCES:
    821    case SVGA_QUERY_NUM_STATE_OBJECTS:
    822    case SVGA_QUERY_NUM_SURFACE_VIEWS:
    823    case SVGA_QUERY_NUM_GENERATE_MIPMAP:
    824    case SVGA_QUERY_NUM_READBACKS:
    825    case SVGA_QUERY_NUM_RESOURCE_UPDATES:
    826    case SVGA_QUERY_NUM_BUFFER_UPLOADS:
    827    case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
    828    case SVGA_QUERY_NUM_CONST_UPDATES:
    829       /* nothing */
    830       break;
    831    default:
    832       assert(!"svga: unexpected query type in svga_destroy_query()");
    833    }
    834 
    835    /* Free the query id */
    836    util_bitmask_clear(svga->query_id_bm, sq->id);
    837 
    838    FREE(sq);
    839 }
    840 
    841 
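/**
 * Start a query.  Buffered drawing commands are flushed first so that
 * they are not counted in the query results.
 */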
    842 static boolean
    843 svga_begin_query(struct pipe_context *pipe, struct pipe_query *q)
    844 {
    845    struct svga_context *svga = svga_context(pipe);
    846    struct svga_query *sq = svga_query(q);
    847    enum pipe_error ret;
    848 
    849    assert(sq);
    850    assert(sq->type < SVGA_QUERY_MAX);
    851 
    852    SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
    853             sq, sq->id);
    854 
    855    /* Need to flush out buffered drawing commands so that they don't
    856     * get counted in the query results.
    857     */
    858    svga_hwtnl_flush_retry(svga);
    859 
    860    switch (sq->type) {
    861    case PIPE_QUERY_OCCLUSION_COUNTER:
    862    case PIPE_QUERY_OCCLUSION_PREDICATE:
    863       if (svga_have_vgpu10(svga)) {
    864          ret = begin_query_vgpu10(svga, sq);
    865          /* also need to start the associated occlusion predicate query */
    866          if (sq->predicate) {
    867             enum pipe_error status;
    868             status = begin_query_vgpu10(svga, svga_query(sq->predicate));
    869             assert(status == PIPE_OK);
    870             (void) status;
    871          }
    872       } else {
    873          ret = begin_query_vgpu9(svga, sq);
    874       }
    875       assert(ret == PIPE_OK);
    876       (void) ret;
    877       break;
    878    case PIPE_QUERY_PRIMITIVES_GENERATED:
    879    case PIPE_QUERY_PRIMITIVES_EMITTED:
    880    case PIPE_QUERY_SO_STATISTICS:
    881    case PIPE_QUERY_TIMESTAMP:
    882       assert(svga_have_vgpu10(svga));
    883       ret = begin_query_vgpu10(svga, sq);
    884       assert(ret == PIPE_OK);
    885       break;
    886    case SVGA_QUERY_NUM_DRAW_CALLS:
    887       sq->begin_count = svga->hud.num_draw_calls;
    888       break;
    889    case SVGA_QUERY_NUM_FALLBACKS:
    890       sq->begin_count = svga->hud.num_fallbacks;
    891       break;
    892    case SVGA_QUERY_NUM_FLUSHES:
    893       sq->begin_count = svga->hud.num_flushes;
    894       break;
    895    case SVGA_QUERY_NUM_VALIDATIONS:
    896       sq->begin_count = svga->hud.num_validations;
    897       break;
    898    case SVGA_QUERY_MAP_BUFFER_TIME:
    899       sq->begin_count = svga->hud.map_buffer_time;
    900       break;
    901    case SVGA_QUERY_NUM_BUFFERS_MAPPED:
    902       sq->begin_count = svga->hud.num_buffers_mapped;
    903       break;
    904    case SVGA_QUERY_NUM_TEXTURES_MAPPED:
    905       sq->begin_count = svga->hud.num_textures_mapped;
    906       break;
    907    case SVGA_QUERY_NUM_BYTES_UPLOADED:
    908       sq->begin_count = svga->hud.num_bytes_uploaded;
    909       break;
    910    case SVGA_QUERY_COMMAND_BUFFER_SIZE:
    911       sq->begin_count = svga->hud.command_buffer_size;
    912       break;
    913    case SVGA_QUERY_FLUSH_TIME:
    914       sq->begin_count = svga->hud.flush_time;
    915       break;
    916    case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
    917       sq->begin_count = svga->hud.surface_write_flushes;
    918       break;
    919    case SVGA_QUERY_NUM_READBACKS:
    920       sq->begin_count = svga->hud.num_readbacks;
    921       break;
    922    case SVGA_QUERY_NUM_RESOURCE_UPDATES:
    923       sq->begin_count = svga->hud.num_resource_updates;
    924       break;
    925    case SVGA_QUERY_NUM_BUFFER_UPLOADS:
    926       sq->begin_count = svga->hud.num_buffer_uploads;
    927       break;
    928    case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
    929       sq->begin_count = svga->hud.num_const_buf_updates;
    930       break;
    931    case SVGA_QUERY_NUM_CONST_UPDATES:
    932       sq->begin_count = svga->hud.num_const_updates;
    933       break;
    934    case SVGA_QUERY_MEMORY_USED:
    935    case SVGA_QUERY_NUM_SHADERS:
    936    case SVGA_QUERY_NUM_RESOURCES:
    937    case SVGA_QUERY_NUM_STATE_OBJECTS:
    938    case SVGA_QUERY_NUM_SURFACE_VIEWS:
    939    case SVGA_QUERY_NUM_GENERATE_MIPMAP:
    940       /* nothing */
    941       break;
    942    default:
    943       assert(!"unexpected query type in svga_begin_query()");
    944    }
    945 
    946    svga->sq[sq->type] = sq;
    947 
    948    return true;
    949 }
    950 
    951 
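/**
 * End a query.
 */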
    952 static bool
    953 svga_end_query(struct pipe_context *pipe, struct pipe_query *q)
    954 {
    955    struct svga_context *svga = svga_context(pipe);
    956    struct svga_query *sq = svga_query(q);
    957    enum pipe_error ret;
    958 
    959    assert(sq);
    960    assert(sq->type < SVGA_QUERY_MAX);
    961 
    962    SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d\n", __FUNCTION__,
    963             sq, sq->id);
    964 
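   /* Timestamp queries are not started with begin_query() by the state
    * tracker, so begin the query here if it isn't already active.
    */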
    965    if (sq->type == PIPE_QUERY_TIMESTAMP && svga->sq[sq->type] != sq)
    966       svga_begin_query(pipe, q);
    967 
    968    svga_hwtnl_flush_retry(svga);
    969 
    970    assert(svga->sq[sq->type] == sq);
    971 
    972    switch (sq->type) {
    973    case PIPE_QUERY_OCCLUSION_COUNTER:
    974    case PIPE_QUERY_OCCLUSION_PREDICATE:
    975       if (svga_have_vgpu10(svga)) {
    976          ret = end_query_vgpu10(svga, sq);
    977          /* also need to end the associated occlusion predicate query */
    978          if (sq->predicate) {
    979             enum pipe_error status;
    980             status = end_query_vgpu10(svga, svga_query(sq->predicate));
    981             assert(status == PIPE_OK);
    982             (void) status;
    983          }
    984       } else {
    985          ret = end_query_vgpu9(svga, sq);
    986       }
    987       assert(ret == PIPE_OK);
    988       (void) ret;
    989       break;
    990    case PIPE_QUERY_PRIMITIVES_GENERATED:
    991    case PIPE_QUERY_PRIMITIVES_EMITTED:
    992    case PIPE_QUERY_SO_STATISTICS:
    993    case PIPE_QUERY_TIMESTAMP:
    994       assert(svga_have_vgpu10(svga));
    995       ret = end_query_vgpu10(svga, sq);
    996       assert(ret == PIPE_OK);
    997       break;
    998    case SVGA_QUERY_NUM_DRAW_CALLS:
    999       sq->end_count = svga->hud.num_draw_calls;
   1000       break;
   1001    case SVGA_QUERY_NUM_FALLBACKS:
   1002       sq->end_count = svga->hud.num_fallbacks;
   1003       break;
   1004    case SVGA_QUERY_NUM_FLUSHES:
   1005       sq->end_count = svga->hud.num_flushes;
   1006       break;
   1007    case SVGA_QUERY_NUM_VALIDATIONS:
   1008       sq->end_count = svga->hud.num_validations;
   1009       break;
   1010    case SVGA_QUERY_MAP_BUFFER_TIME:
   1011       sq->end_count = svga->hud.map_buffer_time;
   1012       break;
   1013    case SVGA_QUERY_NUM_BUFFERS_MAPPED:
   1014       sq->end_count = svga->hud.num_buffers_mapped;
   1015       break;
   1016    case SVGA_QUERY_NUM_TEXTURES_MAPPED:
   1017       sq->end_count = svga->hud.num_textures_mapped;
   1018       break;
   1019    case SVGA_QUERY_NUM_BYTES_UPLOADED:
   1020       sq->end_count = svga->hud.num_bytes_uploaded;
   1021       break;
   1022    case SVGA_QUERY_COMMAND_BUFFER_SIZE:
   1023       sq->end_count = svga->hud.command_buffer_size;
   1024       break;
   1025    case SVGA_QUERY_FLUSH_TIME:
   1026       sq->end_count = svga->hud.flush_time;
   1027       break;
   1028    case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
   1029       sq->end_count = svga->hud.surface_write_flushes;
   1030       break;
   1031    case SVGA_QUERY_NUM_READBACKS:
   1032       sq->end_count = svga->hud.num_readbacks;
   1033       break;
   1034    case SVGA_QUERY_NUM_RESOURCE_UPDATES:
   1035       sq->end_count = svga->hud.num_resource_updates;
   1036       break;
   1037    case SVGA_QUERY_NUM_BUFFER_UPLOADS:
   1038       sq->end_count = svga->hud.num_buffer_uploads;
   1039       break;
   1040    case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
   1041       sq->end_count = svga->hud.num_const_buf_updates;
   1042       break;
   1043    case SVGA_QUERY_NUM_CONST_UPDATES:
   1044       sq->end_count = svga->hud.num_const_updates;
   1045       break;
   1046    case SVGA_QUERY_MEMORY_USED:
   1047    case SVGA_QUERY_NUM_SHADERS:
   1048    case SVGA_QUERY_NUM_RESOURCES:
   1049    case SVGA_QUERY_NUM_STATE_OBJECTS:
   1050    case SVGA_QUERY_NUM_SURFACE_VIEWS:
   1051    case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   1052       /* nothing */
   1053       break;
   1054    default:
   1055       assert(!"unexpected query type in svga_end_query()");
   1056    }
   1057    svga->sq[sq->type] = NULL;
   1058    return true;
   1059 }
   1060 
   1061 
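/**
 * Return a query result.  Returns FALSE if the result is not available
 * yet and wait is FALSE.
 */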
   1062 static boolean
   1063 svga_get_query_result(struct pipe_context *pipe,
   1064                       struct pipe_query *q,
   1065                       boolean wait,
   1066                       union pipe_query_result *vresult)
   1067 {
   1068    struct svga_screen *svgascreen = svga_screen(pipe->screen);
   1069    struct svga_context *svga = svga_context(pipe);
   1070    struct svga_query *sq = svga_query(q);
   1071    uint64_t *result = (uint64_t *)vresult;
   1072    boolean ret = TRUE;
   1073 
   1074    assert(sq);
   1075 
   1076    SVGA_DBG(DEBUG_QUERY, "%s sq=0x%x id=%d wait: %d\n",
   1077             __FUNCTION__, sq, sq->id, wait);
   1078 
   1079    switch (sq->type) {
   1080    case PIPE_QUERY_OCCLUSION_COUNTER:
   1081       if (svga_have_vgpu10(svga)) {
   1082          SVGADXOcclusionQueryResult occResult;
   1083          ret = get_query_result_vgpu10(svga, sq, wait,
   1084                                        (void *)&occResult, sizeof(occResult));
   1085          *result = (uint64_t)occResult.samplesRendered;
   1086       } else {
   1087          ret = get_query_result_vgpu9(svga, sq, wait, result);
   1088       }
   1089       break;
   1090    case PIPE_QUERY_OCCLUSION_PREDICATE: {
   1091       if (svga_have_vgpu10(svga)) {
   1092          SVGADXOcclusionPredicateQueryResult occResult;
   1093          ret = get_query_result_vgpu10(svga, sq, wait,
   1094                                        (void *)&occResult, sizeof(occResult));
   1095          vresult->b = occResult.anySamplesRendered != 0;
   1096       } else {
   1097          uint64_t count = 0;
   1098          ret = get_query_result_vgpu9(svga, sq, wait, &count);
   1099          vresult->b = count != 0;
   1100       }
   1101       break;
   1102    }
   1103    case PIPE_QUERY_SO_STATISTICS: {
   1104       SVGADXStreamOutStatisticsQueryResult sResult;
   1105       struct pipe_query_data_so_statistics *pResult =
   1106          (struct pipe_query_data_so_statistics *)vresult;
   1107 
   1108       assert(svga_have_vgpu10(svga));
   1109       ret = get_query_result_vgpu10(svga, sq, wait,
   1110                                     (void *)&sResult, sizeof(sResult));
   1111       pResult->num_primitives_written = sResult.numPrimitivesWritten;
   1112       pResult->primitives_storage_needed = sResult.numPrimitivesRequired;
   1113       break;
   1114    }
   1115    case PIPE_QUERY_TIMESTAMP: {
   1116       SVGADXTimestampQueryResult sResult;
   1117 
   1118       assert(svga_have_vgpu10(svga));
   1119       ret = get_query_result_vgpu10(svga, sq, wait,
   1120                                     (void *)&sResult, sizeof(sResult));
   1121       *result = (uint64_t)sResult.timestamp;
   1122       break;
   1123    }
   1124    case PIPE_QUERY_PRIMITIVES_GENERATED: {
   1125       SVGADXStreamOutStatisticsQueryResult sResult;
   1126 
   1127       assert(svga_have_vgpu10(svga));
   1128       ret = get_query_result_vgpu10(svga, sq, wait,
   1129                                     (void *)&sResult, sizeof sResult);
   1130       *result = (uint64_t)sResult.numPrimitivesRequired;
   1131       break;
   1132    }
   1133    case PIPE_QUERY_PRIMITIVES_EMITTED: {
   1134       SVGADXStreamOutStatisticsQueryResult sResult;
   1135 
   1136       assert(svga_have_vgpu10(svga));
   1137       ret = get_query_result_vgpu10(svga, sq, wait,
   1138                                     (void *)&sResult, sizeof sResult);
   1139       *result = (uint64_t)sResult.numPrimitivesWritten;
   1140       break;
   1141    }
   1142    /* These are per-frame counters */
   1143    case SVGA_QUERY_NUM_DRAW_CALLS:
   1144    case SVGA_QUERY_NUM_FALLBACKS:
   1145    case SVGA_QUERY_NUM_FLUSHES:
   1146    case SVGA_QUERY_NUM_VALIDATIONS:
   1147    case SVGA_QUERY_MAP_BUFFER_TIME:
   1148    case SVGA_QUERY_NUM_BUFFERS_MAPPED:
   1149    case SVGA_QUERY_NUM_TEXTURES_MAPPED:
   1150    case SVGA_QUERY_NUM_BYTES_UPLOADED:
   1151    case SVGA_QUERY_COMMAND_BUFFER_SIZE:
   1152    case SVGA_QUERY_FLUSH_TIME:
   1153    case SVGA_QUERY_SURFACE_WRITE_FLUSHES:
   1154    case SVGA_QUERY_NUM_READBACKS:
   1155    case SVGA_QUERY_NUM_RESOURCE_UPDATES:
   1156    case SVGA_QUERY_NUM_BUFFER_UPLOADS:
   1157    case SVGA_QUERY_NUM_CONST_BUF_UPDATES:
   1158    case SVGA_QUERY_NUM_CONST_UPDATES:
   1159       vresult->u64 = sq->end_count - sq->begin_count;
   1160       break;
   1161    /* These are running total counters */
   1162    case SVGA_QUERY_MEMORY_USED:
   1163       vresult->u64 = svgascreen->hud.total_resource_bytes;
   1164       break;
   1165    case SVGA_QUERY_NUM_SHADERS:
   1166       vresult->u64 = svga->hud.num_shaders;
   1167       break;
   1168    case SVGA_QUERY_NUM_RESOURCES:
   1169       vresult->u64 = svgascreen->hud.num_resources;
   1170       break;
   1171    case SVGA_QUERY_NUM_STATE_OBJECTS:
   1172       vresult->u64 = (svga->hud.num_blend_objects +
   1173                       svga->hud.num_depthstencil_objects +
   1174                       svga->hud.num_rasterizer_objects +
   1175                       svga->hud.num_sampler_objects +
   1176                       svga->hud.num_samplerview_objects +
   1177                       svga->hud.num_vertexelement_objects);
   1178       break;
   1179    case SVGA_QUERY_NUM_SURFACE_VIEWS:
   1180       vresult->u64 = svga->hud.num_surface_views;
   1181       break;
   1182    case SVGA_QUERY_NUM_GENERATE_MIPMAP:
   1183       vresult->u64 = svga->hud.num_generate_mipmap;
   1184       break;
   1185    default:
   1186       assert(!"unexpected query type in svga_get_query_result");
   1187    }
   1188 
   1189    SVGA_DBG(DEBUG_QUERY, "%s result %d\n", __FUNCTION__, *((uint64_t *)vresult));
   1190 
   1191    return ret;
   1192 }
   1193 
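/**
 * Set up conditional rendering using the given query.
 * A NULL query disables predication.
 */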
   1194 static void
   1195 svga_render_condition(struct pipe_context *pipe, struct pipe_query *q,
   1196                       boolean condition, uint mode)
   1197 {
   1198    struct svga_context *svga = svga_context(pipe);
   1199    struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   1200    struct svga_query *sq = svga_query(q);
   1201    SVGA3dQueryId queryId;
   1202    enum pipe_error ret;
   1203 
   1204    SVGA_DBG(DEBUG_QUERY, "%s\n", __FUNCTION__);
   1205 
   1206    assert(svga_have_vgpu10(svga));
   1207    if (sq == NULL) {
   1208       queryId = SVGA3D_INVALID_ID;
   1209    }
   1210    else {
   1211       assert(sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION ||
   1212              sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSIONPREDICATE);
   1213 
   1214       if (sq->svga_type == SVGA3D_QUERYTYPE_OCCLUSION) {
   1215          assert(sq->predicate);
   1216          /**
   1217           * For conditional rendering, make sure to use the associated
   1218           * predicate query.
   1219           */
   1220          sq = svga_query(sq->predicate);
   1221       }
   1222       queryId = sq->id;
   1223 
   1224       if ((mode == PIPE_RENDER_COND_WAIT ||
   1225            mode == PIPE_RENDER_COND_BY_REGION_WAIT) && sq->fence) {
   1226          sws->fence_finish(sws, sq->fence, SVGA_FENCE_FLAG_QUERY);
   1227       }
   1228    }
   /*
    * If the kernel module doesn't support the predication command,
    * just render unconditionally.  This is probably acceptable for the
    * typical case of occlusion culling.
    */
   1234    if (sws->have_set_predication_cmd) {
   1235       ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
   1236                                          (uint32) condition);
   1237       if (ret != PIPE_OK) {
   1238          svga_context_flush(svga, NULL);
   1239          ret = SVGA3D_vgpu10_SetPredication(svga->swc, queryId,
   1240                                             (uint32) condition);
   1241       }
   1242       svga->pred.query_id = queryId;
   1243       svga->pred.cond = condition;
   1244    }
   1245 
   1246    svga->render_condition = (sq != NULL);
   1247 }
   1248 
   1249 
/*
 * This function is a workaround for the fact that we lack the ability to
 * query the renderer's time synchronously.
 */
   1254 static uint64_t
   1255 svga_get_timestamp(struct pipe_context *pipe)
   1256 {
   1257    struct pipe_query *q = svga_create_query(pipe, PIPE_QUERY_TIMESTAMP, 0);
   1258    union pipe_query_result result;
   1259 
   1260    svga_begin_query(pipe, q);
   1261    svga_end_query(pipe,q);
   1262    svga_get_query_result(pipe, q, TRUE, &result);
   1263    svga_destroy_query(pipe, q);
   1264 
   1265    return result.u64;
   1266 }
   1267 
   1268 
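/**
 * Nothing to do for set_active_query_state in this driver.
 */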
   1269 static void
   1270 svga_set_active_query_state(struct pipe_context *pipe, boolean enable)
   1271 {
   1272 }
   1273 
   1274 
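/**
 * Plug the query-related functions into the pipe_context.
 */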
   1275 void
   1276 svga_init_query_functions(struct svga_context *svga)
   1277 {
   1278    svga->pipe.create_query = svga_create_query;
   1279    svga->pipe.destroy_query = svga_destroy_query;
   1280    svga->pipe.begin_query = svga_begin_query;
   1281    svga->pipe.end_query = svga_end_query;
   1282    svga->pipe.get_query_result = svga_get_query_result;
   1283    svga->pipe.set_active_query_state = svga_set_active_query_state;
   1284    svga->pipe.render_condition = svga_render_condition;
   1285    svga->pipe.get_timestamp = svga_get_timestamp;
   1286 }
   1287