/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "os/os_thread.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_cmd.h"
#include "svga_context.h"
#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_screen.h"
#include "svga_winsys.h"

/**
 * Describes a complete SVGA_3D_CMD_UPDATE_GB_IMAGE command.
 */
struct svga_3d_update_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdUpdateGBImage body;
};

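/**
 * Describes a complete SVGA_3D_CMD_INVALIDATE_GB_IMAGE command.
 */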
struct svga_3d_invalidate_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdInvalidateGBImage body;
};


/**
 * Allocate a winsys_buffer (i.e. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing,
 * to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_context *svga,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}


/**
 * Destroy the HW storage if it is separate from the host surface.
 * In the GB case, the HW storage is associated with the host surface
 * itself, so this function is a no-op there.
 */
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(sbuf->map.count == 0);
   assert(sbuf->hwbuf);
   if (sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}



/**
 * Allocate DMA-able or updatable storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf,
                              unsigned bind_flags)
{
   assert(!sbuf->user);

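   /* In the guest-backed case the host surface itself provides the
    * storage, so all that is needed here is the host surface.
    */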
   if (ss->sws->have_gb_objects) {
      assert(sbuf->handle || !sbuf->dma.pending);
      return svga_buffer_create_host_surface(ss, sbuf, bind_flags);
   }
   if (!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.b.width0;

      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if (!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}


/**
 * Allocate graphics memory for vertex/index/constant/etc buffers (not
 * textures).
 */
enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf,
                                unsigned bind_flags)
{
   enum pipe_error ret = PIPE_OK;

   assert(!sbuf->user);

   if (!sbuf->handle) {
      boolean validated;

      sbuf->key.flags = 0;

      sbuf->key.format = SVGA3D_BUFFER;
      if (bind_flags & PIPE_BIND_VERTEX_BUFFER) {
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_VERTEX_BUFFER;
      }
      if (bind_flags & PIPE_BIND_INDEX_BUFFER) {
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_INDEX_BUFFER;
      }
      if (bind_flags & PIPE_BIND_CONSTANT_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_CONSTANT_BUFFER;

      if (bind_flags & PIPE_BIND_STREAM_OUTPUT)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_STREAM_OUTPUT;

      if (bind_flags & PIPE_BIND_SAMPLER_VIEW)
         sbuf->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;

      if (!bind_flags && sbuf->b.b.usage == PIPE_USAGE_STAGING) {
         /* This surface is to be used with the
          * SVGA3D_CMD_DX_TRANSFER_FROM_BUFFER command, and no other
          * bind flags are allowed to be set for this surface.
          */
         sbuf->key.flags = SVGA3D_SURFACE_TRANSFER_FROM_BUFFER;
      }

      sbuf->key.size.width = sbuf->b.b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;
      sbuf->key.arraySize = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n",
               sbuf->b.b.width0);

      sbuf->handle = svga_screen_surface_create(ss, bind_flags,
                                                sbuf->b.b.usage,
                                                &validated, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n",
               sbuf->handle, sbuf->b.b.width0);

      /* Add the new surface to the buffer surface list */
      ret = svga_buffer_add_host_surface(sbuf, sbuf->handle, &sbuf->key,
                                         bind_flags);
   }

   return ret;
}


/**
 * Recreates a host surface with the new bind flags.
 */
enum pipe_error
svga_buffer_recreate_host_surface(struct svga_context *svga,
                                  struct svga_buffer *sbuf,
                                  unsigned bind_flags)
{
   enum pipe_error ret = PIPE_OK;
   struct svga_winsys_surface *old_handle = sbuf->handle;

   assert(sbuf->bind_flags != bind_flags);
   assert(old_handle);

   sbuf->handle = NULL;

   /* Create a new resource with the requested bind_flags */
   ret = svga_buffer_create_host_surface(svga_screen(svga->pipe.screen),
                                         sbuf, bind_flags);
   if (ret == PIPE_OK) {
      /* Copy the surface data */
      assert(sbuf->handle);
      ret = SVGA3D_vgpu10_BufferCopy(svga->swc, old_handle, sbuf->handle,
                                     0, 0, sbuf->b.b.width0);
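      /* The copy can fail when the command buffer is full; flush the
       * pending commands and then retry it once.
       */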
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_BufferCopy(svga->swc, old_handle, sbuf->handle,
                                        0, 0, sbuf->b.b.width0);
         assert(ret == PIPE_OK);
      }
   }

   /* Set the new bind flags for this buffer resource */
   sbuf->bind_flags = bind_flags;

   return ret;
}


/**
 * Returns TRUE if the surface's bind flags are compatible with the new
 * bind flags.
 */
static boolean
compatible_bind_flags(unsigned bind_flags,
                      unsigned tobind_flags)
{
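   /* A surface whose flags already include the requested flags is trivially
    * compatible.  Otherwise the surface can be promoted, unless a constant
    * buffer is involved on either side: a constant buffer surface cannot
    * share bind flags with other usages.
    */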
   if ((bind_flags & tobind_flags) == tobind_flags)
      return TRUE;
   else if ((bind_flags|tobind_flags) & PIPE_BIND_CONSTANT_BUFFER)
      return FALSE;
   else
      return TRUE;
}


/**
 * Returns a buffer surface from the surface list that has the requested
 * bind flags, or whose existing bind flags can be promoted to include the
 * new bind flags.
 */
static struct svga_buffer_surface *
svga_buffer_get_host_surface(struct svga_buffer *sbuf,
                             unsigned bind_flags)
{
   struct svga_buffer_surface *bufsurf;

   LIST_FOR_EACH_ENTRY(bufsurf, &sbuf->surfaces, list) {
      if (compatible_bind_flags(bufsurf->bind_flags, bind_flags))
         return bufsurf;
   }
   return NULL;
}


/**
 * Adds the host surface to the buffer surface list.
 */
enum pipe_error
svga_buffer_add_host_surface(struct svga_buffer *sbuf,
                             struct svga_winsys_surface *handle,
                             struct svga_host_surface_cache_key *key,
                             unsigned bind_flags)
{
   struct svga_buffer_surface *bufsurf;

   bufsurf = CALLOC_STRUCT(svga_buffer_surface);
   if (!bufsurf)
      return PIPE_ERROR_OUT_OF_MEMORY;

   bufsurf->bind_flags = bind_flags;
   bufsurf->handle = handle;
   bufsurf->key = *key;

   /* add the surface to the surface list */
   LIST_ADD(&bufsurf->list, &sbuf->surfaces);

   return PIPE_OK;
}


/**
 * Start using the specified surface for this buffer resource.
 */
void
svga_buffer_bind_host_surface(struct svga_context *svga,
                              struct svga_buffer *sbuf,
                              struct svga_buffer_surface *bufsurf)
{
   enum pipe_error ret;

   /* Update the to-bind surface */
   assert(bufsurf->handle);
   assert(sbuf->handle);

   /* If we are switching from a stream output buffer to another buffer
    * type, make sure to copy the buffer content.
    */
   if (sbuf->bind_flags & PIPE_BIND_STREAM_OUTPUT) {
      ret = SVGA3D_vgpu10_BufferCopy(svga->swc, sbuf->handle, bufsurf->handle,
                                     0, 0, sbuf->b.b.width0);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_BufferCopy(svga->swc, sbuf->handle, bufsurf->handle,
                                        0, 0, sbuf->b.b.width0);
         assert(ret == PIPE_OK);
      }
   }

   /* Set this surface as the current one */
   sbuf->handle = bufsurf->handle;
   sbuf->key = bufsurf->key;
   sbuf->bind_flags = bufsurf->bind_flags;
}


/**
 * Prepare a host surface that can be used as indicated by the
 * tobind_flags.  If the existing host surface was not created with the
 * necessary bind flags, and the new bind flags can be combined with the
 * existing ones, we recreate the surface with the combined bind flags.
 * Otherwise, we create a separate surface for the incompatible bind flags.
 * For example, if a stream output buffer is reused as a constant buffer,
 * two surfaces will be created, one for stream output and another for the
 * constant buffer, since a constant buffer surface cannot be bound as a
 * stream output surface.
 */
enum pipe_error
svga_buffer_validate_host_surface(struct svga_context *svga,
                                  struct svga_buffer *sbuf,
                                  unsigned tobind_flags)
{
   struct svga_buffer_surface *bufsurf;
   enum pipe_error ret = PIPE_OK;

   /* Flush any pending upload first */
   svga_buffer_upload_flush(svga, sbuf);

   /* First check from the cached buffer surface list to see if there is
    * already a buffer surface that has the requested bind flags, or a
    * surface with compatible bind flags that can be promoted.
    */
   bufsurf = svga_buffer_get_host_surface(sbuf, tobind_flags);

   if (bufsurf) {
      if ((bufsurf->bind_flags & tobind_flags) == tobind_flags) {
         /* there is a surface with the requested bind flags */
         svga_buffer_bind_host_surface(svga, sbuf, bufsurf);
      } else {
         /* Recreate a host surface with the combined bind flags */
         ret = svga_buffer_recreate_host_surface(svga, sbuf,
                                                 bufsurf->bind_flags |
                                                 tobind_flags);

         /* Destroy the old surface */
         svga_screen_surface_destroy(svga_screen(sbuf->b.b.screen),
                                     &bufsurf->key, &bufsurf->handle);

         LIST_DEL(&bufsurf->list);
         FREE(bufsurf);
      }
   } else {
      /* Need to create a new surface if the bind flags are incompatible,
       * such as constant buffer surface & stream output surface.
       */
      ret = svga_buffer_recreate_host_surface(svga, sbuf,
                                              tobind_flags);
   }
   return ret;
}


void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   struct svga_buffer_surface *bufsurf, *next;

   LIST_FOR_EACH_ENTRY_SAFE(bufsurf, next, &sbuf->surfaces, list) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n",
               bufsurf->handle, sbuf->b.b.width0);
      svga_screen_surface_destroy(ss, &bufsurf->key, &bufsurf->handle);
      FREE(bufsurf);
   }
}


/**
 * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
 * command buffer, equal to the current number of mapped ranges.
 * The UPDATE_GB_IMAGE commands will be patched with the
 * actual ranges just before flush.
 */
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   SVGA3dCmdUpdateGBImage *update_cmd;
   struct svga_3d_update_gb_image *whole_update_cmd = NULL;
   const uint32 numBoxes = sbuf->map.num_ranges;
   struct pipe_resource *dummy;
   unsigned i;

   assert(svga_have_gb_objects(svga));
   assert(numBoxes);
   assert(sbuf->dma.updates == NULL);

   if (sbuf->dma.flags.discard) {
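      /* The buffer contents are being completely discarded, so emit an
       * INVALIDATE_GB_IMAGE command first, telling the host that the
       * existing contents need not be preserved.
       */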
      struct svga_3d_invalidate_gb_image *cicmd = NULL;
      SVGA3dCmdInvalidateGBImage *invalidate_cmd;
      const unsigned total_commands_size =
         sizeof(*invalidate_cmd) + numBoxes * sizeof(*whole_update_cmd);

      /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
       * 'numBoxes' UPDATE_GB_IMAGE commands.  Allocate all at once rather
       * than with separate commands because we need to properly deal with
       * filling the command buffer.
       */
      invalidate_cmd = SVGA3D_FIFOReserve(swc,
                                          SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                                          total_commands_size, 1 + numBoxes);
      if (!invalidate_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

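      /* SVGA3D_FIFOReserve() returned a pointer to the command body, so
       * step back to the enclosing header+body struct to fill in the header.
       */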
      cicmd = container_of(invalidate_cmd, cicmd, body);
      cicmd->header.size = sizeof(*invalidate_cmd);
      swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL,
                              sbuf->handle,
                              (SVGA_RELOC_WRITE |
                               SVGA_RELOC_INTERNAL |
                               SVGA_RELOC_DMA));
      invalidate_cmd->image.face = 0;
      invalidate_cmd->image.mipmap = 0;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = (struct svga_3d_update_gb_image *) &invalidate_cmd[1];
      /* initialize the first UPDATE_GB_IMAGE command */
      whole_update_cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
      update_cmd = &whole_update_cmd->body;

   } else {
      /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
      const unsigned total_commands_size =
         sizeof(*update_cmd) + (numBoxes - 1) * sizeof(*whole_update_cmd);

      update_cmd = SVGA3D_FIFOReserve(swc,
                                      SVGA_3D_CMD_UPDATE_GB_IMAGE,
                                      total_commands_size, numBoxes);
      if (!update_cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* The whole_update_command is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command.
       */
      whole_update_cmd = container_of(update_cmd, whole_update_cmd, body);
   }

   /* Init the first UPDATE_GB_IMAGE command */
   whole_update_cmd->header.size = sizeof(*update_cmd);
   swc->surface_relocation(swc, &update_cmd->image.sid, NULL, sbuf->handle,
                           SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   update_cmd->image.face = 0;
   update_cmd->image.mipmap = 0;

   /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
    * fill in the box info below.
    */
   sbuf->dma.updates = whole_update_cmd;

   /*
    * Copy the face, mipmap, etc. info to all subsequent commands.
    * Also do the surface relocation for each subsequent command.
    */
   for (i = 1; i < numBoxes; ++i) {
      whole_update_cmd++;
      memcpy(whole_update_cmd, sbuf->dma.updates, sizeof(*whole_update_cmd));

      swc->surface_relocation(swc, &whole_update_cmd->body.image.sid, NULL,
                              sbuf->handle,
                              SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   }

   /* Increment the reference count so the buffer stays alive until the
    * pending commands are flushed.
    */
   sbuf->dma.svga = svga;
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);
   SVGA_FIFOCommitAll(swc);

   swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
   sbuf->dma.flags.discard = FALSE;

   svga->hud.num_resource_updates++;

   return PIPE_OK;
}


/**
 * Issue DMA commands to transfer guest memory to the host.
 * Note that the memory segments (offset, size) will be patched in
 * later in the svga_buffer_upload_flush() function.
 */
static enum pipe_error
svga_buffer_upload_hb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   const SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   const uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   assert(!svga_have_gb_objects(svga));

   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

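   /* Reserve space for one SURFACE_DMA command, followed by 'numBoxes'
    * copy boxes and a suffix carrying the maximum offset and DMA flags.
    */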
    582                             SVGA_3D_CMD_SURFACE_DMA,
    583                             sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
    584                             2);
    585    if (!cmd)
    586       return PIPE_ERROR_OUT_OF_MEMORY;
    587 
    588    swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
    589    cmd->guest.pitch = 0;
    590 
    591    swc->surface_relocation(swc, &cmd->host.sid, NULL, host, surface_flags);
    592    cmd->host.face = 0;
    593    cmd->host.mipmap = 0;
    594 
    595    cmd->transfer = transfer;
    596 
    597    sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
    598    sbuf->dma.svga = svga;
    599 
    600    /* Increment reference count */
    601    dummy = NULL;
    602    pipe_resource_reference(&dummy, &sbuf->b.b);
    603 
    604    pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
    605    pSuffix->suffixSize = sizeof *pSuffix;
    606    pSuffix->maximumOffset = sbuf->b.b.width0;
    607    pSuffix->flags = sbuf->dma.flags;
    608 
    609    SVGA_FIFOCommitAll(swc);
    610 
    611    swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
    612    sbuf->dma.flags.discard = FALSE;
    613 
    614    svga->hud.num_buffer_uploads++;
    615 
    616    return PIPE_OK;
    617 }
    618 
    619 
/**
 * Issue commands to transfer guest memory to the host.
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga, struct svga_buffer *sbuf)
{
   if (svga_have_gb_objects(svga)) {
      return svga_buffer_upload_gb_command(svga, sbuf);
   } else {
      return svga_buffer_upload_hb_command(svga, sbuf);
   }
}


/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
void
svga_buffer_upload_flush(struct svga_context *svga, struct svga_buffer *sbuf)
{
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      //debug_printf("no dma pending on buffer\n");
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);

   /*
    * Patch the DMA/update command with the final copy box.
    */
   if (svga_have_gb_objects(svga)) {
      struct svga_3d_update_gb_image *update = sbuf->dma.updates;
      assert(update);

      for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
         SVGA3dBox *box = &update->body.box;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);

         svga->hud.num_bytes_uploaded += box->w;
         svga->hud.num_buffer_uploads++;
      }
   }
   else {
      assert(sbuf->hwbuf);
      assert(sbuf->dma.boxes);
      SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

      for (i = 0; i < sbuf->map.num_ranges; ++i) {
         SVGA3dCopyBox *box = sbuf->dma.boxes + i;

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;
         box->srcx = sbuf->map.ranges[i].start;
         box->srcy = 0;
         box->srcz = 0;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);

         svga->hud.num_bytes_uploaded += box->w;
         svga->hud.num_buffer_uploads++;
      }
   }

   /* Reset sbuf for next use/upload */

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);  /* remove from svga->dirty_buffers list */
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;
   sbuf->dma.updates = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}


/**
 * Note a dirty range.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
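 *
 * For example, two back-to-back writes such as
 *
 *    svga_buffer_add_range(sbuf, 0, 16);
 *    svga_buffer_add_range(sbuf, 16, 32);
 *
 * coalesce into the single range [0, 32).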
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf, unsigned start, unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */
   for (i = 0; i < sbuf->map.num_ranges; ++i) {
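      /* For disjoint ranges exactly one of these distances is positive
       * (the size of the gap between range i and the new range); for
       * touching or overlapping ranges both are <= 0.  So MAX2 yields the
       * gap, or a non-positive value when the ranges can be merged.
       */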
      const int left_dist = start - sbuf->map.ranges[i].end;
      const int right_dist = sbuf->map.ranges[i].start - end;
      const int dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given, so it is too late to do
          * anything.  If the ranges overlap here it must surely be because
          * PIPE_TRANSFER_UNSYNCHRONIZED was set.
          */
         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end   = MAX2(sbuf->map.ranges[i].end,   end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */
         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch up the
    * pending DMA upload and start clean.
    */

   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */

      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   } else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */

      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start =
         MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end =
         MAX2(sbuf->map.ranges[nearest_range].end, end);
   }
}



/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static enum pipe_error
svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf,
                      unsigned bind_flags)
{
   assert(!sbuf->user);
   if (!svga_buffer_has_hw_storage(sbuf)) {
      struct svga_screen *ss = svga_screen(sbuf->b.b.screen);
      enum pipe_error ret;
      boolean retry;
      void *map;
      unsigned i;

      assert(sbuf->swbuf);
      if (!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen), sbuf,
                                          bind_flags);
      if (ret != PIPE_OK)
         return ret;

      mtx_lock(&ss->swc_mutex);
      map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
      assert(map);
      assert(!retry);
      if (!map) {
         mtx_unlock(&ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      /* Copy data from malloc'd swbuf to the new hardware buffer */
      for (i = 0; i < sbuf->map.num_ranges; i++) {
         unsigned start = sbuf->map.ranges[i].start;
         unsigned len = sbuf->map.ranges[i].end - start;
         memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start, len);
      }

      svga_buffer_hw_storage_unmap(svga, sbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(sbuf->map.count == 0);
      if (sbuf->map.count == 0) {
         if (sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      mtx_unlock(&ss->swc_mutex);
   }

   return PIPE_OK;
}


/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 * This function should never get called in the guest-backed case
 * since we always have a full-sized hardware storage backing the
 * host surface.
 */
static enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);
   assert(!svga_have_gb_objects(svga));

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      const struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         uint8_t *map;
         enum pipe_error ret;

         if (offset + size > range->end)
            size = range->end - offset;

         hwbuf = sws->buffer_create(sws, alignment, usage, size);
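         /* If the allocation fails, keep halving the piece size until a
          * DMA buffer can be allocated, or give up when it drops to zero.
          */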
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_DISCARD_RANGE);
         assert(map);
         if (map) {
            memcpy(map, (const char *) sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         ret = SVGA3D_BufferDMA(svga->swc,
                                hwbuf, sbuf->handle,
                                SVGA3D_WRITE_HOST_VRAM,
                                size, 0, offset, sbuf->dma.flags);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BufferDMA(svga->swc,
                                   hwbuf, sbuf->handle,
                                   SVGA3D_WRITE_HOST_VRAM,
                                   size, 0, offset, sbuf->dma.flags);
            assert(ret == PIPE_OK);
         }

         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}


/**
 * Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 * This function will create the host surface, and in the GB case also the
 * hardware storage. In the non-GB case, the hardware storage will be created
 * if there are mapped ranges and the data is currently in a malloc'ed buffer.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga, struct pipe_resource *buf,
                   unsigned tobind_flags)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->user);

   if (sbuf->handle) {
      if ((sbuf->bind_flags & tobind_flags) != tobind_flags) {
         /* If the allocated resource's bind flags do not include the
          * requested bind flags, validate the host surface.
          */
         ret = svga_buffer_validate_host_surface(svga, sbuf, tobind_flags);
         if (ret != PIPE_OK)
            return NULL;
      }
   } else {
      if (!sbuf->bind_flags) {
         sbuf->bind_flags = tobind_flags;
      }

      assert((sbuf->bind_flags & tobind_flags) == tobind_flags);

      /* This call will set sbuf->handle */
      if (svga_have_gb_objects(svga)) {
         ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
      } else {
         ret = svga_buffer_create_host_surface(ss, sbuf, sbuf->bind_flags);
      }
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /* No pending DMA/update commands yet. */

         /* Migrate the data from swbuf -> hwbuf if necessary */
         ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
         if (ret == PIPE_OK) {
            /* Emit DMA or UpdateGBImage commands */
            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it
             * into smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is already a pending DMA. Make sure it is from this context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(sbuf->map.num_ranges == 0 || sbuf->dma.pending);

   return sbuf->handle;
}


void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;

   SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERSFLUSH);

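   /* Walk the dirty-buffer list with a pre-fetched 'next' pointer, since
    * svga_buffer_upload_flush() unlinks the buffer from this list.
    */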
   curr = svga->dirty_buffers.next;
   next = curr->next;
   while (curr != &svga->dirty_buffers) {
      struct svga_buffer *sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
      assert(sbuf->dma.pending);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }

   SVGA_STATS_TIME_POP(svga_sws(svga));
}