/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga_cmd.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_winsys.h"
#include "svga_debug.h"


/**
 * Vertex and index buffers need hardware backing.  Constant buffers
 * do not.  No other buffer types are currently supported.
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER);
}


/**
 * Create a buffer transfer.
 *
 * Unlike texture DMAs (which are written immediately to the command buffer
 * and therefore inherently serialized with other context operations), for
 * buffers we try to coalesce multiple range mappings (i.e., multiple calls
 * to this function) into a single DMA command, for better efficiency in
 * command processing.  This means we need to exercise extra care here to
 * ensure that the end result is exactly the same as if one DMA was used
 * for every mapped range.
 */
static struct pipe_transfer *
svga_buffer_get_transfer(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,
                         const struct pipe_box *box)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(resource);
   struct pipe_transfer *transfer;

   transfer = CALLOC_STRUCT(pipe_transfer);
   if (transfer == NULL) {
      return NULL;
   }

   transfer->resource = resource;
   transfer->level = level;
   transfer->usage = usage;
   transfer->box = *box;

   if (usage & PIPE_TRANSFER_WRITE) {
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         /*
          * Flush any pending primitives, finish writing any pending DMA
          * commands, and tell the host to discard the buffer contents on
          * the next DMA operation.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            /*
             * Instead of flushing the context command buffer, simply discard
             * the current hwbuf, and start a new one.
             */

            svga_buffer_destroy_hw_storage(ss, sbuf);
         }

         sbuf->map.num_ranges = 0;
         sbuf->dma.flags.discard = TRUE;
      }

      if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         if (!sbuf->map.num_ranges) {
            /*
             * No pending ranges to upload so far, so we can tell the host
             * not to synchronize on the next DMA command.
             */

            sbuf->dma.flags.unsynchronized = TRUE;
         }
      } else {
         /*
          * Synchronizing, so flush any pending primitives, finish writing any
          * pending DMA command, and ensure the next DMA will be done in order.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            if (sbuf->hwbuf) {
               /*
                * We have a pending DMA upload from a hardware buffer, therefore
                * we need to ensure that the host finishes processing that DMA
                * command before the state tracker can start overwriting the
                * hardware buffer.
                *
                * XXX: This could be avoided by tying the hardware buffer to
                * the transfer (just as done with textures), which would allow
                * overlapping DMA commands to be queued on the same context
                * buffer.  However, due to the likelihood of software vertex
                * processing, it is more convenient to hold on to the hardware
                * buffer, allowing the contents to be accessed quickly from the
                * CPU without having to do a DMA download from the host.
                */

               if (usage & PIPE_TRANSFER_DONTBLOCK) {
                  /*
                   * Flushing the command buffer here will most likely cause
                   * the map of the hwbuf below to block, so preemptively
                   * return NULL here if DONTBLOCK is set to prevent unnecessary
                   * command buffer flushes.
                   */

                  FREE(transfer);
                  return NULL;
               }

               svga_context_flush(svga, NULL);
            }
         }

         sbuf->dma.flags.unsynchronized = FALSE;
      }
   }

   if (!sbuf->swbuf && !sbuf->hwbuf) {
      if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead.
          */
         if (0) {
            debug_printf("%s: failed to allocate %u KB of DMA, "
                         "splitting DMA transfers\n",
                         __FUNCTION__,
                         (sbuf->b.b.width0 + 1023)/1024);
         }

         sbuf->swbuf = align_malloc(sbuf->b.b.width0, 16);
         if (!sbuf->swbuf) {
            FREE(transfer);
            return NULL;
         }
      }
   }

   return transfer;
}


/**
 * Map a range of a buffer.
 *
 * Returns a pointer offset by box.x into either the software (malloc or
 * user-supplied) buffer or the mapped winsys hardware buffer.
 */
static void *
svga_buffer_transfer_map( struct pipe_context *pipe,
                          struct pipe_transfer *transfer )
{
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   uint8_t *map;

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (sbuf->hwbuf) {
      struct svga_screen *ss = svga_screen(pipe->screen);
      struct svga_winsys_screen *sws = ss->sws;

      map = sws->buffer_map(sws, sbuf->hwbuf, transfer->usage);
   }
   else {
      map = NULL;
   }

   if (map) {
      ++sbuf->map.count;
      map += transfer->box.x;
   }

   return map;
}


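/**
 * Flush a mapped range of a buffer (PIPE_TRANSFER_FLUSH_EXPLICIT case).
 *
 * The range is only recorded in the buffer's list of pending ranges here;
 * the actual DMA upload is emitted later, when the pending ranges are
 * flushed to the host.
 */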
static void
svga_buffer_transfer_flush_region( struct pipe_context *pipe,
                                   struct pipe_transfer *transfer,
                                   const struct pipe_box *box)
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   unsigned offset = transfer->box.x + box->x;
   unsigned length = box->width;

   assert(transfer->usage & PIPE_TRANSFER_WRITE);
   assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);

   pipe_mutex_lock(ss->swc_mutex);
   svga_buffer_add_range(sbuf, offset, offset + length);
   pipe_mutex_unlock(ss->swc_mutex);
}


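/**
 * Unmap a buffer.
 *
 * If the transfer wrote to the buffer without explicit range flushes, the
 * whole buffer is recorded as a pending upload range and the host is told
 * to discard the previous contents on the next DMA command.
 */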
static void
svga_buffer_transfer_unmap( struct pipe_context *pipe,
                            struct pipe_transfer *transfer )
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if (sbuf->map.count) {
      --sbuf->map.count;
   }

   if (sbuf->hwbuf) {
      sws->buffer_unmap(sws, sbuf->hwbuf);
   }

   if (transfer->usage & PIPE_TRANSFER_WRITE) {
      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         /*
          * Mapped range not flushed explicitly, so flush the whole buffer,
          * and tell the host to discard the contents when processing the DMA
          * command.
          */

         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         sbuf->dma.flags.discard = TRUE;

         svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
      }
   }

   pipe_mutex_unlock(ss->swc_mutex);
}


/**
 * Destroy a buffer transfer.
 */
static void
svga_buffer_transfer_destroy(struct pipe_context *pipe,
                             struct pipe_transfer *transfer)
{
   FREE(transfer);
}


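/**
 * Destroy a buffer resource, releasing the host surface, the uploaded
 * copy (if any), and the hardware and/or software storage.
 */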
static void
svga_buffer_destroy( struct pipe_screen *screen,
                     struct pipe_resource *buf )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->dma.pending);

   if (sbuf->handle)
      svga_buffer_destroy_host_surface(ss, sbuf);

   if (sbuf->uploaded.buffer)
      pipe_resource_reference(&sbuf->uploaded.buffer, NULL);

   if (sbuf->hwbuf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if (sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}


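/*
 * Resource vtbl for buffers.  The generic pipe_context/pipe_screen entry
 * points reach these callbacks through the u_resource/u_transfer helpers
 * (see util/u_transfer.h).
 */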
struct u_resource_vtbl svga_buffer_vtbl =
{
   u_default_resource_get_handle,      /* get_handle */
   svga_buffer_destroy,                /* resource_destroy */
   svga_buffer_get_transfer,           /* get_transfer */
   svga_buffer_transfer_destroy,       /* transfer_destroy */
   svga_buffer_transfer_map,           /* transfer_map */
   svga_buffer_transfer_flush_region,  /* transfer_flush_region */
   svga_buffer_transfer_unmap,         /* transfer_unmap */
   u_default_transfer_inline_write     /* transfer_inline_write */
};


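/**
 * Create a buffer resource.
 *
 * Buffers that need hardware backing (vertex and index buffers) get a host
 * surface; everything else starts out as a plain malloc'ed buffer.
 */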
struct pipe_resource *
svga_buffer_create(struct pipe_screen *screen,
                   const struct pipe_resource *template)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if (!sbuf)
      goto error1;

   sbuf->b.b = *template;
   sbuf->b.vtbl = &svga_buffer_vtbl;
   pipe_reference_init(&sbuf->b.b.reference, 1);
   sbuf->b.b.screen = screen;

   if (svga_buffer_needs_hw_storage(template->bind)) {
      if (svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
         goto error2;
   }
   else {
      sbuf->swbuf = align_malloc(template->width0, 64);
      if (!sbuf->swbuf)
         goto error2;
   }

   debug_reference(&sbuf->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   return &sbuf->b.b;

error2:
   FREE(sbuf);
error1:
   return NULL;
}

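/**
 * Create a buffer resource wrapping caller-owned memory.
 *
 * No copy is made: the pointer is used directly as the software buffer,
 * and because sbuf->user is set it will not be freed when the resource
 * is destroyed.
 */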
struct pipe_resource *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes,
                        unsigned bind)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if (!sbuf)
      goto no_sbuf;

   pipe_reference_init(&sbuf->b.b.reference, 1);
   sbuf->b.vtbl = &svga_buffer_vtbl;
   sbuf->b.b.screen = screen;
   sbuf->b.b.format = PIPE_FORMAT_R8_UNORM; /* ?? */
   sbuf->b.b.usage = PIPE_USAGE_IMMUTABLE;
   sbuf->b.b.bind = bind;
   sbuf->b.b.width0 = bytes;
   sbuf->b.b.height0 = 1;
   sbuf->b.b.depth0 = 1;
   sbuf->b.b.array_size = 1;

   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   debug_reference(&sbuf->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   return &sbuf->b.b;

no_sbuf:
   return NULL;
}
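

/*
 * Example usage (a sketch, not from this file): a state tracker wrapping
 * an application-owned vertex array might do something like:
 *
 *    struct pipe_resource *buf =
 *       svga_user_buffer_create(screen, app_verts, app_verts_size,
 *                               PIPE_BIND_VERTEX_BUFFER);
 *    ...
 *    pipe_resource_reference(&buf, NULL);
 *
 * where app_verts and app_verts_size are hypothetical names for the
 * caller's memory and its size in bytes.
 */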