/**************************************************************************
 *
 * Copyright 2012 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * @file
 * u_debug_flush.c - Detect and report flush- and map-related buffer misuse:
 * - Flush while synchronously mapped.
 * - Command stream reference while synchronously mapped.
 * - Synchronous map while referenced on command stream.
 * - Recursive maps.
 * - Unmap while not mapped.
 *
 * @author Thomas Hellstrom <thellstrom@vmware.com>
 */
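
/*
 * Usage sketch (hypothetical winsys code; the "my_" names are assumptions
 * for illustration, not part of this interface): a driver wraps each
 * buffer in a debug_flush_buf and brackets its real map and unmap with
 * the checks below.
 *
 *    struct my_buffer {
 *       struct debug_flush_buf *fbuf;
 *       ...
 *    };
 *
 *    static void *
 *    my_buffer_map(struct my_buffer *buf, unsigned flags)
 *    {
 *       debug_flush_map(buf->fbuf, flags);
 *       return my_os_map(buf, flags);
 *    }
 *
 *    static void
 *    my_buffer_unmap(struct my_buffer *buf)
 *    {
 *       my_os_unmap(buf);
 *       debug_flush_unmap(buf->fbuf);
 *    }
 */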

#ifdef DEBUG
#include "pipe/p_compiler.h"
#include "util/u_debug_stack.h"
#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_flush.h"
#include "util/u_hash_table.h"
#include "util/list.h"
#include "util/u_inlines.h"
#include "util/u_string.h"
#include "os/os_thread.h"
#include <stdio.h>

struct debug_flush_buf {
   /* Atomic */
   struct pipe_reference reference; /* Must be the first member. */
   mtx_t mutex;
   /* Immutable */
   boolean supports_unsync;
   unsigned bt_depth;
   /* Protected by mutex */
   boolean mapped;
   boolean mapped_sync;
   struct debug_stack_frame *map_frame;
};

struct debug_flush_item {
   struct debug_flush_buf *fbuf;
   unsigned bt_depth;
   struct debug_stack_frame *ref_frame;
};

struct debug_flush_ctx {
   /* Contexts are used by a single thread at a time */
   unsigned bt_depth;
   boolean catch_map_of_referenced;
   struct util_hash_table *ref_hash;
   struct list_head head;
};

static mtx_t list_mutex = _MTX_INITIALIZER_NP;
static struct list_head ctx_list = {&ctx_list, &ctx_list};

static struct debug_stack_frame *
debug_flush_capture_frame(int start, int depth)
{
   struct debug_stack_frame *frames;

   frames = CALLOC(depth, sizeof(*frames));
   if (!frames)
      return NULL;

   debug_backtrace_capture(frames, start, depth);
   return frames;
}

static int
debug_flush_pointer_compare(void *key1, void *key2)
{
   return (key1 == key2) ? 0 : 1;
}

static unsigned
debug_flush_pointer_hash(void *key)
{
   return (unsigned) (unsigned long) key;
}

struct debug_flush_buf *
debug_flush_buf_create(boolean supports_unsync, unsigned bt_depth)
{
   struct debug_flush_buf *fbuf = CALLOC_STRUCT(debug_flush_buf);

   if (!fbuf)
      goto out_no_buf;

   fbuf->supports_unsync = supports_unsync;
   fbuf->bt_depth = bt_depth;
   pipe_reference_init(&fbuf->reference, 1);
   (void) mtx_init(&fbuf->mutex, mtx_plain);

   return fbuf;
out_no_buf:
   debug_printf("Debug flush buffer creation failed.\n");
   debug_printf("Debug flush checking for this buffer will be incomplete.\n");
   return NULL;
}

void
debug_flush_buf_reference(struct debug_flush_buf **dst,
                          struct debug_flush_buf *src)
{
   struct debug_flush_buf *fbuf = *dst;

   /* Note: pipe_reference tolerates *dst == NULL here only because
    * "reference" is the first member of struct debug_flush_buf.
    */
   if (pipe_reference(&(*dst)->reference, &src->reference)) {
      FREE(fbuf->map_frame);
      mtx_destroy(&fbuf->mutex);
      FREE(fbuf);
   }

   *dst = src;
}

static void
debug_flush_item_destroy(struct debug_flush_item *item)
{
   debug_flush_buf_reference(&item->fbuf, NULL);

   FREE(item->ref_frame);

   FREE(item);
}

struct debug_flush_ctx *
debug_flush_ctx_create(boolean catch_reference_of_mapped,
                       unsigned bt_depth)
{
   struct debug_flush_ctx *fctx = CALLOC_STRUCT(debug_flush_ctx);

   if (!fctx)
      goto out_no_ctx;

   fctx->ref_hash = util_hash_table_create(debug_flush_pointer_hash,
                                           debug_flush_pointer_compare);

   if (!fctx->ref_hash)
      goto out_no_ref_hash;

   fctx->bt_depth = bt_depth;
   fctx->catch_map_of_referenced = catch_reference_of_mapped;
   mtx_lock(&list_mutex);
   list_addtail(&fctx->head, &ctx_list);
   mtx_unlock(&list_mutex);

   return fctx;

out_no_ref_hash:
   FREE(fctx);
out_no_ctx:
   debug_printf("Debug flush context creation failed.\n");
   debug_printf("Debug flush checking for this context will be incomplete.\n");
   return NULL;
}

static void
debug_flush_alert(const char *s, const char *op,
                  unsigned start, unsigned depth,
                  boolean continued,
                  boolean capture,
                  const struct debug_stack_frame *frame)
{
   if (capture)
      frame = debug_flush_capture_frame(start, depth);

   if (s)
      debug_printf("%s ", s);
   if (frame) {
      debug_printf("%s backtrace follows:\n", op);
      debug_backtrace_dump(frame, depth);
   } else
      debug_printf("No %s backtrace was captured.\n", op);

   if (continued)
      debug_printf("**********************************\n");
   else
      debug_printf("*********END OF MESSAGE***********\n\n\n");

   if (capture)
      FREE((void *)frame);
}


void
debug_flush_map(struct debug_flush_buf *fbuf, unsigned flags)
{
   boolean mapped_sync = FALSE;

   if (!fbuf)
      return;

   mtx_lock(&fbuf->mutex);
   if (fbuf->mapped) {
      debug_flush_alert("Recursive map detected.", "Map",
                        2, fbuf->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Previous map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->map_frame);
   } else if (!(flags & PIPE_TRANSFER_UNSYNCHRONIZED) ||
              !fbuf->supports_unsync) {
      fbuf->mapped_sync = mapped_sync = TRUE;
   }
   /* Free any frame left over from a recursive map before overwriting. */
   FREE(fbuf->map_frame);
   fbuf->map_frame = debug_flush_capture_frame(1, fbuf->bt_depth);
   fbuf->mapped = TRUE;
   mtx_unlock(&fbuf->mutex);

   if (mapped_sync) {
      struct debug_flush_ctx *fctx;

      mtx_lock(&list_mutex);
      LIST_FOR_EACH_ENTRY(fctx, &ctx_list, head) {
         struct debug_flush_item *item =
            util_hash_table_get(fctx->ref_hash, fbuf);

         if (item && fctx->catch_map_of_referenced) {
            debug_flush_alert("Already referenced map detected.",
                              "Map", 2, fbuf->bt_depth, TRUE, TRUE, NULL);
            debug_flush_alert(NULL, "Reference", 0, item->bt_depth,
                              FALSE, FALSE, item->ref_frame);
         }
      }
      mtx_unlock(&list_mutex);
   }
}

void
debug_flush_unmap(struct debug_flush_buf *fbuf)
{
   if (!fbuf)
      return;

   mtx_lock(&fbuf->mutex);
   if (!fbuf->mapped)
      debug_flush_alert("Unmap not previously mapped detected.", "Unmap",
                        2, fbuf->bt_depth, FALSE, TRUE, NULL);

   fbuf->mapped_sync = FALSE;
   fbuf->mapped = FALSE;
   FREE(fbuf->map_frame);
   fbuf->map_frame = NULL;
   mtx_unlock(&fbuf->mutex);
}


/**
 * Add the given buffer to the list of active buffers.  Active buffers
 * are those which are referenced by the command buffer currently being
 * constructed.  A usage sketch follows the function below.
 */
void
debug_flush_cb_reference(struct debug_flush_ctx *fctx,
                         struct debug_flush_buf *fbuf)
{
   struct debug_flush_item *item;

   if (!fctx || !fbuf)
      return;

   item = util_hash_table_get(fctx->ref_hash, fbuf);

   mtx_lock(&fbuf->mutex);
   if (fbuf->mapped_sync) {
      debug_flush_alert("Reference of mapped buffer detected.", "Reference",
                        2, fctx->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, FALSE,
                        FALSE, fbuf->map_frame);
   }
   mtx_unlock(&fbuf->mutex);

   if (!item) {
      item = CALLOC_STRUCT(debug_flush_item);
      if (item) {
         debug_flush_buf_reference(&item->fbuf, fbuf);
         item->bt_depth = fctx->bt_depth;
         item->ref_frame = debug_flush_capture_frame(2, item->bt_depth);
         if (util_hash_table_set(fctx->ref_hash, fbuf, item) != PIPE_OK) {
            debug_flush_item_destroy(item);
            goto out_no_item;
         }
         return;
      }
      goto out_no_item;
   }
   return;

out_no_item:
   debug_printf("Debug flush command buffer reference creation failed.\n");
   debug_printf("Debug flush checking will be incomplete "
                "for this command batch.\n");
}
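
/*
 * Usage sketch (hypothetical winsys code; "my_cs" and the other "my_"
 * names are assumptions for illustration): the submission path records
 * every buffer the current batch will reference, typically where
 * relocations are added.
 *
 *    static void
 *    my_cs_add_buffer(struct my_cs *cs, struct my_buffer *buf)
 *    {
 *       debug_flush_cb_reference(cs->fctx, buf->fbuf);
 *       ... append buf to the batch's real relocation list ...
 *    }
 */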

static enum pipe_error
debug_flush_might_flush_cb(UNUSED void *key, void *value, void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;
   struct debug_flush_buf *fbuf = item->fbuf;

   mtx_lock(&fbuf->mutex);
   if (fbuf->mapped_sync) {
      const char *reason = (const char *) data;
      char message[80];

      util_snprintf(message, sizeof(message),
                    "%s referenced mapped buffer detected.", reason);

      debug_flush_alert(message, reason, 3, item->bt_depth, TRUE, TRUE, NULL);
      debug_flush_alert(NULL, "Map", 0, fbuf->bt_depth, TRUE, FALSE,
                        fbuf->map_frame);
      debug_flush_alert(NULL, "First reference", 0, item->bt_depth, FALSE,
                        FALSE, item->ref_frame);
   }
   mtx_unlock(&fbuf->mutex);

   return PIPE_OK;
}

/**
 * Called when we're about to possibly flush a command buffer.
 * We check if any active buffers are in a mapped state.  If so, print an
 * alert.  A usage sketch follows the function below.
 */
void
debug_flush_might_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Might flush");
}
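
/*
 * Usage sketch (hypothetical; "my_cs_reserve" is an assumption for
 * illustration): call this from any path that may submit the batch as a
 * side effect, for example when reserving command space could overflow
 * the buffer and trigger an implicit flush.
 *
 *    static void *
 *    my_cs_reserve(struct my_cs *cs, size_t size)
 *    {
 *       debug_flush_might_flush(cs->fctx);
 *       ... reserve size bytes, flushing the batch if it is full ...
 *    }
 */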

static enum pipe_error
debug_flush_flush_cb(UNUSED void *key, void *value, UNUSED void *data)
{
   struct debug_flush_item *item =
      (struct debug_flush_item *) value;

   debug_flush_item_destroy(item);

   return PIPE_OK;
}


/**
 * Called when we flush a command buffer.  Two things are done:
 * 1. Check if any of the active buffers are currently mapped (alert if so).
 * 2. Discard/unreference all the active buffers.
 * A usage sketch follows the function below.
 */
void
debug_flush_flush(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_might_flush_cb,
                           "Flush");
   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   util_hash_table_clear(fctx->ref_hash);
}
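
/*
 * Usage sketch (hypothetical; "my_cs_submit" and "my_os_submit" are
 * assumptions for illustration): the real flush entry point both performs
 * the mapped-buffer check and forgets the batch's referenced buffers.
 *
 *    static void
 *    my_cs_submit(struct my_cs *cs)
 *    {
 *       my_os_submit(cs);
 *       debug_flush_flush(cs->fctx);
 *    }
 */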

void
debug_flush_ctx_destroy(struct debug_flush_ctx *fctx)
{
   if (!fctx)
      return;

   /* The context list is shared with debug_flush_map(). */
   mtx_lock(&list_mutex);
   list_del(&fctx->head);
   mtx_unlock(&list_mutex);

   util_hash_table_foreach(fctx->ref_hash,
                           debug_flush_flush_cb,
                           NULL);
   util_hash_table_clear(fctx->ref_hash);
   util_hash_table_destroy(fctx->ref_hash);
   FREE(fctx);
}
#endif /* DEBUG */