/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Kenneth Graunke <kenneth@whitecape.org>
 */

/** @file gen6_queryobj.c
 *
 * Support for query objects (GL_ARB_occlusion_query, GL_ARB_timer_query,
 * GL_EXT_transform_feedback, and friends) on platforms that support
 * hardware contexts (Gen6+).
 */
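
/* For orientation, a minimal usage sketch of the GL client calls that reach
 * the driver hooks below; draw_scene() is a hypothetical helper:
 *
 *    GLuint q;
 *    GLuint64 samples;
 *    glGenQueries(1, &q);
 *    glBeginQuery(GL_SAMPLES_PASSED, q);    // -> gen6_begin_query()
 *    draw_scene();
 *    glEndQuery(GL_SAMPLES_PASSED);         // -> gen6_end_query()
 *    glGetQueryObjectui64v(q, GL_QUERY_RESULT, &samples);
 *                                           // -> gen6_wait_query()
 *    glDeleteQueries(1, &q);
 */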
#include "main/imports.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"

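/* Query BO layout used throughout this file (a sketch inferred from the
 * writes below): the "begin" snapshot lands in results[0] (offset 0), the
 * "end" snapshot in results[1] (offset 8, i.e. idx * sizeof(uint64_t)), and
 * the ARB_query_buffer_object availability flag at offset 16
 * (2 * sizeof(uint64_t)).
 */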
static inline void
set_query_availability(struct brw_context *brw, struct brw_query_object *query,
                       bool available)
{
   /* For platforms that support ARB_query_buffer_object, we write the
    * query availability for "pipelined" queries.
    *
    * Most counter snapshots are written by the command streamer, by
    * doing a CS stall and then MI_STORE_REGISTER_MEM.  For these
    * counters, the CS stall guarantees that the results will be
    * available when subsequent CS commands run.  So we don't need to
    * do any additional tracking.
    *
    * Other counters (occlusion queries and timestamp) are written by
    * PIPE_CONTROL, without a CS stall.  This means that we can't be
    * sure whether the writes have landed yet or not.  Performing a
    * PIPE_CONTROL with an immediate write will synchronize with
    * those earlier writes, so we write 1 when the value has landed.
    */
   if (brw->ctx.Extensions.ARB_query_buffer_object &&
       brw_is_query_pipelined(query)) {
      brw_emit_pipe_control_write(brw,
                                  PIPE_CONTROL_WRITE_IMMEDIATE,
                                  query->bo, 2 * sizeof(uint64_t),
                                  available, 0);
   }
}

static void
write_primitives_generated(struct brw_context *brw,
                           drm_intel_bo *query_bo, int stream, int idx)
{
   brw_emit_mi_flush(brw);

   if (brw->gen >= 7 && stream > 0) {
      brw_store_register_mem64(brw, query_bo,
                               GEN7_SO_PRIM_STORAGE_NEEDED(stream),
                               idx * sizeof(uint64_t));
   } else {
      brw_store_register_mem64(brw, query_bo, CL_INVOCATION_COUNT,
                               idx * sizeof(uint64_t));
   }
}

static void
write_xfb_primitives_written(struct brw_context *brw,
                             drm_intel_bo *bo, int stream, int idx)
{
   brw_emit_mi_flush(brw);

   if (brw->gen >= 7) {
      brw_store_register_mem64(brw, bo, GEN7_SO_NUM_PRIMS_WRITTEN(stream),
                               idx * sizeof(uint64_t));
   } else {
      brw_store_register_mem64(brw, bo, GEN6_SO_NUM_PRIMS_WRITTEN,
                               idx * sizeof(uint64_t));
   }
}

static inline int
pipeline_target_to_index(int target)
{
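   /* The ARB_pipeline_statistics_query targets from GL_VERTICES_SUBMITTED_ARB
    * to GL_CLIPPING_OUTPUT_PRIMITIVES_ARB are assigned contiguous enum
    * values, so subtracting the first one yields the lookup-table index.
    * GL_GEOMETRY_SHADER_INVOCATIONS predates the extension and has an
    * unrelated enum value, so it's special-cased into the last slot.
    */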
   if (target == GL_GEOMETRY_SHADER_INVOCATIONS)
      return MAX_PIPELINE_STATISTICS - 1;
   else
      return target - GL_VERTICES_SUBMITTED_ARB;
}

static void
emit_pipeline_stat(struct brw_context *brw, drm_intel_bo *bo,
                   int stream, int target, int idx)
{
   /* One source of confusion is the tessellation shader statistics. The
    * hardware has no statistics specific to the TE unit. Ideally we could have
    * the HS primitives for TESS_CONTROL_SHADER_PATCHES_ARB, and the DS
    * invocations as the register for TESS_EVALUATION_SHADER_INVOCATIONS_ARB.
    * Unfortunately we don't have HS primitives, we only have HS invocations.
    */

   /* Everything except GEOMETRY_SHADER_INVOCATIONS can be kept in a simple
    * lookup table.
    */
   static const uint32_t target_to_register[] = {
      IA_VERTICES_COUNT,   /* VERTICES_SUBMITTED */
      IA_PRIMITIVES_COUNT, /* PRIMITIVES_SUBMITTED */
      VS_INVOCATION_COUNT, /* VERTEX_SHADER_INVOCATIONS */
      HS_INVOCATION_COUNT, /* TESS_CONTROL_SHADER_PATCHES */
      DS_INVOCATION_COUNT, /* TESS_EVALUATION_SHADER_INVOCATIONS */
      GS_PRIMITIVES_COUNT, /* GEOMETRY_SHADER_PRIMITIVES_EMITTED */
      PS_INVOCATION_COUNT, /* FRAGMENT_SHADER_INVOCATIONS */
      CS_INVOCATION_COUNT, /* COMPUTE_SHADER_INVOCATIONS */
      CL_INVOCATION_COUNT, /* CLIPPING_INPUT_PRIMITIVES */
      CL_PRIMITIVES_COUNT, /* CLIPPING_OUTPUT_PRIMITIVES */
      GS_INVOCATION_COUNT  /* This one is special... */
   };
   STATIC_ASSERT(ARRAY_SIZE(target_to_register) == MAX_PIPELINE_STATISTICS);
   uint32_t reg = target_to_register[pipeline_target_to_index(target)];
   /* Gen6 GS code counts full primitives, that is, it won't count individual
    * triangles in a triangle strip. Use CL_INVOCATION_COUNT for that.
    */
   if (brw->gen == 6 && target == GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB)
      reg = CL_INVOCATION_COUNT;
   assert(reg != 0);

   /* Emit a flush to make sure various parts of the pipeline are complete and
    * we get an accurate value.
    */
   brw_emit_mi_flush(brw);

   brw_store_register_mem64(brw, bo, reg, idx * sizeof(uint64_t));
}


/**
 * Wait on the query object's BO and calculate the final result.
 */
static void
gen6_queryobj_get_results(struct gl_context *ctx,
                          struct brw_query_object *query)
{
   struct brw_context *brw = brw_context(ctx);

   if (query->bo == NULL)
      return;

   brw_bo_map(brw, query->bo, false, "query object");
   uint64_t *results = query->bo->virtual;
   switch (query->Base.Target) {
   case GL_TIME_ELAPSED:
      /* The query BO contains the starting and ending timestamps.
       * Subtract the two and convert to nanoseconds.
       */
      query->Base.Result += 80 * (results[1] - results[0]);
      break;

   case GL_TIMESTAMP:
      /* Our timer is a clock that increments every 80ns (regardless of
       * other clock scaling in the system).  The timestamp register we can
       * read for glGetInteger64v(GL_TIMESTAMP) masks out the top 32 bits,
       * so we do that here too to let the two counters be compared against
       * each other.
       *
       * If we just multiplied that 32 bits of data by 80, it would roll
       * over at a non-power-of-two, so an application couldn't use
       * GL_QUERY_COUNTER_BITS to handle rollover correctly.  Instead, we
       * report 36 bits and truncate at that (rolling over 5 times as often
       * as the HW counter), and when the 32-bit counter rolls over, it
       * happens to also be at a rollover in the reported value from near
       * (1<<36) to 0.
       *
       * The low 32 bits roll over in ~343 seconds.  Our 36-bit result
       * rolls over every ~69 seconds.
       *
       * The query BO contains a single timestamp value in results[0].
       */
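      /* Worked arithmetic for the figures above: 2^32 ticks at 80 ns/tick
       * is ~343.6 seconds before the raw counter wraps, and a 36-bit
       * nanosecond result wraps at 2^36 ns, i.e. ~68.7 seconds.
       */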
      query->Base.Result = 80 * (results[0] & 0xffffffff);
      query->Base.Result &= (1ull << 36) - 1;
      break;

   case GL_SAMPLES_PASSED_ARB:
      /* We need to use += rather than = here since some BLT-based operations
       * may have added additional samples to our occlusion query value.
       */
      query->Base.Result += results[1] - results[0];
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
      if (results[0] != results[1])
         query->Base.Result = true;
      break;

   case GL_PRIMITIVES_GENERATED:
   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
   case GL_VERTICES_SUBMITTED_ARB:
   case GL_PRIMITIVES_SUBMITTED_ARB:
   case GL_VERTEX_SHADER_INVOCATIONS_ARB:
   case GL_GEOMETRY_SHADER_INVOCATIONS:
   case GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB:
   case GL_CLIPPING_INPUT_PRIMITIVES_ARB:
   case GL_CLIPPING_OUTPUT_PRIMITIVES_ARB:
   case GL_COMPUTE_SHADER_INVOCATIONS_ARB:
   case GL_TESS_CONTROL_SHADER_PATCHES_ARB:
   case GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB:
      query->Base.Result = results[1] - results[0];
      break;

   case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
      query->Base.Result = (results[1] - results[0]);
      /* Implement the "WaDividePSInvocationCountBy4:HSW,BDW" workaround:
       * "Invocation counter is 4 times actual.  WA: SW to divide HW reported
       *  PS Invocations value by 4."
       *
       * Prior to Haswell, invocation count was counted by the WM, and it
       * buggily counted invocations in units of subspans (2x2 units). To get
       * the correct value, the CS multiplied this by 4. With Haswell, the
       * logic moved and correctly emitted the number of pixel shader
       * invocations, but whoever moved it forgot to undo the multiply by 4.
       */
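      /* Illustrative arithmetic: a draw that actually ran 100 pixel shader
       * invocations is reported as 400 by the HW on these platforms; the
       * divide below restores 100.
       */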
      if (brw->gen == 8 || brw->is_haswell)
         query->Base.Result /= 4;
      break;

   default:
      unreachable("Unrecognized query target in gen6_queryobj_get_results()");
   }
   drm_intel_bo_unmap(query->bo);

   /* Now that we've processed the data stored in the query's buffer object,
    * we can release it.
    */
   drm_intel_bo_unreference(query->bo);
   query->bo = NULL;

   query->Base.Ready = true;
}

/**
 * Driver hook for glBeginQuery().
 *
 * Initializes driver structures and emits any GPU commands required to begin
 * recording data for the query.
 */
static void
gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   /* Since we're starting a new query, we need to throw away old results. */
   drm_intel_bo_unreference(query->bo);
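   /* A page-sized BO is more than enough: the query uses only the first few
    * 64-bit slots (snapshots at offsets 0 and 8, availability at 16).  The
    * two 4096 arguments to drm_intel_bo_alloc() are size and alignment.
    */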
   query->bo = drm_intel_bo_alloc(brw->bufmgr, "query results", 4096, 4096);

   /* For ARB_query_buffer_object: The result is not available */
   set_query_availability(brw, query, false);

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED:
      /* For timestamp queries, we record the starting time right away so that
       * we measure the full time between BeginQuery and EndQuery.  There's
       * some debate about whether this is the right thing to do.  Our decision
       * is based on the following text from the ARB_timer_query extension:
       *
       * "(5) Should the extension measure total time elapsed between the full
       *      completion of the BeginQuery and EndQuery commands, or just time
       *      spent in the graphics library?
       *
       *  RESOLVED:  This extension will measure the total time elapsed
       *  between the full completion of these commands.  Future extensions
       *  may implement a query to determine time elapsed at different stages
       *  of the graphics pipeline."
       *
       * We write a starting timestamp now (at index 0).  At EndQuery() time,
       * we'll write a second timestamp (at index 1), and subtract the two to
       * obtain the time elapsed.  Notably, this includes time elapsed while
       * the system was doing other work, such as running other applications.
       */
      brw_write_timestamp(brw, query->bo, 0);
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
   case GL_SAMPLES_PASSED_ARB:
      brw_write_depth_count(brw, query->bo, 0);
      break;

   case GL_PRIMITIVES_GENERATED:
      write_primitives_generated(brw, query->bo, query->Base.Stream, 0);
      if (query->Base.Stream == 0)
         ctx->NewDriverState |= BRW_NEW_RASTERIZER_DISCARD;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      write_xfb_primitives_written(brw, query->bo, query->Base.Stream, 0);
      break;

   case GL_VERTICES_SUBMITTED_ARB:
   case GL_PRIMITIVES_SUBMITTED_ARB:
   case GL_VERTEX_SHADER_INVOCATIONS_ARB:
   case GL_GEOMETRY_SHADER_INVOCATIONS:
   case GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB:
   case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
   case GL_CLIPPING_INPUT_PRIMITIVES_ARB:
   case GL_CLIPPING_OUTPUT_PRIMITIVES_ARB:
   case GL_COMPUTE_SHADER_INVOCATIONS_ARB:
   case GL_TESS_CONTROL_SHADER_PATCHES_ARB:
   case GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB:
      emit_pipeline_stat(brw, query->bo,
                         query->Base.Stream, query->Base.Target, 0);
      break;

   default:
      unreachable("Unrecognized query target in gen6_begin_query()");
   }
}

/**
 * Driver hook for glEndQuery().
 *
 * Emits GPU commands to record a final query value, ending any data capturing.
 * However, the final result isn't necessarily available until the GPU processes
 * those commands.  gen6_queryobj_get_results() processes the captured data to
 * produce the final result.
 */
static void
gen6_end_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED:
      brw_write_timestamp(brw, query->bo, 1);
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
   case GL_SAMPLES_PASSED_ARB:
      brw_write_depth_count(brw, query->bo, 1);
      break;

   case GL_PRIMITIVES_GENERATED:
      write_primitives_generated(brw, query->bo, query->Base.Stream, 1);
      if (query->Base.Stream == 0)
         ctx->NewDriverState |= BRW_NEW_RASTERIZER_DISCARD;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      write_xfb_primitives_written(brw, query->bo, query->Base.Stream, 1);
      break;

   case GL_VERTICES_SUBMITTED_ARB:
   case GL_PRIMITIVES_SUBMITTED_ARB:
   case GL_VERTEX_SHADER_INVOCATIONS_ARB:
   case GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB:
   case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
   case GL_COMPUTE_SHADER_INVOCATIONS_ARB:
   case GL_CLIPPING_INPUT_PRIMITIVES_ARB:
   case GL_CLIPPING_OUTPUT_PRIMITIVES_ARB:
   case GL_GEOMETRY_SHADER_INVOCATIONS:
   case GL_TESS_CONTROL_SHADER_PATCHES_ARB:
   case GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB:
      emit_pipeline_stat(brw, query->bo,
                         query->Base.Stream, query->Base.Target, 1);
      break;

   default:
      unreachable("Unrecognized query target in gen6_end_query()");
   }

   /* The current batch contains the commands to handle EndQuery(),
    * but they won't actually execute until it is flushed.
    */
   query->flushed = false;

   /* For ARB_query_buffer_object: The result is now available */
   set_query_availability(brw, query, true);
}

/**
 * Flush the batch if it still references the query object BO.
 */
static void
flush_batch_if_needed(struct brw_context *brw, struct brw_query_object *query)
{
   /* If the batch doesn't reference the BO, it must have been flushed
    * (for example, due to being full).  Record that it's been flushed.
    */
   query->flushed = query->flushed ||
      !drm_intel_bo_references(brw->batch.bo, query->bo);

   if (!query->flushed)
      intel_batchbuffer_flush(brw);
}

/**
 * The WaitQuery() driver hook.
 *
 * Wait for a query result to become available and return it.  This is the
 * backing for glGetQueryObjectiv() with the GL_QUERY_RESULT pname.
 */
static void gen6_wait_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   /* If the application has requested the query result, but this batch is
    * still contributing to it, flush it now to finish that work so the
    * result will become available (eventually).
    */
   flush_batch_if_needed(brw, query);

   gen6_queryobj_get_results(ctx, query);
}

/**
 * The CheckQuery() driver hook.
 *
 * Checks whether a query result is ready yet.  If not, flushes.
 * This is the backing for glGetQueryObjectiv()'s GL_QUERY_RESULT_AVAILABLE
 * pname.
 */
static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   /* If query->bo is NULL, we've already gathered the results - this is a
    * redundant CheckQuery call.  Ignore it.
    */
   if (query->bo == NULL)
      return;

   /* From the GL_ARB_occlusion_query spec:
    *
    *     "Instead of allowing for an infinite loop, performing a
    *      QUERY_RESULT_AVAILABLE_ARB will perform a flush if the result is
    *      not ready yet on the first time it is queried.  This ensures that
    *      the async query will return true in finite time."
    */
   flush_batch_if_needed(brw, query);

   if (!drm_intel_bo_busy(query->bo)) {
      gen6_queryobj_get_results(ctx, query);
   }
}

static void
gen6_query_counter(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;
   brw_query_counter(ctx, q);
   set_query_availability(brw, query, true);
}

/* Initialize Gen6+-specific query object functions. */
void gen6_init_queryobj_functions(struct dd_function_table *functions)
{
   functions->BeginQuery = gen6_begin_query;
   functions->EndQuery = gen6_end_query;
   functions->CheckQuery = gen6_check_query;
   functions->WaitQuery = gen6_wait_query;
   functions->QueryCounter = gen6_query_counter;
}