/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/errno.h>

#include "main/condrender.h"
#include "main/mtypes.h"
#include "main/state.h"
#include "brw_context.h"
#include "brw_draw.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "brw_defines.h"


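/* Set up an indirect dispatch: load the X/Y/Z group counts from the
 * indirect buffer object into the walker's dispatch-dimension registers
 * and, on Gen7, build an MI_PREDICATE that skips the walker entirely when
 * any of the dimensions is zero.
 */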
static void
prepare_indirect_gpgpu_walker(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   GLintptr indirect_offset = brw->compute.num_work_groups_offset;
   struct brw_bo *bo = brw->compute.num_work_groups_bo;

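   /* GPGPU_WALKER reads its dimensions from these registers when the
    * indirect parameter enable bit is set in the command.
    */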
   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMX, bo, indirect_offset + 0);
   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMY, bo, indirect_offset + 4);
   brw_load_register_mem(brw, GEN7_GPGPU_DISPATCHDIMZ, bo, indirect_offset + 8);

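   /* The MI_PREDICATE setup below is only used on Gen7, the only generation
    * where we predicate the walker (see brw_emit_gpgpu_walker); later gens
    * stop here.
    */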
   if (devinfo->gen > 7)
      return;

   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
   BEGIN_BATCH(7);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (7 - 2));
   OUT_BATCH(MI_PREDICATE_SRC0 + 4);
   OUT_BATCH(0u);
   OUT_BATCH(MI_PREDICATE_SRC1 + 0);
   OUT_BATCH(0u);
   OUT_BATCH(MI_PREDICATE_SRC1 + 4);
   OUT_BATCH(0u);
   ADVANCE_BATCH();

   /* Load compute_dispatch_indirect_x_size into SRC0 */
   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo, indirect_offset + 0);

   /* predicate = (compute_dispatch_indirect_x_size == 0); */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOAD |
             MI_PREDICATE_COMBINEOP_SET |
             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
   ADVANCE_BATCH();

   /* Load compute_dispatch_indirect_y_size into SRC0 */
   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo, indirect_offset + 4);

   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOAD |
             MI_PREDICATE_COMBINEOP_OR |
             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
   ADVANCE_BATCH();

   /* Load compute_dispatch_indirect_z_size into SRC0 */
   brw_load_register_mem(brw, MI_PREDICATE_SRC0, bo, indirect_offset + 8);

   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOAD |
             MI_PREDICATE_COMBINEOP_OR |
             MI_PREDICATE_COMPAREOP_SRCS_EQUAL);
   ADVANCE_BATCH();

   /* predicate = !predicate; */
   BEGIN_BATCH(1);
   OUT_BATCH(GEN7_MI_PREDICATE |
             MI_PREDICATE_LOADOP_LOADINV |
             MI_PREDICATE_COMBINEOP_OR |
             MI_PREDICATE_COMPAREOP_FALSE);
   ADVANCE_BATCH();
}

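/* Emit the GPGPU_WALKER command that actually launches the compute grid,
 * using either the group counts passed to glDispatchCompute() or the
 * indirect parameters prepared above.
 */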
static void
brw_emit_gpgpu_walker(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_cs_prog_data *prog_data =
      brw_cs_prog_data(brw->cs.base.prog_data);

   const GLuint *num_groups = brw->compute.num_work_groups;
   uint32_t indirect_flag;

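   /* For an indirect dispatch, tell the walker to take its dimensions from
    * the GPGPU_DISPATCHDIM registers, and on Gen7 predicate it so that a
    * zero-sized dispatch is skipped (see prepare_indirect_gpgpu_walker).
    */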
   if (brw->compute.num_work_groups_bo == NULL) {
      indirect_flag = 0;
   } else {
      indirect_flag =
         GEN7_GPGPU_INDIRECT_PARAMETER_ENABLE |
         (devinfo->gen == 7 ? GEN7_GPGPU_PREDICATE_ENABLE : 0);
      prepare_indirect_gpgpu_walker(brw);
   }

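   /* A thread group of group_size invocations is executed by
    * ceil(group_size / simd_size) hardware threads.
    */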
   const unsigned simd_size = prog_data->simd_size;
   unsigned group_size = prog_data->local_size[0] *
      prog_data->local_size[1] * prog_data->local_size[2];
   unsigned thread_width_max =
      (group_size + simd_size - 1) / simd_size;

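   /* Right execution mask: start with all simd_size channels enabled, then
    * disable the channels that fall past the end of the group when
    * group_size is not a multiple of simd_size.
    */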
   uint32_t right_mask = 0xffffffffu >> (32 - simd_size);
   const unsigned right_non_aligned = group_size & (simd_size - 1);
   if (right_non_aligned != 0)
      right_mask >>= (simd_size - right_non_aligned);

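   /* GPGPU_WALKER is 11 dwords on Gen7 and 15 on Gen8+, which adds the two
    * indirect-data dwords and the two MBZ dwords emitted below.
    */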
   uint32_t dwords = devinfo->gen < 8 ? 11 : 15;
   BEGIN_BATCH(dwords);
   OUT_BATCH(GPGPU_WALKER << 16 | (dwords - 2) | indirect_flag);
   OUT_BATCH(0);
   if (devinfo->gen >= 8) {
      OUT_BATCH(0);                     /* Indirect Data Length */
      OUT_BATCH(0);                     /* Indirect Data Start Address */
   }
   assert(thread_width_max <= brw->screen->devinfo.max_cs_threads);
   OUT_BATCH(SET_FIELD(simd_size / 16, GPGPU_WALKER_SIMD_SIZE) |
             SET_FIELD(thread_width_max - 1, GPGPU_WALKER_THREAD_WIDTH_MAX));
   OUT_BATCH(0);                        /* Thread Group ID Starting X */
   if (devinfo->gen >= 8)
      OUT_BATCH(0);                     /* MBZ */
   OUT_BATCH(num_groups[0]);            /* Thread Group ID X Dimension */
   OUT_BATCH(0);                        /* Thread Group ID Starting Y */
   if (devinfo->gen >= 8)
      OUT_BATCH(0);                     /* MBZ */
   OUT_BATCH(num_groups[1]);            /* Thread Group ID Y Dimension */
   OUT_BATCH(0);                        /* Thread Group ID Starting/Resume Z */
   OUT_BATCH(num_groups[2]);            /* Thread Group ID Z Dimension */
   OUT_BATCH(right_mask);               /* Right Execution Mask */
   OUT_BATCH(0xffffffff);               /* Bottom Execution Mask */
   ADVANCE_BATCH();

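   /* Finish the dispatch with a MEDIA_STATE_FLUSH. */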
   BEGIN_BATCH(2);
   OUT_BATCH(MEDIA_STATE_FLUSH << 16 | (2 - 2));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}


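/* Shared path for direct and indirect dispatches: validate GL state, upload
 * the compute pipeline state, and emit the walker, retrying once in a fresh
 * batch if we run out of aperture space.
 */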
static void
brw_dispatch_compute_common(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   bool fail_next = false;

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (ctx->NewState)
      _mesa_update_state(ctx);

   brw_validate_textures(brw);

   brw_predraw_resolve_inputs(brw, false, NULL);

   /* Flush the batch if the batch/state buffers are nearly full.  We can
    * grow them if needed, but this is not free, so we'd like to avoid it.
    */
   intel_batchbuffer_require_space(brw, 600, RENDER_RING);
   brw_require_statebuffer_space(brw, 2500);
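   /* Save the batch state so we can rewind to this point and retry in a
    * fresh batch if the dispatch turns out not to fit in the aperture.
    */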
   intel_batchbuffer_save_state(brw);

 retry:
   brw->batch.no_wrap = true;
   brw_upload_compute_state(brw);

   brw_emit_gpgpu_walker(brw);

   brw->batch.no_wrap = false;

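   /* If what we emitted doesn't fit in the aperture, rewind to the saved
    * state, flush the previous contents, and try again once in an empty
    * batch.  If it still doesn't fit, flush anyway and warn.
    */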
   if (!brw_batch_has_aperture_space(brw, 0)) {
      if (!fail_next) {
         intel_batchbuffer_reset_to_saved(brw);
         intel_batchbuffer_flush(brw);
         fail_next = true;
         goto retry;
      } else {
         int ret = intel_batchbuffer_flush(brw);
         WARN_ONCE(ret == -ENOSPC,
                   "i965: Single compute shader dispatch "
                   "exceeded available aperture space\n");
      }
   }

   /* Now that we know we haven't run out of aperture space, we can safely
    * reset the dirty bits.
    */
   brw_compute_state_finished(brw);

   if (brw->always_flush_batch)
      intel_batchbuffer_flush(brw);

   brw_program_cache_check_size(brw);

   /* Note: since compute shaders can't write to framebuffers, there's no need
    * to call brw_postdraw_set_buffers_need_resolve().
    */
}

static void
brw_dispatch_compute(struct gl_context *ctx, const GLuint *num_groups)
{
   struct brw_context *brw = brw_context(ctx);

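   /* A NULL BO marks this as a direct dispatch; the group counts written
    * into the GPGPU_WALKER command come straight from the API call.
    */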
   brw->compute.num_work_groups_bo = NULL;
   brw->compute.num_work_groups = num_groups;
   ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;

   brw_dispatch_compute_common(ctx);
}

static void
brw_dispatch_compute_indirect(struct gl_context *ctx, GLintptr indirect)
{
   struct brw_context *brw = brw_context(ctx);
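   /* Dummy group counts for the command's dimension dwords; the real counts
    * live in the indirect buffer, which prepare_indirect_gpgpu_walker()
    * loads into the dispatch-dimension registers.
    */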
   static const GLuint indirect_group_counts[3] = { 0, 0, 0 };
   struct gl_buffer_object *indirect_buffer = ctx->DispatchIndirectBuffer;
   struct brw_bo *bo =
      intel_bufferobj_buffer(brw,
                             intel_buffer_object(indirect_buffer),
                             indirect, 3 * sizeof(GLuint), false);

   brw->compute.num_work_groups_bo = bo;
   brw->compute.num_work_groups_offset = indirect;
   brw->compute.num_work_groups = indirect_group_counts;
   ctx->NewDriverState |= BRW_NEW_CS_WORK_GROUPS;

   brw_dispatch_compute_common(ctx);
}

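/* Plug the compute dispatch entry points into the driver function table. */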
void
brw_init_compute_functions(struct dd_function_table *functions)
{
   functions->DispatchCompute = brw_dispatch_compute;
   functions->DispatchComputeIndirect = brw_dispatch_compute_indirect;
}