/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */


#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "compiler/brw_eu_defines.h"

#include "main/framebuffer.h"
#include "main/fbobject.h"
#include "main/format_utils.h"
#include "main/glformats.h"
/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void
upload_pipelined_state_pointers(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen == 5) {
      /* Erratum workaround: we must flush before changing the clip unit's
       * maximum thread count.
       */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

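   /* GPU command headers encode the packet length as the total DWord
    * count minus two, hence the (7 - 2) below.
    */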
   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->batch.state.bo, 0, brw->vs.base.state_offset);
   /* Bit 0 of the GS and CLIP state pointers is the unit's enable bit. */
   if (brw->ff_gs.prog_active)
      OUT_RELOC(brw->batch.state.bo, 0, brw->ff_gs.state_offset | 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->batch.state.bo, 0, brw->clip.state_offset | 1);
   OUT_RELOC(brw->batch.state.bo, 0, brw->sf.state_offset);
   OUT_RELOC(brw->batch.state.bo, 0, brw->wm.base.state_offset);
   OUT_RELOC(brw->batch.state.bo, 0, brw->cc.state_offset);
   ADVANCE_BATCH();

   brw->ctx.NewDriverState |= BRW_NEW_PSP;
}

static void
upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP |
             BRW_NEW_FF_GS_PROG_DATA |
             BRW_NEW_GEN4_UNIT_STATE |
             BRW_NEW_STATE_BASE_ADDRESS |
             BRW_NEW_URB_FENCE,
   },
   .emit = upload_psp_urb_cbs,
};

uint32_t
brw_depthbuffer_format(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *srb;

   /* If there is no depth attachment but stencil is bound to a packed
    * depth/stencil renderbuffer, program the depth buffer with the packed
    * format instead.
    */
   if (!drb &&
       (srb = intel_get_renderbuffer(fb, BUFFER_STENCIL)) &&
       !srb->mt->stencil_mt &&
       (intel_rb_format(srb) == MESA_FORMAT_Z24_UNORM_S8_UINT ||
        intel_rb_format(srb) == MESA_FORMAT_Z32_FLOAT_S8X24_UINT)) {
      drb = srb;
   }

   if (!drb)
      return BRW_DEPTHFORMAT_D32_FLOAT;

   return brw_depth_format(brw, drb->mt->format);
}

static struct intel_mipmap_tree *
get_stencil_miptree(struct intel_renderbuffer *irb)
{
   if (!irb)
      return NULL;
   if (irb->mt->stencil_mt)
      return irb->mt->stencil_mt;
   return intel_renderbuffer_get_mt(irb);
}

static bool
rebase_depth_stencil(struct brw_context *brw, struct intel_renderbuffer *irb,
                     bool invalidate)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   uint32_t tile_mask_x = 0, tile_mask_y = 0;

   intel_get_tile_masks(irb->mt->surf.tiling, irb->mt->cpp,
                        &tile_mask_x, &tile_mask_y);
   assert(!intel_miptree_level_has_hiz(irb->mt, irb->mt_level));

   uint32_t tile_x = irb->draw_x & tile_mask_x;
   uint32_t tile_y = irb->draw_y & tile_mask_y;

   /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
    * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
    * Coordinate Offset X/Y":
    *
    *   "The 3 LSBs of both offsets must be zero to ensure correct
    *   alignment"
    */
   bool rebase = tile_x & 7 || tile_y & 7;

   /* We didn't even have intra-tile offsets before g45. */
   rebase |= (!devinfo->has_surface_tile_offset && (tile_x || tile_y));

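   /* A worked example with illustrative numbers: at 4 bytes per pixel, a
    * Y tile is 128 bytes x 32 rows, i.e. 32x32 pixels, so both tile masks
    * are 31.  A slice at draw_x/draw_y = (100, 70) yields tile_x/tile_y =
    * (4, 6); both have nonzero low 3 bits, so the level must be rebased.
    * At (104, 72) the intra-tile offsets (8, 8) satisfy the rule and can
    * be programmed directly.
    */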
   if (rebase) {
      perf_debug("HW workaround: blitting depth level %d to a temporary "
                 "to fix alignment (depth tile offset %d,%d)\n",
                 irb->mt_level, tile_x, tile_y);
      intel_renderbuffer_move_to_temp(brw, irb, invalidate);

      /* The miptree now contains only a single slice. */
      brw->depthstencil.tile_x = 0;
      brw->depthstencil.tile_y = 0;
      brw->depthstencil.depth_offset = 0;
      return true;
   }

   /* While we just tried to get everything aligned, we may have failed to do
    * so in the case of rendering to array or 3D textures, where nonzero faces
    * will still have an offset post-rebase.  At least give an informative
    * warning.
    */
   WARN_ONCE((tile_x & 7) || (tile_y & 7),
             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
             "Truncating offset (%u:%u), bad rendering may occur.\n",
             tile_x, tile_y);
   tile_x &= ~7;
   tile_y &= ~7;

   brw->depthstencil.tile_x = tile_x;
   brw->depthstencil.tile_y = tile_y;
   brw->depthstencil.depth_offset = intel_miptree_get_aligned_offset(
                                       irb->mt,
                                       irb->draw_x & ~tile_mask_x,
                                       irb->draw_y & ~tile_mask_y);

   return false;
}

void
brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                      GLbitfield clear_mask)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL;
   bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
   bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;

   if (depth_irb)
      depth_mt = depth_irb->mt;

   /* Initialize brw->depthstencil to 'nop' workaround state. */
   brw->depthstencil.tile_x = 0;
   brw->depthstencil.tile_y = 0;
   brw->depthstencil.depth_offset = 0;

   /* Gen6+ doesn't require the workarounds, since we always program the
    * surface state at the start of the whole surface.
    */
   if (devinfo->gen >= 6)
      return;

   /* Check if the depth buffer is in a depth/stencil format.  If so, it's
    * only safe to invalidate it if we're also clearing stencil.
    */
   if (depth_irb && invalidate_depth &&
       _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL)
      invalidate_depth = invalidate_stencil && stencil_irb;

   if (depth_irb) {
      if (rebase_depth_stencil(brw, depth_irb, invalidate_depth)) {
         /* In the case of stencil_irb being the same packed depth/stencil
          * texture but not the same rb, make it point at our rebased mt, too.
          */
         if (stencil_irb &&
             stencil_irb != depth_irb &&
             stencil_irb->mt == depth_mt) {
            intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
            intel_renderbuffer_set_draw_offset(stencil_irb);
         }
      }

      if (stencil_irb) {
         assert(stencil_irb->mt == depth_irb->mt);
         assert(stencil_irb->mt_level == depth_irb->mt_level);
         assert(stencil_irb->mt_layer == depth_irb->mt_layer);
      }
   }

   /* If there is no depth attachment, consider whether stencil needs a
    * rebase.
    */
   if (!depth_irb && stencil_irb)
      rebase_depth_stencil(brw, stencil_irb, invalidate_stencil);
}

void
brw_emit_depthbuffer(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = intel_renderbuffer_get_mt(depth_irb);
   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
   uint32_t tile_x = brw->depthstencil.tile_x;
   uint32_t tile_y = brw->depthstencil.tile_y;
   bool hiz = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
   bool separate_stencil = false;
   uint32_t depth_surface_type = BRW_SURFACE_NULL;
   uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
   uint32_t depth_offset = 0;
   uint32_t width = 1, height = 1;

   if (stencil_mt) {
      separate_stencil = stencil_mt->format == MESA_FORMAT_S_UINT8;

      /* Gen7+ supports only separate stencil. */
      assert(separate_stencil || devinfo->gen < 7);
   }

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb && !separate_stencil) {
      depth_irb = stencil_irb;
      depth_mt = stencil_mt;
   }

   if (depth_irb && depth_mt) {
      /* When 3DSTATE_DEPTH_BUFFER.Separate_Stencil_Enable is set, then
       * 3DSTATE_DEPTH_BUFFER.Surface_Format is not permitted to be a packed
       * depth/stencil format.
       *
       * Gens prior to 7 require that HiZ_Enable and Separate_Stencil_Enable
       * be set to the same value.  Gen7 and later implicitly always set
       * Separate_Stencil_Enable; software cannot disable it.
       */
      if ((devinfo->gen < 7 && hiz) || devinfo->gen >= 7) {
         assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
      }

      /* Prior to Gen7, if using separate stencil, HiZ must be enabled. */
      assert(devinfo->gen >= 7 || !separate_stencil || hiz);

      assert(devinfo->gen < 6 || depth_mt->surf.tiling == ISL_TILING_Y0);
      assert(!hiz || depth_mt->surf.tiling == ISL_TILING_Y0);

      depthbuffer_format = brw_depthbuffer_format(brw);
      depth_surface_type = BRW_SURFACE_2D;
      depth_offset = brw->depthstencil.depth_offset;
      width = depth_irb->Base.Base.Width;
      height = depth_irb->Base.Base.Height;
   } else if (separate_stencil) {
      /*
       * There exists a separate stencil buffer but no depth buffer.
       *
       * The stencil buffer inherits most of its fields from
       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
       * height.
       *
       * The tiled bit must be set.  From the Sandybridge PRM, Volume 2,
       * Part 1, Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled
       * Surface:
       *     [DevGT+]: This field must be set to TRUE.
       */
      assert(brw->has_separate_stencil);

      depth_surface_type = BRW_SURFACE_2D;
      width = stencil_irb->Base.Base.Width;
      height = stencil_irb->Base.Base.Height;
   }

   if (depth_mt)
      brw_cache_flush_for_depth(brw, depth_mt->bo);
   if (stencil_mt)
      brw_cache_flush_for_depth(brw, stencil_mt->bo);

   brw->vtbl.emit_depth_stencil_hiz(brw, depth_mt, depth_offset,
                                    depthbuffer_format, depth_surface_type,
                                    stencil_mt, hiz, separate_stencil,
                                    width, height, tile_x, tile_y);
}

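/**
 * Convert a depth clear value in [0.0, 1.0] to the depth buffer's bit
 * layout.  For example, 1.0 in MESA_FORMAT_Z24_UNORM_X8_UINT becomes
 * 1.0 * (2^24 - 1) = 0xffffff, and 0.5 truncates to 0x7fffff.
 */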
uint32_t
brw_convert_depth_value(mesa_format format, float value)
{
   switch (format) {
   case MESA_FORMAT_Z_FLOAT32:
      return float_as_int(value);
   case MESA_FORMAT_Z_UNORM16:
      return value * ((1u << 16) - 1);
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
      return value * ((1u << 24) - 1);
   default:
      unreachable("Invalid depth format");
   }
}

void
brw_emit_depth_stencil_hiz(struct brw_context *brw,
                           struct intel_mipmap_tree *depth_mt,
                           uint32_t depth_offset, uint32_t depthbuffer_format,
                           uint32_t depth_surface_type,
                           struct intel_mipmap_tree *stencil_mt,
                           bool hiz, bool separate_stencil,
                           uint32_t width, uint32_t height,
                           uint32_t tile_x, uint32_t tile_y)
{
   (void)hiz;
   (void)separate_stencil;
   (void)stencil_mt;

   assert(!hiz);
   assert(!separate_stencil);

   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const unsigned len = (devinfo->is_g4x || devinfo->gen == 5) ? 6 : 5;

   BEGIN_BATCH(len);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
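   /* DWord 1, per the Gen4 3DSTATE_DEPTH_BUFFER layout: surface pitch
    * minus one in the low bits, depth format at bit 18, Y-major tile
    * walk at bit 26, the mandatory "tiled surface" bit at 27, and the
    * surface type at bits 31:29.
    */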
   OUT_BATCH((depth_mt ? depth_mt->surf.row_pitch - 1 : 0) |
             (depthbuffer_format << 18) |
             (BRW_TILEWALK_YMAJOR << 26) |
             (1 << 27) |
             (depth_surface_type << 29));

   if (depth_mt) {
      OUT_RELOC(depth_mt->bo, RELOC_WRITE, depth_offset);
   } else {
      OUT_BATCH(0);
   }

   OUT_BATCH(((width + tile_x - 1) << 6) |
             ((height + tile_y - 1) << 19));
   OUT_BATCH(0);

   if (devinfo->is_g4x || devinfo->gen >= 5)
      OUT_BATCH(tile_x | (tile_y << 16));
   else
      assert(tile_x == 0 && tile_y == 0);

   if (devinfo->gen >= 6)
      OUT_BATCH(0);

   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH |
             BRW_NEW_BLORP,
   },
   .emit = brw_emit_depthbuffer,
};

void
brw_emit_select_pipeline(struct brw_context *brw, enum brw_pipeline pipeline)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;
   const uint32_t _3DSTATE_PIPELINE_SELECT =
      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;

   if (devinfo->gen >= 8 && devinfo->gen < 10) {
      /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
       *
       *   Software must clear the COLOR_CALC_STATE Valid field in
       *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
       *   with Pipeline Select set to GPGPU.
       *
       * The internal hardware docs recommend the same workaround for Gen9
       * hardware too.
       */
      if (pipeline == BRW_COMPUTE_PIPELINE) {
         BEGIN_BATCH(2);
         OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
         OUT_BATCH(0);
         ADVANCE_BATCH();

         brw->ctx.NewDriverState |= BRW_NEW_CC_STATE;
      }
   }

   if (devinfo->gen >= 6) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVSNB+
       *
       *   Software must ensure all the write caches are flushed through a
       *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
       *   command to invalidate read only caches prior to programming
       *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
       */
      const unsigned dc_flush =
         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  dc_flush |
                                  PIPE_CONTROL_NO_WRITE |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_NO_WRITE);

   } else {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: PRE-DEVSNB
       *
       *   Software must ensure the current pipeline is flushed via an
       *   MI_FLUSH or PIPE_CONTROL prior to the execution of PIPELINE_SELECT.
       */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   /* Select the pipeline: mode 0 is 3D, mode 2 is GPGPU.  On Gen9+ the
    * command also carries mask bits (9:8) that must be set for the mode
    * bits to take effect.
    */
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 |
             (devinfo->gen >= 9 ? (3 << 8) : 0) |
             (pipeline == BRW_COMPUTE_PIPELINE ? 2 : 0));
   ADVANCE_BATCH();

   if (devinfo->gen == 7 && !devinfo->is_haswell &&
       pipeline == BRW_RENDER_PIPELINE) {
      /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
       * PIPELINE_SELECT [DevBWR+]":
       *
       *   Project: DEVIVB, DEVHSW:GT3:A0
       *
       *   Software must send a pipe_control with a CS stall and a post sync
       *   operation and then a dummy DRAW after every MI_SET_CONTEXT and
       *   after any PIPELINE_SELECT that is enabling 3D mode.
       */
      gen7_emit_cs_stall_flush(brw);

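      /* The dummy draw: a 3DPRIMITIVE pointlist with a vertex count of
       * zero.
       */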
      BEGIN_BATCH(7);
      OUT_BATCH(CMD_3D_PRIM << 16 | (7 - 2));
      OUT_BATCH(_3DPRIM_POINTLIST);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   if (devinfo->is_geminilake) {
      /* Project: DevGLK
       *
       * "This chicken bit works around a hardware issue with barrier logic
       *  encountered when switching between GPGPU and 3D pipelines.  To
       *  workaround the issue, this mode bit should be set after a pipeline
       *  is selected."
       */
      const unsigned barrier_mode =
         pipeline == BRW_RENDER_PIPELINE ? GLK_SCEC_BARRIER_MODE_3D_HULL
                                         : GLK_SCEC_BARRIER_MODE_GPGPU;
      brw_load_register_imm32(brw, SLICE_COMMON_ECO_CHICKEN1,
                              barrier_mode | GLK_SCEC_BARRIER_MODE_MASK);
   }
}

/**
 * Misc invariant state packets
 */
void
brw_upload_invariant_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const bool is_965 = devinfo->gen == 4 && !devinfo->is_g4x;

   brw_emit_select_pipeline(brw, BRW_RENDER_PIPELINE);
   brw->last_pipeline = BRW_RENDER_PIPELINE;

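   /* STATE_SIP: the system instruction pointer is 64 bits wide on Gen8+,
    * hence the extra DWord.
    */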
   if (devinfo->gen >= 8) {
      BEGIN_BATCH(3);
      OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
   if (!is_965) {
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
      /* Use legacy aa line coverage computation. */
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   const uint32_t _3DSTATE_VF_STATISTICS =
      is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
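   /* Bit 0 of 3DSTATE_VF_STATISTICS enables the vertex-fetch statistics
    * counters.
    */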
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 | 1);
   ADVANCE_BATCH();
}

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
void
brw_upload_state_base_address(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (brw->batch.state_base_address_emitted)
      return;

   /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
    * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
    * programmed prior to STATE_BASE_ADDRESS.
    *
    * However, given that the instruction SBA (general state base
    * address) on this chipset is always set to 0 across X and GL,
    * maybe this isn't required for us in particular.
    */

   if (devinfo->gen >= 6) {
      const unsigned dc_flush =
         devinfo->gen >= 7 ? PIPE_CONTROL_DATA_CACHE_FLUSH : 0;

      /* Emit a render target cache flush.
       *
       * This isn't documented anywhere in the PRM.  However, it seems to be
       * necessary prior to changing the surface state base address.  We've
       * seen issues in Vulkan where we get GPU hangs when using multi-level
       * command buffers which clear depth, reset state base address, and then
       * go render stuff.
       *
       * Normally, in GL, we would trust the kernel to do sufficient stalls
       * and flushes prior to executing our batch.  However, it doesn't seem
       * as if the kernel's flushing is always sufficient and we don't want to
       * rely on it.
       *
       * We make this an end-of-pipe sync instead of a normal flush because we
       * do not know the current status of the GPU.  On Haswell at least,
       * having a fast-clear operation in flight at the same time as a normal
       * rendering operation can cause hangs.  Since the kernel's flushing is
       * insufficient, we need to ensure that any rendering operations from
       * other processes are definitely complete before we try to do our own
       * rendering.  It's a bit of a big hammer but it appears to work.
       */
      brw_emit_end_of_pipe_sync(brw,
                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                dc_flush);
   }

   if (devinfo->gen >= 8) {
      uint32_t mocs_wb = devinfo->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;
      int pkt_len = devinfo->gen >= 9 ? 19 : 16;

      BEGIN_BATCH(pkt_len);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (pkt_len - 2));
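      /* Bit 0 of each base address and buffer size DWord below is a
       * "modify enable" bit; without it the field is ignored.  0xfffff001
       * encodes the maximum bound (0xfffff 4KB pages) with modify enable
       * set.
       */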
      /* General state base address: stateless DP read/write requests */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      OUT_BATCH(mocs_wb << 16);
      /* Surface state base address: */
      OUT_RELOC64(brw->batch.state.bo, 0, mocs_wb << 4 | 1);
      /* Dynamic state base address: */
      OUT_RELOC64(brw->batch.state.bo, 0, mocs_wb << 4 | 1);
      /* Indirect object base address: MEDIA_OBJECT data */
      OUT_BATCH(mocs_wb << 4 | 1);
      OUT_BATCH(0);
      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC64(brw->cache.bo, 0, mocs_wb << 4 | 1);

      /* General state buffer size */
      OUT_BATCH(0xfffff001);
      /* Dynamic state buffer size */
      OUT_BATCH(ALIGN(MAX_STATE_SIZE, 4096) | 1);
      /* Indirect object upper bound */
      OUT_BATCH(0xfffff001);
      /* Instruction access upper bound */
      OUT_BATCH(ALIGN(brw->cache.bo->size, 4096) | 1);
      if (devinfo->gen >= 9) {
         OUT_BATCH(1);
         OUT_BATCH(0);
         OUT_BATCH(0);
      }
      ADVANCE_BATCH();
   } else if (devinfo->gen >= 6) {
      uint8_t mocs = devinfo->gen == 7 ? GEN7_MOCS_L3 : 0;

      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(mocs << 8 | /* General State Memory Object Control State */
                mocs << 4 | /* Stateless Data Port Access Memory Object Control State */
                1); /* General State Base Address Modify Enable */
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(brw->batch.state.bo, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(brw->batch.state.bo, 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */

      /* Instruction base address: shader kernels (incl. SIP) */
      OUT_RELOC(brw->cache.bo, 0, 1);

      OUT_BATCH(1); /* General state upper bound */
      /* Dynamic state upper bound.  Although the documentation says that
       * programming it to zero will cause it to be ignored, that is a lie.
       * If this isn't programmed to a real bound, the sampler border color
       * pointer is rejected, causing border color to mysteriously fail.
       */
      OUT_BATCH(0xfffff001);
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (devinfo->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_RELOC(brw->cache.bo, 0, 1); /* Instruction base address */
      OUT_BATCH(0xfffff001); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.state.bo, 0, 1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }

   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
   }

   /* According to section 3.6.1 of VOL1 of the 965 PRM,
    * STATE_BASE_ADDRESS updates require a reissue of:
    *
    * 3DSTATE_PIPELINED_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * and this continues through Ironlake.  The Sandy Bridge PRM, vol
    * 1 part 1 says that the following packets must be reissued:
    *
    * 3DSTATE_CC_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * 3DSTATE_SAMPLER_STATE_POINTERS
    * 3DSTATE_VIEWPORT_STATE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * Those are always reissued following SBA updates anyway (new
    * batch time), except in the case of the program cache BO
    * changing.  Having a separate state flag makes the sequence more
    * obvious.
    */

   brw->ctx.NewDriverState |= BRW_NEW_STATE_BASE_ADDRESS;
   brw->batch.state_base_address_emitted = true;
}
    764