/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "intel_batchbuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_fbo.h"
#include "intel_resolve_map.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"
#include "brw_wm.h"
#include "main/framebuffer.h"

/**
 * Helper function to emit depth related command packets.
 */
static void
emit_depth_packets(struct brw_context *brw,
                   struct intel_mipmap_tree *depth_mt,
                   uint32_t depthbuffer_format,
                   uint32_t depth_surface_type,
                   bool depth_writable,
                   struct intel_mipmap_tree *stencil_mt,
                   bool stencil_writable,
                   bool hiz,
                   uint32_t width,
                   uint32_t height,
                   uint32_t depth,
                   uint32_t lod,
                   uint32_t min_array_element)
{
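   /* Memory Object Control State: use the write-back cacheable setting for
    * this generation (Skylake+ uses a different MOCS encoding than Broadwell).
    */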
   uint32_t mocs_wb = brw->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;

   /* Skip repeated NULL depth/stencil emits (think 2D rendering). */
   if (!depth_mt && !stencil_mt && brw->no_depth_or_stencil) {
      assert(brw->hw_ctx);
      return;
   }

   brw_emit_depth_stall_flushes(brw);

   /* _NEW_BUFFERS, _NEW_DEPTH, _NEW_STENCIL */
   BEGIN_BATCH(8);
   OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (8 - 2));
   OUT_BATCH(depth_surface_type << 29 |
             (depth_writable ? (1 << 28) : 0) |
             (stencil_mt != NULL && stencil_writable) << 27 |
             (hiz ? 1 : 0) << 22 |
             depthbuffer_format << 18 |
             (depth_mt ? depth_mt->pitch - 1 : 0));
   if (depth_mt) {
      OUT_RELOC64(depth_mt->bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
   } else {
      OUT_BATCH(0);
      OUT_BATCH(0);
   }
   OUT_BATCH(((width - 1) << 4) | ((height - 1) << 18) | lod);
   OUT_BATCH(((depth - 1) << 21) | (min_array_element << 10) | mocs_wb);
   OUT_BATCH(0);
   OUT_BATCH(((depth - 1) << 21) | (depth_mt ? depth_mt->qpitch >> 2 : 0));
   ADVANCE_BATCH();

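   /* 3DSTATE_HIER_DEPTH_BUFFER: emit an all-zero (disabled) packet when HiZ
    * is off; otherwise point it at the HiZ auxiliary buffer.
    */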
   if (!hiz) {
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      assert(depth_mt);
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (5 - 2));
      OUT_BATCH((depth_mt->hiz_buf->aux_base.pitch - 1) | mocs_wb << 25);
      OUT_RELOC64(depth_mt->hiz_buf->aux_base.bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
      OUT_BATCH(depth_mt->hiz_buf->aux_base.qpitch >> 2);
      ADVANCE_BATCH();
   }

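   /* 3DSTATE_STENCIL_BUFFER: likewise, emit a disabled packet when there is
    * no separate stencil miptree.
    */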
   if (stencil_mt == NULL) {
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER << 16 | (5 - 2));
      /* The stencil buffer has quirky pitch requirements.  From the Graphics
       * BSpec: vol2a.11 3D Pipeline Windower > Early Depth/Stencil Processing
       * > Depth/Stencil Buffer State > 3DSTATE_STENCIL_BUFFER [DevIVB+],
       * field "Surface Pitch":
       *
       *    The pitch must be set to 2x the value computed based on width, as
       *    the stencil buffer is stored with two rows interleaved.
       *
       * (Note that it is not 100% clear whether this is intended to apply to
       * Gen7; the BSpec flags this comment as "DevILK,DevSNB" (which would
       * imply that it doesn't), however the comment appears on a "DevIVB+"
       * page (which would imply that it does).  Experiments with the hardware
       * indicate that it does.)
       */
      OUT_BATCH(HSW_STENCIL_ENABLED | mocs_wb << 22 |
                (2 * stencil_mt->pitch - 1));
      OUT_RELOC64(stencil_mt->bo,
                  I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER, 0);
      OUT_BATCH(stencil_mt ? stencil_mt->qpitch >> 2 : 0);
      ADVANCE_BATCH();
   }

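   /* 3DSTATE_CLEAR_PARAMS: program the depth clear value; the final DWord
    * marks the clear value as valid.
    */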
   BEGIN_BATCH(3);
   OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS << 16 | (3 - 2));
   OUT_BATCH(depth_mt ? depth_mt->depth_clear_value : 0);
   OUT_BATCH(1);
   ADVANCE_BATCH();

   brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
}

/* Awful vtable-compatible function; should be cleaned up in the future. */
void
gen8_emit_depth_stencil_hiz(struct brw_context *brw,
                            struct intel_mipmap_tree *depth_mt,
                            uint32_t depth_offset,
                            uint32_t depthbuffer_format,
                            uint32_t depth_surface_type,
                            struct intel_mipmap_tree *stencil_mt,
                            bool hiz, bool separate_stencil,
                            uint32_t width, uint32_t height,
                            uint32_t tile_x, uint32_t tile_y)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   uint32_t surftype;
   unsigned int depth = 1;
   unsigned int min_array_element;
   GLenum gl_target = GL_TEXTURE_2D;
   unsigned int lod;
   const struct intel_mipmap_tree *mt = depth_mt ? depth_mt : stencil_mt;
   const struct intel_renderbuffer *irb = NULL;
   const struct gl_renderbuffer *rb = NULL;

   irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   if (!irb)
      irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   rb = (struct gl_renderbuffer *) irb;

   if (rb) {
      depth = MAX2(irb->layer_count, 1);
      if (rb->TexImage)
         gl_target = rb->TexImage->TexObject->Target;
   }

   switch (gl_target) {
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_CUBE_MAP:
      /* The PRM claims that we should use BRW_SURFACE_CUBE for this
       * situation, but experiments show that gl_Layer doesn't work when we do
       * this.  So we use BRW_SURFACE_2D, since for rendering purposes this is
       * equivalent.
       */
      surftype = BRW_SURFACE_2D;
      depth *= 6;
      break;
   case GL_TEXTURE_3D:
      assert(mt);
      depth = MAX2(mt->logical_depth0, 1);
      surftype = translate_tex_target(gl_target);
      break;
   case GL_TEXTURE_1D_ARRAY:
   case GL_TEXTURE_1D:
      if (brw->gen >= 9) {
         /* WaDisable1DDepthStencil. Skylake+ doesn't support 1D depth
          * textures but it does allow pretending it's a 2D texture
          * instead.
          */
         surftype = BRW_SURFACE_2D;
         break;
      }
      /* fallthrough */
   default:
      surftype = translate_tex_target(gl_target);
      break;
   }

   min_array_element = irb ? irb->mt_layer : 0;

   lod = irb ? irb->mt_level - irb->mt->first_level : 0;

   if (mt) {
      width = mt->logical_width0;
      height = mt->logical_height0;
   }

   emit_depth_packets(brw, depth_mt, brw_depthbuffer_format(brw), surftype,
                      brw_depth_writes_enabled(brw),
                      stencil_mt, ctx->Stencil._WriteEnabled,
                      hiz, width, height, depth, lod, min_array_element);
}

/**
 * Should we set the PMA FIX ENABLE bit?
 *
 * To avoid unnecessary depth related stalls, we need to set this bit.
 * However, there is a very complicated formula which governs when it
 * is legal to do so.  This function computes that.
 *
 * See the documentation for the CACHE_MODE_1 register, bit 11.
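 *
 * In short, given the terms computed in the body below (several of which are
 * constant for this driver), the fix is enabled only when HiZ and depth
 * testing are on, early depth/stencil control is not PREPS, and either the
 * pixel shader computes depth or it can kill pixels while depth or stencil
 * writes are enabled.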
 */
static bool
pma_fix_enable(const struct brw_context *brw)
{
   const struct gl_context *ctx = &brw->ctx;
   /* BRW_NEW_FS_PROG_DATA */
   const struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(brw->wm.base.prog_data);
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);

   /* 3DSTATE_WM::ForceThreadDispatch is never used. */
   const bool wm_force_thread_dispatch = false;

   /* 3DSTATE_RASTER::ForceSampleCount is never used. */
   const bool raster_force_sample_count_nonzero = false;

   /* _NEW_BUFFERS:
    * 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    * 3DSTATE_DEPTH_BUFFER::HIZ Enable
    */
   const bool hiz_enabled = depth_irb && intel_renderbuffer_has_hiz(depth_irb);

   /* 3DSTATE_WM::Early Depth/Stencil Control != EDSC_PREPS (2). */
   const bool edsc_not_preps = !wm_prog_data->early_fragment_tests;

   /* 3DSTATE_PS_EXTRA::PixelShaderValid is always true. */
   const bool pixel_shader_valid = true;

   /* !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    *   3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    *   3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    *   3DSTATE_WM_HZ_OP::StencilBufferClear)
    *
    * HiZ operations are done outside of the normal state upload, so they're
    * definitely not happening now.
    */
   const bool in_hiz_op = false;

   /* _NEW_DEPTH:
    * DEPTH_STENCIL_STATE::DepthTestEnable
    */
   const bool depth_test_enabled = depth_irb && ctx->Depth.Test;

   /* _NEW_DEPTH:
    * 3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
    * 3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE.
    */
   const bool depth_writes_enabled = brw_depth_writes_enabled(brw);

   /* _NEW_STENCIL:
    * !DEPTH_STENCIL_STATE::Stencil Buffer Write Enable ||
    * !3DSTATE_DEPTH_BUFFER::Stencil Buffer Enable ||
    * !3DSTATE_STENCIL_BUFFER::Stencil Buffer Enable
    */
   const bool stencil_writes_enabled = ctx->Stencil._WriteEnabled;

   /* 3DSTATE_PS_EXTRA::Pixel Shader Computed Depth Mode != PSCDEPTH_OFF */
   const bool ps_computes_depth =
      wm_prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF;

   /* BRW_NEW_FS_PROG_DATA:     3DSTATE_PS_EXTRA::PixelShaderKillsPixels
    * BRW_NEW_FS_PROG_DATA:     3DSTATE_PS_EXTRA::oMask Present to RenderTarget
    * _NEW_MULTISAMPLE:         3DSTATE_PS_BLEND::AlphaToCoverageEnable
    * _NEW_COLOR:               3DSTATE_PS_BLEND::AlphaTestEnable
    * _NEW_BUFFERS:             3DSTATE_PS_BLEND::AlphaTestEnable
    *                           3DSTATE_PS_BLEND::AlphaToCoverageEnable
    *
    * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable is always false.
    * 3DSTATE_WM::ForceKillPix != ForceOff is always true.
    */
   const bool kill_pixel =
      wm_prog_data->uses_kill ||
      wm_prog_data->uses_omask ||
      _mesa_is_alpha_test_enabled(ctx) ||
      _mesa_is_alpha_to_coverage_enabled(ctx);

   /* The big formula in CACHE_MODE_1::NP PMA FIX ENABLE. */
   return !wm_force_thread_dispatch &&
          !raster_force_sample_count_nonzero &&
          hiz_enabled &&
          edsc_not_preps &&
          pixel_shader_valid &&
          !in_hiz_op &&
          depth_test_enabled &&
          (ps_computes_depth ||
           (kill_pixel && (depth_writes_enabled || stencil_writes_enabled)));
}

void
gen8_write_pma_stall_bits(struct brw_context *brw, uint32_t pma_stall_bits)
{
   struct gl_context *ctx = &brw->ctx;

   /* If we haven't actually changed the value, bail now to avoid unnecessary
    * pipeline stalls and register writes.
    */
   if (brw->pma_stall_bits == pma_stall_bits)
      return;

   brw->pma_stall_bits = pma_stall_bits;

   /* According to the PIPE_CONTROL documentation, software should emit a
    * PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set prior
    * to the LRI.  If stencil buffer writes are enabled, then a Render Cache
    * Flush is also necessary.
    */
   const uint32_t render_cache_flush =
      ctx->Stencil._WriteEnabled ? PIPE_CONTROL_RENDER_TARGET_FLUSH : 0;
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_CS_STALL |
                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                               render_cache_flush);

   /* CACHE_MODE_1 is a non-privileged register. */
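   /* It is also a masked register: GEN8_HIZ_PMA_MASK_BITS carries the
    * write-enable bits in the upper half of the DWord, so the LRI below only
    * touches the PMA fix bits.
    */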
   BEGIN_BATCH(3);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
   OUT_BATCH(GEN7_CACHE_MODE_1);
   OUT_BATCH(GEN8_HIZ_PMA_MASK_BITS | pma_stall_bits);
   ADVANCE_BATCH();

   /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
    * Flush bits is often necessary.  We do it regardless because it's easier.
    * The render cache flush is also necessary if stencil writes are enabled.
    */
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_DEPTH_STALL |
                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                               render_cache_flush);
}

static void
gen8_emit_pma_stall_workaround(struct brw_context *brw)
{
   uint32_t bits = 0;

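   /* The workaround only applies to Broadwell (gen8); skip it on Skylake and
    * later.
    */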
   if (brw->gen >= 9)
      return;

   if (pma_fix_enable(brw))
      bits |= GEN8_HIZ_NP_PMA_FIX_ENABLE | GEN8_HIZ_NP_EARLY_Z_FAILS_DISABLE;

   gen8_write_pma_stall_bits(brw, bits);
}

const struct brw_tracked_state gen8_pma_fix = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_DEPTH |
              _NEW_MULTISAMPLE |
              _NEW_STENCIL,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = gen8_emit_pma_stall_workaround
};

/**
 * Emit packets to perform a depth/HiZ resolve or fast depth/stencil clear.
 *
 * See the "Optimized Depth Buffer Clear and/or Stencil Buffer Clear" section
 * of the hardware documentation for details.
 */
void
gen8_hiz_exec(struct brw_context *brw, struct intel_mipmap_tree *mt,
              unsigned int level, unsigned int layer, enum blorp_hiz_op op)
{
   if (op == BLORP_HIZ_OP_NONE)
      return;

   /* Disable the PMA stall fix since we're about to do a HiZ operation. */
   if (brw->gen == 8)
      gen8_write_pma_stall_bits(brw, 0);

   assert(mt->first_level == 0);
   assert(mt->logical_depth0 >= 1);

   /* If we're operating on LOD 0, align to 8x4 to meet the alignment
    * requirements for most HiZ operations.  Otherwise, use the actual size
    * to allow the hardware to calculate the miplevel offsets correctly.
    */
   uint32_t surface_width  = ALIGN(mt->logical_width0,  level == 0 ? 8 : 1);
   uint32_t surface_height = ALIGN(mt->logical_height0, level == 0 ? 4 : 1);

   /* From the documentation for 3DSTATE_WM_HZ_OP: "3DSTATE_MULTISAMPLE packet
    * must be used prior to this packet to change the Number of Multisamples.
    * This packet must not be used to change Number of Multisamples in a
    * rendering sequence."
    */
   if (brw->num_samples != mt->num_samples) {
      gen8_emit_3dstate_multisample(brw, mt->num_samples);
      brw->NewGLState |= _NEW_MULTISAMPLE;
   }

   /* The basic algorithm is:
    * - If needed, emit 3DSTATE_{DEPTH,HIER_DEPTH,STENCIL}_BUFFER and
    *   3DSTATE_CLEAR_PARAMS packets to set up the relevant buffers.
    * - If needed, emit 3DSTATE_DRAWING_RECTANGLE.
    * - Emit 3DSTATE_WM_HZ_OP with a bit set for the particular operation.
    * - Do a special PIPE_CONTROL to trigger an implicit rectangle primitive.
    * - Emit 3DSTATE_WM_HZ_OP with no bits set to return to normal rendering.
    */
   emit_depth_packets(brw, mt,
                      brw_depth_format(brw, mt->format),
                      BRW_SURFACE_2D,
                      true, /* depth writes */
                      NULL, false, /* no stencil for now */
                      true, /* hiz */
                      surface_width,
                      surface_height,
                      mt->logical_depth0,
                      level,
                      layer); /* min_array_element */

   /* Depth buffer clears and HiZ resolves must use an 8x4 aligned rectangle.
    * Note that intel_miptree_level_enable_hiz disables HiZ for miplevels > 0
    * which aren't 8x4 aligned, so expanding the size is safe - it'll just
    * draw into empty padding space.
    */
   unsigned rect_width = ALIGN(minify(mt->logical_width0, level), 8);
   unsigned rect_height = ALIGN(minify(mt->logical_height0, level), 4);

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
   OUT_BATCH(0);
   OUT_BATCH(((rect_width - 1) & 0xffff) | ((rect_height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();

   /* Emit 3DSTATE_WM_HZ_OP to override pipeline state for the particular
    * resolve or clear operation we want to perform.
    */
   uint32_t dw1 = 0;

   switch (op) {
   case BLORP_HIZ_OP_DEPTH_RESOLVE:
      dw1 |= GEN8_WM_HZ_DEPTH_RESOLVE;
      break;
   case BLORP_HIZ_OP_HIZ_RESOLVE:
      dw1 |= GEN8_WM_HZ_HIZ_RESOLVE;
      break;
   case BLORP_HIZ_OP_DEPTH_CLEAR:
      dw1 |= GEN8_WM_HZ_DEPTH_CLEAR;

      /* The "Clear Rectangle X Max" (and Y Max) fields are exclusive,
       * rather than inclusive, and limited to 16383.  This means that
       * for a 16384x16384 render target, we would miss the last row
       * or column of pixels along the edge.
       *
       * To work around this, we have to set the "Full Surface Depth
       * and Stencil Clear" bit.  We can do this in all cases because
       * we always clear the full rectangle anyway.  We'll need to
       * change this if we ever add scissored clear support.
       */
      dw1 |= GEN8_WM_HZ_FULL_SURFACE_DEPTH_CLEAR;
      break;
   case BLORP_HIZ_OP_NONE:
      unreachable("Should not get here.");
   }

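   /* The NUM_SAMPLES field takes log2 of the sample count; for a power-of-two
    * count, ffs() - 1 computes exactly that.
    */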
   if (mt->num_samples > 0)
      dw1 |= SET_FIELD(ffs(mt->num_samples) - 1, GEN8_WM_HZ_NUM_SAMPLES);

   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
   OUT_BATCH(dw1);
   OUT_BATCH(0);
   OUT_BATCH(SET_FIELD(rect_width, GEN8_WM_HZ_CLEAR_RECTANGLE_X_MAX) |
             SET_FIELD(rect_height, GEN8_WM_HZ_CLEAR_RECTANGLE_Y_MAX));
   OUT_BATCH(SET_FIELD(0xFFFF, GEN8_WM_HZ_SAMPLE_MASK));
   ADVANCE_BATCH();

   /* Emit a PIPE_CONTROL with "Post-Sync Operation" set to "Write Immediate
    * Data", and no other bits set.  This causes 3DSTATE_WM_HZ_OP's state to
    * take effect, and spawns a rectangle primitive.
    */
   brw_emit_pipe_control_write(brw,
                               PIPE_CONTROL_WRITE_IMMEDIATE,
                               brw->workaround_bo, 0, 0, 0);

   /* Emit 3DSTATE_WM_HZ_OP again to disable the state overrides. */
   BEGIN_BATCH(5);
   OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();

   /*
    * From the Broadwell PRM, volume 7, "Depth Buffer Clear":
    *
    *  Depth buffer clear pass using any of the methods (WM_STATE, 3DSTATE_WM
    *  or 3DSTATE_WM_HZ_OP) must be followed by a PIPE_CONTROL command with
    *  DEPTH_STALL bit and Depth FLUSH bits "set" before starting to render.
    *  DepthStall and DepthFlush are not needed between consecutive depth
    *  clear passes nor is it required if the depth clear pass was done with
    *  "full_surf_clear" bit set in the 3DSTATE_WM_HZ_OP.
    *
    *  TODO: As the spec says, this could be conditional.
    */
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                               PIPE_CONTROL_DEPTH_STALL);

   /* Mark this buffer as needing a TC flush, as we've rendered to it. */
   brw_render_cache_set_add_bo(brw, mt->bo);

   /* We've clobbered all of the depth packets, and the drawing rectangle,
    * so we need to ensure those packets are re-emitted before the next
    * primitive.
    *
    * Setting _NEW_DEPTH and _NEW_BUFFERS covers it, but is rather overkill.
    */
   brw->NewGLState |= _NEW_DEPTH | _NEW_BUFFERS;
}
    553