      1 /*
      2  * Copyright  2011 Intel Corporation
      3  *
      4  * Permission is hereby granted, free of charge, to any person obtaining a
      5  * copy of this software and associated documentation files (the "Software"),
      6  * to deal in the Software without restriction, including without limitation
      7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8  * and/or sell copies of the Software, and to permit persons to whom the
      9  * Software is furnished to do so, subject to the following conditions:
     10  *
     11  * The above copyright notice and this permission notice (including the next
     12  * paragraph) shall be included in all copies or substantial portions of the
     13  * Software.
     14  *
     15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     21  * IN THE SOFTWARE.
     22  */
     23 
     24 #include "intel_batchbuffer.h"
     25 #include "intel_mipmap_tree.h"
     26 #include "intel_fbo.h"
     27 #include "brw_context.h"
     28 #include "brw_state.h"
     29 #include "brw_defines.h"
     30 #include "compiler/brw_eu_defines.h"
     31 #include "brw_wm.h"
     32 #include "main/framebuffer.h"
     33 
/**
 * Helper function to emit depth related command packets.
 *
 * Emits, in order: 3DSTATE_DEPTH_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER,
 * 3DSTATE_STENCIL_BUFFER, and 3DSTATE_CLEAR_PARAMS.  Either miptree may
 * be NULL, in which case the corresponding packet is emitted with a null
 * (zeroed) payload.
 *
 * \param depth_mt           depth miptree, or NULL for no depth buffer
 * \param depthbuffer_format hardware depth format field (bits 20:18 of DW1)
 * \param depth_surface_type hardware surface type field (bits 31:29 of DW1)
 * \param depth_writable     whether depth writes are enabled
 * \param stencil_mt         separate stencil miptree, or NULL
 * \param stencil_writable   whether stencil writes are enabled
 * \param hiz                whether a hierarchical depth buffer is in use
 * \param width/height/depth surface dimensions (1-based; encoded minus one)
 * \param lod                mip level to render to
 * \param min_array_element  first array slice to render to
 */
static void
emit_depth_packets(struct brw_context *brw,
                   struct intel_mipmap_tree *depth_mt,
                   uint32_t depthbuffer_format,
                   uint32_t depth_surface_type,
                   bool depth_writable,
                   struct intel_mipmap_tree *stencil_mt,
                   bool stencil_writable,
                   bool hiz,
                   uint32_t width,
                   uint32_t height,
                   uint32_t depth,
                   uint32_t lod,
                   uint32_t min_array_element)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   /* Write-back cacheability (MOCS) value; the encoding differs between
    * gen8 (BDW) and gen9+ (SKL).
    */
   uint32_t mocs_wb = devinfo->gen >= 9 ? SKL_MOCS_WB : BDW_MOCS_WB;

   /* Skip repeated NULL depth/stencil emits (think 2D rendering). */
   if (!depth_mt && !stencil_mt && brw->no_depth_or_stencil) {
      assert(brw->hw_ctx);
      return;
   }

   /* Required stalls/flushes before reprogramming the depth buffer state. */
   brw_emit_depth_stall_flushes(brw);

   /* _NEW_BUFFERS, _NEW_DEPTH, _NEW_STENCIL */
   BEGIN_BATCH(8);
   OUT_BATCH(GEN7_3DSTATE_DEPTH_BUFFER << 16 | (8 - 2));
   /* DW1: surface type, depth/stencil write enables, HiZ enable, depth
    * format, and surface pitch (encoded minus one).
    */
   OUT_BATCH(depth_surface_type << 29 |
             (depth_writable ? (1 << 28) : 0) |
             (stencil_mt != NULL && stencil_writable) << 27 |
             (hiz ? 1 : 0) << 22 |
             depthbuffer_format << 18 |
             (depth_mt ? depth_mt->surf.row_pitch - 1 : 0));
   if (depth_mt) {
      /* DW2-3: 64-bit surface base address (relocated, GPU-writable). */
      OUT_RELOC64(depth_mt->bo, RELOC_WRITE, 0);
   } else {
      /* No depth buffer: zero 64-bit surface address. */
      OUT_BATCH(0);
      OUT_BATCH(0);
   }
   /* DW4: width/height (minus one) and LOD. */
   OUT_BATCH(((width - 1) << 4) | ((height - 1) << 18) | lod);
   /* DW5: depth (minus one), minimum array element, and MOCS. */
   OUT_BATCH(((depth - 1) << 21) | (min_array_element << 10) | mocs_wb);
   OUT_BATCH(0);
   /* DW7: render view extent and QPitch (in units of 4 rows). */
   OUT_BATCH(((depth - 1) << 21) |
              (depth_mt ? depth_mt->surf.array_pitch_el_rows >> 2 : 0));
   ADVANCE_BATCH();

   if (!hiz) {
      /* Null hierarchical depth buffer. */
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      /* HiZ requires a depth buffer to be meaningful. */
      assert(depth_mt);
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_HIER_DEPTH_BUFFER << 16 | (5 - 2));
      OUT_BATCH((depth_mt->hiz_buf->pitch - 1) | mocs_wb << 25);
      OUT_RELOC64(depth_mt->hiz_buf->bo, RELOC_WRITE, 0);
      /* QPitch in units of 4 rows. */
      OUT_BATCH(depth_mt->hiz_buf->qpitch >> 2);
      ADVANCE_BATCH();
   }

   if (stencil_mt == NULL) {
      /* Null separate stencil buffer. */
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(5);
      OUT_BATCH(GEN7_3DSTATE_STENCIL_BUFFER << 16 | (5 - 2));
      OUT_BATCH(HSW_STENCIL_ENABLED | mocs_wb << 22 |
                (stencil_mt->surf.row_pitch - 1));
      OUT_RELOC64(stencil_mt->bo, RELOC_WRITE, 0);
      OUT_BATCH(stencil_mt->surf.array_pitch_el_rows >> 2);
      ADVANCE_BATCH();
   }

   /* Program the fast-clear depth value (0 when there's no depth buffer);
    * the final dword presumably marks the clear value as valid -- matches
    * the 3DSTATE_CLEAR_PARAMS layout in the PRM.
    */
   BEGIN_BATCH(3);
   OUT_BATCH(GEN7_3DSTATE_CLEAR_PARAMS << 16 | (3 - 2));
   OUT_BATCH(depth_mt ? depth_mt->fast_clear_color.u32[0] : 0);
   OUT_BATCH(1);
   ADVANCE_BATCH();

   /* Remember whether this was a NULL emit so the next one can be skipped. */
   brw->no_depth_or_stencil = !depth_mt && !stencil_mt;
}
    129 
/* Awful vtable-compatible function; should be cleaned up in the future. */
/**
 * Gather surface type, dimensions, LOD, and layer info from the current
 * draw framebuffer, then emit the gen8 depth/stencil/HiZ packets via
 * emit_depth_packets().
 *
 * NOTE(review): depth_offset, separate_stencil, tile_x, and tile_y are
 * never read in this body -- they appear to exist only to match the
 * vtable signature shared with other generations.
 */
void
gen8_emit_depth_stencil_hiz(struct brw_context *brw,
                            struct intel_mipmap_tree *depth_mt,
                            uint32_t depth_offset,
                            uint32_t depthbuffer_format,
                            uint32_t depth_surface_type,
                            struct intel_mipmap_tree *stencil_mt,
                            bool hiz, bool separate_stencil,
                            uint32_t width, uint32_t height,
                            uint32_t tile_x, uint32_t tile_y)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   uint32_t surftype;
   unsigned int depth = 1;
   unsigned int min_array_element;
   GLenum gl_target = GL_TEXTURE_2D;
   unsigned int lod;
   /* Prefer the depth miptree; fall back to stencil-only rendering. */
   const struct intel_mipmap_tree *mt = depth_mt ? depth_mt : stencil_mt;
   const struct intel_renderbuffer *irb = NULL;
   const struct gl_renderbuffer *rb = NULL;

   /* Find the depth attachment, or failing that the stencil attachment. */
   irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   if (!irb)
      irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   rb = (struct gl_renderbuffer *) irb;

   if (rb) {
      depth = MAX2(irb->layer_count, 1);
      if (rb->TexImage)
         gl_target = rb->TexImage->TexObject->Target;
   }

   switch (gl_target) {
   case GL_TEXTURE_CUBE_MAP_ARRAY:
   case GL_TEXTURE_CUBE_MAP:
      /* The PRM claims that we should use BRW_SURFACE_CUBE for this
       * situation, but experiments show that gl_Layer doesn't work when we do
       * this.  So we use BRW_SURFACE_2D, since for rendering purposes this is
       * equivalent.
       */
      surftype = BRW_SURFACE_2D;
      depth *= 6;
      break;
   case GL_TEXTURE_3D:
      /* 3D textures take their depth from the miptree, not the layer count. */
      assert(mt);
      depth = mt->surf.logical_level0_px.depth;
      surftype = translate_tex_target(gl_target);
      break;
   case GL_TEXTURE_1D_ARRAY:
   case GL_TEXTURE_1D:
      if (devinfo->gen >= 9) {
         /* WaDisable1DDepthStencil. Skylake+ doesn't support 1D depth
          * textures but it does allow pretending it's a 2D texture
          * instead.
          */
         surftype = BRW_SURFACE_2D;
         break;
      }
      /* fallthrough */
   default:
      surftype = translate_tex_target(gl_target);
      break;
   }

   min_array_element = irb ? irb->mt_layer : 0;

   /* LOD is relative to the miptree's first level. */
   lod = irb ? irb->mt_level - irb->mt->first_level : 0;

   /* The packet takes the level-0 dimensions along with the LOD, so
    * override the caller-supplied width/height with the miptree's.
    */
   if (mt) {
      width = mt->surf.logical_level0_px.width;
      height = mt->surf.logical_level0_px.height;
   }

   emit_depth_packets(brw, depth_mt, brw_depthbuffer_format(brw), surftype,
                      brw_depth_writes_enabled(brw),
                      stencil_mt, brw->stencil_write_enabled,
                      hiz, width, height, depth, lod, min_array_element);
}
    211 
    212 /**
    213  * Should we set the PMA FIX ENABLE bit?
    214  *
    215  * To avoid unnecessary depth related stalls, we need to set this bit.
    216  * However, there is a very complicated formula which governs when it
    217  * is legal to do so.  This function computes that.
    218  *
    219  * See the documenation for the CACHE_MODE_1 register, bit 11.
    220  */
    221 static bool
    222 pma_fix_enable(const struct brw_context *brw)
    223 {
    224    const struct gl_context *ctx = &brw->ctx;
    225    /* BRW_NEW_FS_PROG_DATA */
    226    const struct brw_wm_prog_data *wm_prog_data =
    227       brw_wm_prog_data(brw->wm.base.prog_data);
    228    /* _NEW_BUFFERS */
    229    struct intel_renderbuffer *depth_irb =
    230       intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
    231 
    232    /* 3DSTATE_WM::ForceThreadDispatch is never used. */
    233    const bool wm_force_thread_dispatch = false;
    234 
    235    /* 3DSTATE_RASTER::ForceSampleCount is never used. */
    236    const bool raster_force_sample_count_nonzero = false;
    237 
    238    /* _NEW_BUFFERS:
    239     * 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
    240     * 3DSTATE_DEPTH_BUFFER::HIZ Enable
    241     */
    242    const bool hiz_enabled = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
    243 
    244    /* 3DSTATE_WM::Early Depth/Stencil Control != EDSC_PREPS (2). */
    245    const bool edsc_not_preps = !wm_prog_data->early_fragment_tests;
    246 
    247    /* 3DSTATE_PS_EXTRA::PixelShaderValid is always true. */
    248    const bool pixel_shader_valid = true;
    249 
    250    /* !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
    251     *   3DSTATE_WM_HZ_OP::DepthBufferResolve ||
    252     *   3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
    253     *   3DSTATE_WM_HZ_OP::StencilBufferClear)
    254     *
    255     * HiZ operations are done outside of the normal state upload, so they're
    256     * definitely not happening now.
    257     */
    258    const bool in_hiz_op = false;
    259 
    260    /* _NEW_DEPTH:
    261     * DEPTH_STENCIL_STATE::DepthTestEnable
    262     */
    263    const bool depth_test_enabled = depth_irb && ctx->Depth.Test;
    264 
    265    /* _NEW_DEPTH:
    266     * 3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
    267     * 3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE.
    268     */
    269    const bool depth_writes_enabled = brw_depth_writes_enabled(brw);
    270 
    271    /* _NEW_STENCIL:
    272     * !DEPTH_STENCIL_STATE::Stencil Buffer Write Enable ||
    273     * !3DSTATE_DEPTH_BUFFER::Stencil Buffer Enable ||
    274     * !3DSTATE_STENCIL_BUFFER::Stencil Buffer Enable
    275     */
    276    const bool stencil_writes_enabled = brw->stencil_write_enabled;
    277 
    278    /* 3DSTATE_PS_EXTRA::Pixel Shader Computed Depth Mode != PSCDEPTH_OFF */
    279    const bool ps_computes_depth =
    280       wm_prog_data->computed_depth_mode != BRW_PSCDEPTH_OFF;
    281 
    282    /* BRW_NEW_FS_PROG_DATA:     3DSTATE_PS_EXTRA::PixelShaderKillsPixels
    283     * BRW_NEW_FS_PROG_DATA:     3DSTATE_PS_EXTRA::oMask Present to RenderTarget
    284     * _NEW_MULTISAMPLE:         3DSTATE_PS_BLEND::AlphaToCoverageEnable
    285     * _NEW_COLOR:               3DSTATE_PS_BLEND::AlphaTestEnable
    286     * _NEW_BUFFERS:             3DSTATE_PS_BLEND::AlphaTestEnable
    287     *                           3DSTATE_PS_BLEND::AlphaToCoverageEnable
    288     *
    289     * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable is always false.
    290     * 3DSTATE_WM::ForceKillPix != ForceOff is always true.
    291     */
    292    const bool kill_pixel =
    293       wm_prog_data->uses_kill ||
    294       wm_prog_data->uses_omask ||
    295       _mesa_is_alpha_test_enabled(ctx) ||
    296       _mesa_is_alpha_to_coverage_enabled(ctx);
    297 
    298    /* The big formula in CACHE_MODE_1::NP PMA FIX ENABLE. */
    299    return !wm_force_thread_dispatch &&
    300           !raster_force_sample_count_nonzero &&
    301           hiz_enabled &&
    302           edsc_not_preps &&
    303           pixel_shader_valid &&
    304           !in_hiz_op &&
    305           depth_test_enabled &&
    306           (ps_computes_depth ||
    307            (kill_pixel && (depth_writes_enabled || stencil_writes_enabled)));
    308 }
    309 
    310 void
    311 gen8_write_pma_stall_bits(struct brw_context *brw, uint32_t pma_stall_bits)
    312 {
    313    /* If we haven't actually changed the value, bail now to avoid unnecessary
    314     * pipeline stalls and register writes.
    315     */
    316    if (brw->pma_stall_bits == pma_stall_bits)
    317       return;
    318 
    319    brw->pma_stall_bits = pma_stall_bits;
    320 
    321    /* According to the PIPE_CONTROL documentation, software should emit a
    322     * PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set prior
    323     * to the LRI.  If stencil buffer writes are enabled, then a Render Cache
    324     * Flush is also necessary.
    325     */
    326    const uint32_t render_cache_flush =
    327       brw->stencil_write_enabled ? PIPE_CONTROL_RENDER_TARGET_FLUSH : 0;
    328    brw_emit_pipe_control_flush(brw,
    329                                PIPE_CONTROL_CS_STALL |
    330                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
    331                                render_cache_flush);
    332 
    333    /* CACHE_MODE_1 is a non-privileged register. */
    334    brw_load_register_imm32(brw, GEN7_CACHE_MODE_1,
    335                            GEN8_HIZ_PMA_MASK_BITS |
    336                            pma_stall_bits );
    337 
    338    /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
    339     * Flush bits is often necessary.  We do it regardless because it's easier.
    340     * The render cache flush is also necessary if stencil writes are enabled.
    341     */
    342    brw_emit_pipe_control_flush(brw,
    343                                PIPE_CONTROL_DEPTH_STALL |
    344                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
    345                                render_cache_flush);
    346 
    347 }
    348 
    349 static void
    350 gen8_emit_pma_stall_workaround(struct brw_context *brw)
    351 {
    352    const struct gen_device_info *devinfo = &brw->screen->devinfo;
    353    uint32_t bits = 0;
    354 
    355    if (devinfo->gen >= 9)
    356       return;
    357 
    358    if (pma_fix_enable(brw))
    359       bits |= GEN8_HIZ_NP_PMA_FIX_ENABLE | GEN8_HIZ_NP_EARLY_Z_FAILS_DISABLE;
    360 
    361    gen8_write_pma_stall_bits(brw, bits);
    362 }
    363 
/* State atom for the gen8 PMA stall workaround: re-run
 * gen8_emit_pma_stall_workaround() whenever any of the GL state or
 * compiled FS data it depends on changes.
 */
const struct brw_tracked_state gen8_pma_fix = {
   .dirty = {
      .mesa = _NEW_BUFFERS |
              _NEW_COLOR |
              _NEW_DEPTH |
              _NEW_MULTISAMPLE |
              _NEW_STENCIL,
      .brw = BRW_NEW_BLORP |
             BRW_NEW_FS_PROG_DATA,
   },
   .emit = gen8_emit_pma_stall_workaround
};
    376