/*
 * Copyright 2003 VMware, Inc.
 * Copyright 2009, 2012 Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/mtypes.h"
#include "main/condrender.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_blorp.h"
#include "brw_defines.h"

#define FILE_DEBUG_FLAG DEBUG_BLIT

static const char *buffer_names[] = {
   [BUFFER_FRONT_LEFT] = "front",
   [BUFFER_BACK_LEFT] = "back",
   [BUFFER_FRONT_RIGHT] = "front right",
   [BUFFER_BACK_RIGHT] = "back right",
   [BUFFER_DEPTH] = "depth",
   [BUFFER_STENCIL] = "stencil",
   [BUFFER_ACCUM] = "accum",
   [BUFFER_AUX0] = "aux0",
   [BUFFER_COLOR0] = "color0",
   [BUFFER_COLOR1] = "color1",
   [BUFFER_COLOR2] = "color2",
   [BUFFER_COLOR3] = "color3",
   [BUFFER_COLOR4] = "color4",
   [BUFFER_COLOR5] = "color5",
   [BUFFER_COLOR6] = "color6",
   [BUFFER_COLOR7] = "color7",
};

static void
debug_mask(const char *name, GLbitfield mask)
{
   GLuint i;

   if (unlikely(INTEL_DEBUG & DEBUG_BLIT)) {
      DBG("%s clear:", name);
      for (i = 0; i < BUFFER_COUNT; i++) {
         if (mask & (1 << i))
            DBG(" %s", buffer_names[i]);
      }
      DBG("\n");
   }
}

/**
 * Returns true if the scissor is a noop (cuts out nothing).
 */
static bool
noop_scissor(struct gl_framebuffer *fb)
{
   return fb->_Xmin <= 0 &&
          fb->_Ymin <= 0 &&
          fb->_Xmax >= fb->Width &&
          fb->_Ymax >= fb->Height;
}

/**
 * Implements fast depth clears on gen6+.
 *
 * Fast clears basically work by setting a flag in each of the subspans
 * represented in the HiZ buffer that says "When you need the depth values for
 * this subspan, it's the hardware's current clear value."  Then later rendering
 * can just use the static clear value instead of referencing memory.
 *
 * The tricky part of the implementation is that you have to have the clear
 * value that was used on the depth buffer in place for all further rendering,
 * at least until a resolve to the real depth buffer happens.
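 *
 * Schematically, a later depth read of a fast-cleared subspan behaves roughly
 * like this (illustrative pseudocode only; the real lookup happens in
 * hardware):
 *
 *    depth = hiz_says_subspan_is_cleared ? current_clear_value
 *                                        : depth_buffer[subspan];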
 */
static bool
brw_fast_clear_depth(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_mipmap_tree *mt = depth_irb->mt;
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];

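   /* Fast depth clears rely on HiZ, which requires Gen6+ hardware and a
    * HiZ-enabled depth renderbuffer.
    */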
   if (brw->gen < 6)
      return false;

   if (!intel_renderbuffer_has_hiz(depth_irb))
      return false;

   /* We only handle full buffer clears -- otherwise you'd have to track whether
    * a previous clear had happened at a different clear value and resolve it
    * first.
    */
   if ((ctx->Scissor.EnableFlags & 1) && !noop_scissor(fb)) {
      perf_debug("Failed to fast clear %dx%d depth because of scissors.  "
                 "Possible 5%% performance win if avoided.\n",
                 mt->logical_width0, mt->logical_height0);
      return false;
   }

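   /* Compute the clear value in the representation the hardware expects for
    * this depth format; some formats cannot be fast cleared at all.
    */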
   uint32_t depth_clear_value;
   switch (mt->format) {
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - If the depth buffer format is D32_FLOAT_S8X24_UINT or
       *        D24_UNORM_S8_UINT.
       */
      return false;

   case MESA_FORMAT_Z_FLOAT32:
      depth_clear_value = float_as_int(ctx->Depth.Clear);
      break;

   case MESA_FORMAT_Z_UNORM16:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - [DevSNB{W/A}]: When depth buffer format is D16_UNORM and the
       *        width of the map (LOD0) is not multiple of 16, fast clear
       *        optimization must be disabled.
       */
      if (brw->gen == 6 &&
          (minify(mt->physical_width0,
                  depth_irb->mt_level - mt->first_level) % 16) != 0)
         return false;
      /* FALLTHROUGH */

   default:
      if (brw->gen >= 8)
         depth_clear_value = float_as_int(ctx->Depth.Clear);
      else
         depth_clear_value = fb->_DepthMax * ctx->Depth.Clear;
      break;
   }

   /* If we're clearing to a new clear value, then we need to resolve any clear
    * flags out of the HiZ buffer into the real depth buffer.
    */
   if (mt->depth_clear_value != depth_clear_value) {
      intel_miptree_all_slices_resolve_depth(brw, mt);
      mt->depth_clear_value = depth_clear_value;
   }

   if (brw->gen == 6) {
      /* From the Sandy Bridge PRM, volume 2 part 1, page 313:
       *
       *   "If other rendering operations have preceded this clear, a
       *    PIPE_CONTROL with write cache flush enabled and Z-inhibit disabled
       *    must be issued before the rectangle primitive used for the depth
       *    buffer clear operation."
       */
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_CS_STALL);
   } else if (brw->gen >= 7) {
      /*
       * From the Ivybridge PRM, volume 2, "Depth Buffer Clear":
       *
       *   If other rendering operations have preceded this clear, a
       *   PIPE_CONTROL with depth cache flush enabled, Depth Stall bit
       *   enabled must be issued before the rectangle primitive used for the
       *   depth buffer clear operation.
       *
       * The same applies to Gen8 and Gen9.
       *
       * In addition, from the Ivybridge PRM, volume 2, 1.10.4.1 PIPE_CONTROL,
       * Depth Cache Flush Enable:
       *
       *   This bit must not be set when Depth Stall Enable bit is set in
       *   this packet.
       *
       * This restriction is confirmed to hold in practice; violating it causes
       * immediate GPU hangs on HSW.
       *
       * Therefore issue two pipe control flushes, one for the cache flush and
       * another for the depth stall.
       */
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
   }

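   /* For layered framebuffers, the fast clear has to hit every layer of the
    * depth attachment; otherwise only the single bound slice is cleared.
    */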
   if (fb->MaxNumLayers > 0) {
      for (unsigned layer = 0; layer < depth_irb->layer_count; layer++) {
         intel_hiz_exec(brw, mt, depth_irb->mt_level,
                        depth_irb->mt_layer + layer,
                        BLORP_HIZ_OP_DEPTH_CLEAR);
      }
   } else {
      intel_hiz_exec(brw, mt, depth_irb->mt_level, depth_irb->mt_layer,
                     BLORP_HIZ_OP_DEPTH_CLEAR);
   }

   if (brw->gen == 6) {
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB, DevSNB-B{W/A}]: Depth buffer clear pass must be followed
       *      by a PIPE_CONTROL command with DEPTH_STALL bit set and then
       *      followed by Depth FLUSH."
       */
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_CS_STALL);
   }

   /* Now, the HiZ buffer contains data that needs to be resolved to the depth
    * buffer.
    */
   intel_renderbuffer_att_set_needs_depth_resolve(depth_att);

   return true;
}

/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool partial_clear = ctx->Scissor.EnableFlags && !noop_scissor(fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

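   /* Clears to a front buffer must eventually be flushed out to the window
    * system, so note that the front buffer is now dirty.
    */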
   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      brw->front_buffer_dirty = true;
   }

   intel_prepare_render(brw);
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
         DBG("fast clear: depth\n");
         mask &= ~BUFFER_BIT_DEPTH;
      }
   }

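   /* A stencil clear makes the R8 shadow copy used for stencil texturing
    * stale, so flag it for an update before it is next sampled.
    */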
   if (mask & BUFFER_BIT_STENCIL) {
      struct intel_renderbuffer *stencil_irb =
         intel_get_renderbuffer(fb, BUFFER_STENCIL);
      struct intel_mipmap_tree *mt = stencil_irb->mt;
      if (mt && mt->stencil_mt)
         mt->stencil_mt->r8stencil_needs_update = true;
   }

   /* BLORP is currently only supported on Gen6+. */
   if (brw->gen >= 6 && (mask & BUFFER_BITS_COLOR)) {
      const bool encode_srgb = ctx->Color.sRGBEnabled;
      if (brw_blorp_clear_color(brw, fb, mask, partial_clear, encode_srgb)) {
         debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
         mask &= ~BUFFER_BITS_COLOR;
      }
   }

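   /* Anything the fast paths above did not handle is cleared by drawing a
    * quad through the meta clear path.
    */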
   GLbitfield tri_mask = mask & (BUFFER_BITS_COLOR |
                                 BUFFER_BIT_STENCIL |
                                 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;

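      /* GLES1 contexts have no GLSL, so use the fixed-function meta clear
       * there; everything else can use the GLSL-based clear.
       */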
      if (ctx->API == API_OPENGLES) {
         _mesa_meta_Clear(&brw->ctx, tri_mask);
      } else {
         _mesa_meta_glsl_Clear(&brw->ctx, tri_mask);
      }
   }

   /* Any strange buffers get passed off to swrast */
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}


void
intelInitClearFuncs(struct dd_function_table *functions)
{
   functions->Clear = brw_clear;
}