Home | History | Annotate | Download | only in i965
      1 /*
      2  * Copyright  2010 Intel Corporation
      3  *
      4  * Permission is hereby granted, free of charge, to any person obtaining a
      5  * copy of this software and associated documentation files (the "Software"),
      6  * to deal in the Software without restriction, including without limitation
      7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8  * and/or sell copies of the Software, and to permit persons to whom the
      9  * Software is furnished to do so, subject to the following conditions:
     10  *
     11  * The above copyright notice and this permission notice (including the next
     12  * paragraph) shall be included in all copies or substantial portions of the
     13  * Software.
     14  *
     15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     21  * IN THE SOFTWARE.
     22  */
     23 
     24 #include "brw_context.h"
     25 #include "brw_defines.h"
     26 #include "intel_batchbuffer.h"
     27 #include "intel_fbo.h"
     28 
     29 /**
     30  * According to the latest documentation, any PIPE_CONTROL with the
     31  * "Command Streamer Stall" bit set must also have another bit set,
      32  * with six different options:
     33  *
     34  *  - Render Target Cache Flush
     35  *  - Depth Cache Flush
     36  *  - Stall at Pixel Scoreboard
     37  *  - Post-Sync Operation
     38  *  - Depth Stall
     39  *  - DC Flush Enable
     40  *
     41  * I chose "Stall at Pixel Scoreboard" since we've used it effectively
     42  * in the past, but the choice is fairly arbitrary.
     43  */
     44 static void
     45 gen8_add_cs_stall_workaround_bits(uint32_t *flags)
     46 {
     47    uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
     48                       PIPE_CONTROL_DEPTH_CACHE_FLUSH |
     49                       PIPE_CONTROL_WRITE_IMMEDIATE |
     50                       PIPE_CONTROL_WRITE_DEPTH_COUNT |
     51                       PIPE_CONTROL_WRITE_TIMESTAMP |
     52                       PIPE_CONTROL_STALL_AT_SCOREBOARD |
     53                       PIPE_CONTROL_DEPTH_STALL |
     54                       PIPE_CONTROL_DATA_CACHE_FLUSH;
     55 
     56    /* If we're doing a CS stall, and don't already have one of the
     57     * workaround bits set, add "Stall at Pixel Scoreboard."
     58     */
     59    if ((*flags & PIPE_CONTROL_CS_STALL) != 0 && (*flags & wa_bits) == 0)
     60       *flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
     61 }
     62 
     63 /* Implement the WaCsStallAtEveryFourthPipecontrol workaround on IVB, BYT:
     64  *
     65  * "Every 4th PIPE_CONTROL command, not counting the PIPE_CONTROL with
     66  *  only read-cache-invalidate bit(s) set, must have a CS_STALL bit set."
     67  *
     68  * Note that the kernel does CS stalls between batches, so we only need
     69  * to count them within a batch.
     70  */
     71 static uint32_t
     72 gen7_cs_stall_every_four_pipe_controls(struct brw_context *brw, uint32_t flags)
     73 {
     74    if (brw->gen == 7 && !brw->is_haswell) {
     75       if (flags & PIPE_CONTROL_CS_STALL) {
     76          /* If we're doing a CS stall, reset the counter and carry on. */
     77          brw->pipe_controls_since_last_cs_stall = 0;
     78          return 0;
     79       }
     80 
     81       /* If this is the fourth pipe control without a CS stall, do one now. */
     82       if (++brw->pipe_controls_since_last_cs_stall == 4) {
     83          brw->pipe_controls_since_last_cs_stall = 0;
     84          return PIPE_CONTROL_CS_STALL;
     85       }
     86    }
     87    return 0;
     88 }
     89 
     90 /**
     91  * Emit a PIPE_CONTROL with various flushing flags.
     92  *
     93  * The caller is responsible for deciding what flags are appropriate for the
     94  * given generation.
     95  */
void
brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags)
{
   if (brw->gen >= 6 &&
       (flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
       (flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
      /* A pipe control command with flush and invalidate bits set
       * simultaneously is an inherently racy operation on Gen6+ if the
       * contents of the flushed caches were intended to become visible from
       * any of the invalidated caches.  Split it in two PIPE_CONTROLs, the
       * first one should stall the pipeline to make sure that the flushed R/W
       * caches are coherent with memory once the specified R/O caches are
       * invalidated.  On pre-Gen6 hardware the (implicit) R/O cache
       * invalidation seems to happen at the bottom of the pipeline together
       * with any write cache flush, so this shouldn't be a concern.
       */
      brw_emit_pipe_control_flush(brw, (flags & PIPE_CONTROL_CACHE_FLUSH_BITS) |
                                       PIPE_CONTROL_CS_STALL);
      /* The recursive call above already performed the flush and the CS
       * stall, so drop those bits and continue with the invalidates only.
       */
      flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
   }

   if (brw->gen >= 8) {
      if (brw->gen == 8)
         gen8_add_cs_stall_workaround_bits(&flags);

      if (brw->gen == 9 &&
          (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
         /* Hardware workaround: SKL
          *
          * Emit Pipe Control with all bits set to zero before emitting
          * a Pipe Control with VF Cache Invalidate set.
          */
         brw_emit_pipe_control_flush(brw, 0);
      }

      /* Gen8+: 6-DWord PIPE_CONTROL (48-bit address field); no write
       * address/data here, so DW2-DW5 stay zero.
       */
      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (6 - 2));
      OUT_BATCH(flags);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else if (brw->gen >= 6) {
      if (brw->gen == 6 &&
          (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH)) {
         /* Hardware workaround: SNB B-Spec says:
          *
          *   [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush
          *   Enable = 1, a PIPE_CONTROL with any non-zero post-sync-op is
          *   required.
          */
         brw_emit_post_sync_nonzero_flush(brw);
      }

      flags |= gen7_cs_stall_every_four_pipe_controls(brw, flags);

      /* Gen6/7: 5-DWord PIPE_CONTROL (32-bit address field). */
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(flags);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      /* Gen4/5: the flags are encoded in the command DWord itself rather
       * than in a separate DW1 field.
       */
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | flags | (4 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
    169 
    170 /**
    171  * Emit a PIPE_CONTROL that writes to a buffer object.
    172  *
    173  * \p flags should contain one of the following items:
    174  *  - PIPE_CONTROL_WRITE_IMMEDIATE
    175  *  - PIPE_CONTROL_WRITE_TIMESTAMP
    176  *  - PIPE_CONTROL_WRITE_DEPTH_COUNT
    177  */
void
brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
                            drm_intel_bo *bo, uint32_t offset,
                            uint32_t imm_lower, uint32_t imm_upper)
{
   if (brw->gen >= 8) {
      if (brw->gen == 8)
         gen8_add_cs_stall_workaround_bits(&flags);

      /* Gen8+: 6-DWord PIPE_CONTROL with a 64-bit relocated write address
       * in DW2-DW3, followed by the 64-bit immediate split across two DWords.
       */
      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (6 - 2));
      OUT_BATCH(flags);
      OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                  offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   } else if (brw->gen >= 6) {
      flags |= gen7_cs_stall_every_four_pipe_controls(brw, flags);

      /* PPGTT/GGTT is selected by DW2 bit 2 on Sandybridge, but DW1 bit 24
       * on later platforms.  We always use PPGTT on Gen7+.
       */
      unsigned gen6_gtt = brw->gen == 6 ? PIPE_CONTROL_GLOBAL_GTT_WRITE : 0;

      /* Gen6/7: 5-DWord PIPE_CONTROL; the GTT-select bit is OR'd into the
       * relocated 32-bit address DWord.
       */
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(flags);
      OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                gen6_gtt | offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   } else {
      /* Gen4/5: flags live in the command DWord; writes always go through
       * the global GTT.
       */
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | flags | (4 - 2));
      OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE | offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   }
}
    221 
    222 /**
    223  * Restriction [DevSNB, DevIVB]:
    224  *
    225  * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
    226  * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
    227  * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
    228  * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
    229  * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
    230  * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
    231  * unless SW can otherwise guarantee that the pipeline from WM onwards is
    232  * already flushed (e.g., via a preceding MI_FLUSH).
    233  */
    234 void
    235 brw_emit_depth_stall_flushes(struct brw_context *brw)
    236 {
    237    assert(brw->gen >= 6);
    238 
    239    /* Starting on BDW, these pipe controls are unnecessary.
    240     *
    241     *   WM HW will internally manage the draining pipe and flushing of the caches
    242     *   when this command is issued. The PIPE_CONTROL restrictions are removed.
    243     */
    244    if (brw->gen >= 8)
    245       return;
    246 
    247    brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
    248    brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_CACHE_FLUSH);
    249    brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
    250 }
    251 
    252 /**
    253  * From the Ivybridge PRM, Volume 2 Part 1, Section 3.2 (VS Stage Input):
    254  * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
    255  *  stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
    256  *  3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
    257  *  3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one PIPE_CONTROL needs
    258  *  to be sent before any combination of VS associated 3DSTATE."
    259  */
    260 void
    261 gen7_emit_vs_workaround_flush(struct brw_context *brw)
    262 {
    263    assert(brw->gen == 7);
    264    brw_emit_pipe_control_write(brw,
    265                                PIPE_CONTROL_WRITE_IMMEDIATE
    266                                | PIPE_CONTROL_DEPTH_STALL,
    267                                brw->workaround_bo, 0,
    268                                0, 0);
    269 }
    270 
    271 
    272 /**
    273  * Emit a PIPE_CONTROL command for gen7 with the CS Stall bit set.
    274  */
    275 void
    276 gen7_emit_cs_stall_flush(struct brw_context *brw)
    277 {
    278    brw_emit_pipe_control_write(brw,
    279                                PIPE_CONTROL_CS_STALL
    280                                | PIPE_CONTROL_WRITE_IMMEDIATE,
    281                                brw->workaround_bo, 0,
    282                                0, 0);
    283 }
    284 
    285 
    286 /**
    287  * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
    288  * implementing two workarounds on gen6.  From section 1.4.7.1
    289  * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
    290  *
    291  * [DevSNB-C+{W/A}] Before any depth stall flush (including those
    292  * produced by non-pipelined state commands), software needs to first
    293  * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
    294  * 0.
    295  *
    296  * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
    297  * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
    298  *
    299  * And the workaround for these two requires this workaround first:
    300  *
    301  * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
    302  * BEFORE the pipe-control with a post-sync op and no write-cache
    303  * flushes.
    304  *
    305  * And this last workaround is tricky because of the requirements on
    306  * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
    307  * volume 2 part 1:
    308  *
    309  *     "1 of the following must also be set:
    310  *      - Render Target Cache Flush Enable ([12] of DW1)
    311  *      - Depth Cache Flush Enable ([0] of DW1)
    312  *      - Stall at Pixel Scoreboard ([1] of DW1)
    313  *      - Depth Stall ([13] of DW1)
    314  *      - Post-Sync Operation ([13] of DW1)
    315  *      - Notify Enable ([8] of DW1)"
    316  *
    317  * The cache flushes require the workaround flush that triggered this
    318  * one, so we can't use it.  Depth stall would trigger the same.
    319  * Post-sync nonzero is what triggered this second workaround, so we
    320  * can't use that one either.  Notify enable is IRQs, which aren't
    321  * really our business.  That leaves only stall at scoreboard.
    322  */
    323 void
    324 brw_emit_post_sync_nonzero_flush(struct brw_context *brw)
    325 {
    326    brw_emit_pipe_control_flush(brw,
    327                                PIPE_CONTROL_CS_STALL |
    328                                PIPE_CONTROL_STALL_AT_SCOREBOARD);
    329 
    330    brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_IMMEDIATE,
    331                                brw->workaround_bo, 0, 0, 0);
    332 }
    333 
    334 /* Emit a pipelined flush to either flush render and texture cache for
    335  * reading from a FBO-drawn texture, or flush so that frontbuffer
    336  * render appears on the screen in DRI1.
    337  *
    338  * This is also used for the always_flush_cache driconf debug option.
    339  */
    340 void
    341 brw_emit_mi_flush(struct brw_context *brw)
    342 {
    343    if (brw->batch.ring == BLT_RING && brw->gen >= 6) {
    344       BEGIN_BATCH_BLT(4);
    345       OUT_BATCH(MI_FLUSH_DW);
    346       OUT_BATCH(0);
    347       OUT_BATCH(0);
    348       OUT_BATCH(0);
    349       ADVANCE_BATCH();
    350    } else {
    351       int flags = PIPE_CONTROL_NO_WRITE | PIPE_CONTROL_RENDER_TARGET_FLUSH;
    352       if (brw->gen >= 6) {
    353          flags |= PIPE_CONTROL_INSTRUCTION_INVALIDATE |
    354                   PIPE_CONTROL_CONST_CACHE_INVALIDATE |
    355                   PIPE_CONTROL_DEPTH_CACHE_FLUSH |
    356                   PIPE_CONTROL_VF_CACHE_INVALIDATE |
    357                   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
    358                   PIPE_CONTROL_CS_STALL;
    359       }
    360       brw_emit_pipe_control_flush(brw, flags);
    361    }
    362 }
    363 
    364 int
    365 brw_init_pipe_control(struct brw_context *brw,
    366                       const struct gen_device_info *devinfo)
    367 {
    368    if (devinfo->gen < 6)
    369       return 0;
    370 
    371    /* We can't just use brw_state_batch to get a chunk of space for
    372     * the gen6 workaround because it involves actually writing to
    373     * the buffer, and the kernel doesn't let us write to the batch.
    374     */
    375    brw->workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
    376                                            "pipe_control workaround",
    377                                            4096, 4096);
    378    if (brw->workaround_bo == NULL)
    379       return -ENOMEM;
    380 
    381    brw->pipe_controls_since_last_cs_stall = 0;
    382 
    383    return 0;
    384 }
    385 
    386 void
    387 brw_fini_pipe_control(struct brw_context *brw)
    388 {
    389    drm_intel_bo_unreference(brw->workaround_bo);
    390 }
    391