/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "brw_eu.h"
#include "brw_fs.h"
#include "brw_cfg.h"
#include "util/register_allocate.h"

using namespace brw;

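/* Map a virtual GRF reference to its assigned hardware register, folding
 * whole-register offsets into the register number.  Worked example
 * (illustrative values): with reg_hw_locations[5] == 20, REG_SIZE == 32 and
 * an incoming offset of 40 bytes, reg->nr becomes 21 and reg->offset 8.
 */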
static void
assign_reg(unsigned *reg_hw_locations, fs_reg *reg)
{
   if (reg->file == VGRF) {
      reg->nr = reg_hw_locations[reg->nr] + reg->offset / REG_SIZE;
      reg->offset %= REG_SIZE;
   }
}

void
fs_visitor::assign_regs_trivial()
{
   unsigned hw_reg_mapping[this->alloc.count + 1];
   unsigned i;
   int reg_width = dispatch_width / 8;

   /* Note that compressed instructions require alignment to 2 registers. */
   hw_reg_mapping[0] = ALIGN(this->first_non_payload_grf, reg_width);
   for (i = 1; i <= this->alloc.count; i++) {
      hw_reg_mapping[i] = (hw_reg_mapping[i - 1] +
                           this->alloc.sizes[i - 1]);
   }
   this->grf_used = hw_reg_mapping[this->alloc.count];

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   if (this->grf_used >= max_grf) {
      fail("Ran out of regs on trivial allocator (%d/%d)\n",
           this->grf_used, max_grf);
   } else {
      this->alloc.count = this->grf_used;
   }
}

static void
brw_alloc_reg_set(struct brw_compiler *compiler, int dispatch_width)
{
   const struct gen_device_info *devinfo = compiler->devinfo;
   int base_reg_count = BRW_MAX_GRF;
   const int index = _mesa_logbase2(dispatch_width / 8);

   if (dispatch_width > 8 && devinfo->gen >= 7) {
      /* For IVB+, we don't need the PLN hacks or the even-reg alignment in
       * SIMD16.  Therefore, we can use the exact same register sets for
       * SIMD16 as we do for SIMD8 and we don't need to recalculate them.
       */
      compiler->fs_reg_sets[index] = compiler->fs_reg_sets[0];
      return;
   }

   /* The registers used to make up almost all values handled in the compiler
    * are a scalar value occupying a single register (or 2 registers in the
    * case of SIMD16, which is handled by dividing base_reg_count by 2 and
    * multiplying allocated register numbers by 2).  Things that were
    * aggregates of scalar values at the GLSL level were split to scalar
    * values by split_virtual_grfs().
    *
    * However, texture SEND messages return a series of contiguous registers
    * to write into.  We currently always ask for 4 registers, but we may
    * convert that to use fewer some day.
    *
    * Additionally, on gen5 we need aligned pairs of registers for the PLN
    * instruction, and on gen4 we need 8 contiguous regs for workaround simd16
    * texturing.
    */
   const int class_count = MAX_VGRF_SIZE;
   int class_sizes[MAX_VGRF_SIZE];
   for (unsigned i = 0; i < MAX_VGRF_SIZE; i++)
      class_sizes[i] = i + 1;

   memset(compiler->fs_reg_sets[index].class_to_ra_reg_range, 0,
          sizeof(compiler->fs_reg_sets[index].class_to_ra_reg_range));
   int *class_to_ra_reg_range = compiler->fs_reg_sets[index].class_to_ra_reg_range;

   /* Compute the total number of registers across all classes. */
   int ra_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         /* From the G45 PRM:
          *
          * In order to reduce the hardware complexity, the following
          * rules and restrictions apply to the compressed instruction:
          * ...
          * * Operand Alignment Rule: With the exceptions listed below, a
          *   source/destination operand in general should be aligned to
          *   even 256-bit physical register with a region size equal to
          *   two 256-bit physical register
          */
         ra_reg_count += (base_reg_count - (class_sizes[i] - 1)) / 2;
      } else {
         ra_reg_count += base_reg_count - (class_sizes[i] - 1);
      }
      /* Mark the last register. We'll fill in the beginnings later. */
      class_to_ra_reg_range[class_sizes[i]] = ra_reg_count;
   }
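   /* Worked example (illustrative): with base_reg_count == 128, the size-1
    * class contributes 128 registers and the size-2 class 127, since a
    * size-n register can start at any of base_reg_count - (n - 1) GRFs.  In
    * the pre-gen6 SIMD16 case each term is halved because only even-aligned
    * starting positions are allowed.
    */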

   /* Fill out the rest of the range markers */
   for (int i = 1; i < 17; ++i) {
      if (class_to_ra_reg_range[i] == 0)
         class_to_ra_reg_range[i] = class_to_ra_reg_range[i-1];
   }

   uint8_t *ra_reg_to_grf = ralloc_array(compiler, uint8_t, ra_reg_count);
   struct ra_regs *regs = ra_alloc_reg_set(compiler, ra_reg_count, false);
   if (devinfo->gen >= 6)
      ra_set_allocate_round_robin(regs);
   int *classes = ralloc_array(compiler, int, class_count);
   int aligned_pairs_class = -1;

   /* Allocate space for q values.  We allocate class_count + 1 because we
    * want to leave room for the aligned pairs class if we have it. */
   unsigned int **q_values = ralloc_array(compiler, unsigned int *,
                                          class_count + 1);
   for (int i = 0; i < class_count + 1; ++i)
      q_values[i] = ralloc_array(q_values, unsigned int, class_count + 1);

   /* Now, add the registers to their classes, and add the conflicts
    * between them and the base GRF registers (and also each other).
    */
   int reg = 0;
   int pairs_base_reg = 0;
   int pairs_reg_count = 0;
   for (int i = 0; i < class_count; i++) {
      int class_reg_count;
      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         class_reg_count = (base_reg_count - (class_sizes[i] - 1)) / 2;

         /* See comment below.  The only difference here is that we are
          * dealing with pairs of registers instead of single registers.
          * Registers of odd sizes simply get rounded up. */
         for (int j = 0; j < class_count; j++)
            q_values[i][j] = (class_sizes[i] + 1) / 2 +
                             (class_sizes[j] + 1) / 2 - 1;
      } else {
         class_reg_count = base_reg_count - (class_sizes[i] - 1);

         /* From register_allocate.c:
          *
          * q(B,C) (indexed by C, B is this register class) in
          * Runeson/Nyström paper.  This is "how many registers of B could
          * the worst choice register from C conflict with".
          *
          * If we just let the register allocation algorithm compute these
          * values, it is extremely expensive.  However, since all of our
          * registers are laid out, we can very easily compute them
          * ourselves.  View the register from C as fixed starting at GRF n
          * somewhere in the middle, and the register from B as sliding back
          * and forth.  Then the first register to conflict from B is the
          * one starting at n - class_size[B] + 1 and the last register to
          * conflict will start at n + class_size[B] - 1.  Therefore, the
          * number of conflicts from B is class_size[B] + class_size[C] - 1.
          *
          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
          * B | | | | | |n| --> | | | | | | |
          *   +-+-+-+-+-+-+     +-+-+-+-+-+-+
          *             +-+-+-+-+-+
          * C           |n| | | | |
          *             +-+-+-+-+-+
          */
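         /* Sanity check (illustrative): for class_size[B] == 2 and
          * class_size[C] == 4, q(B,C) == 2 + 4 - 1 == 5 possible conflicting
          * placements of B.
          */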
         for (int j = 0; j < class_count; j++)
            q_values[i][j] = class_sizes[i] + class_sizes[j] - 1;
      }
      classes[i] = ra_alloc_reg_class(regs);

      /* Save this off for the aligned pair class at the end. */
      if (class_sizes[i] == 2) {
         pairs_base_reg = reg;
         pairs_reg_count = class_reg_count;
      }

      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j * 2;

            for (int base_reg = j;
                 base_reg < j + (class_sizes[i] + 1) / 2;
                 base_reg++) {
               ra_add_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      } else {
         for (int j = 0; j < class_reg_count; j++) {
            ra_class_add_reg(regs, classes[i], reg);

            ra_reg_to_grf[reg] = j;

            for (int base_reg = j;
                 base_reg < j + class_sizes[i];
                 base_reg++) {
               ra_add_reg_conflict(regs, base_reg, reg);
            }

            reg++;
         }
      }
   }
   assert(reg == ra_reg_count);

   /* Applying transitivity to all of the base registers gives us the
    * appropriate register conflict relationships everywhere.
    */
   for (int reg = 0; reg < base_reg_count; reg++)
      ra_make_reg_conflicts_transitive(regs, reg);

   /* Add a special class for aligned pairs, which we'll put delta_xy
    * in on Gen <= 6 so that we can do PLN.
    */
   if (devinfo->has_pln && dispatch_width == 8 && devinfo->gen <= 6) {
      aligned_pairs_class = ra_alloc_reg_class(regs);

      for (int i = 0; i < pairs_reg_count; i++) {
         if ((ra_reg_to_grf[pairs_base_reg + i] & 1) == 0) {
            ra_class_add_reg(regs, aligned_pairs_class, pairs_base_reg + i);
         }
      }

      for (int i = 0; i < class_count; i++) {
         /* These are a little counter-intuitive because the pair registers
          * are required to be aligned while the registers they potentially
          * interfere with are not.  In the case where the size is even, the
          * worst case is that the register is odd-aligned.  In the odd-size
          * case, it doesn't matter.
          */
         q_values[class_count][i] = class_sizes[i] / 2 + 1;
         q_values[i][class_count] = class_sizes[i] + 1;
      }
      q_values[class_count][class_count] = 1;
   }
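   /* Spot check of the aligned-pairs q values (illustrative): against a
    * size-4 class, a worst-case odd-aligned size-4 register overlaps
    * 4 / 2 + 1 == 3 aligned pairs, while an aligned pair can conflict with
    * 4 + 1 == 5 placements of a size-4 register.
    */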

   ra_set_finalize(regs, q_values);

   ralloc_free(q_values);

   compiler->fs_reg_sets[index].regs = regs;
   for (unsigned i = 0; i < ARRAY_SIZE(compiler->fs_reg_sets[index].classes); i++)
      compiler->fs_reg_sets[index].classes[i] = -1;
   for (int i = 0; i < class_count; i++)
      compiler->fs_reg_sets[index].classes[class_sizes[i] - 1] = classes[i];
   compiler->fs_reg_sets[index].ra_reg_to_grf = ra_reg_to_grf;
   compiler->fs_reg_sets[index].aligned_pairs_class = aligned_pairs_class;
}

void
brw_fs_alloc_reg_sets(struct brw_compiler *compiler)
{
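   /* One register set per dispatch width: _mesa_logbase2(dispatch_width / 8)
    * yields indices 0, 1 and 2 for SIMD8, SIMD16 and SIMD32 respectively, so
    * the calls below populate fs_reg_sets[0..2].
    */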
   brw_alloc_reg_set(compiler, 8);
   brw_alloc_reg_set(compiler, 16);
   brw_alloc_reg_set(compiler, 32);
}

static int
count_to_loop_end(const bblock_t *block)
{
   if (block->end()->opcode == BRW_OPCODE_WHILE)
      return block->end_ip;

   int depth = 1;
   /* Skip the first block, since we don't want to count the DO that the
    * calling function found.
    */
   for (block = block->next();
        depth > 0;
        block = block->next()) {
      if (block->start()->opcode == BRW_OPCODE_DO)
         depth++;
      if (block->end()->opcode == BRW_OPCODE_WHILE) {
         depth--;
         if (depth == 0)
            return block->end_ip;
      }
   }
   unreachable("not reached");
}

void fs_visitor::calculate_payload_ranges(int payload_node_count,
                                          int *payload_last_use_ip)
{
   int loop_depth = 0;
   int loop_end_ip = 0;

   for (int i = 0; i < payload_node_count; i++)
      payload_last_use_ip[i] = -1;

   int ip = 0;
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      switch (inst->opcode) {
      case BRW_OPCODE_DO:
         loop_depth++;

         /* Since payload regs are only written at the start of shader
          * execution, any use of the payload within a loop means the live
          * interval extends to the end of the outermost loop.  Find the ip
          * of that end now.
          */
         if (loop_depth == 1)
            loop_end_ip = count_to_loop_end(block);
         break;
      case BRW_OPCODE_WHILE:
         loop_depth--;
         break;
      default:
         break;
      }

      int use_ip;
      if (loop_depth > 0)
         use_ip = loop_end_ip;
      else
         use_ip = ip;

      /* Note that UNIFORM args have been turned into FIXED_GRF by
       * assign_curbe_setup(), and interpolation uses fixed hardware regs from
       * the start (see interp_reg()).
       */
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == FIXED_GRF) {
            int node_nr = inst->src[i].nr;
            if (node_nr >= payload_node_count)
               continue;

            for (unsigned j = 0; j < regs_read(inst, i); j++) {
               payload_last_use_ip[node_nr + j] = use_ip;
               assert(node_nr + j < unsigned(payload_node_count));
            }
         }
      }

      /* Special case instructions which have extra implied registers used. */
      switch (inst->opcode) {
      case CS_OPCODE_CS_TERMINATE:
         payload_last_use_ip[0] = use_ip;
         break;

      default:
         if (inst->eot) {
            /* We could omit this for the !inst->header_present case, except
             * that the simulator apparently incorrectly reads from g0/g1
             * instead of sideband.  It also really freaks out driver
             * developers to see g0 used in unusual places, so just always
             * reserve it.
             */
            payload_last_use_ip[0] = use_ip;
            payload_last_use_ip[1] = use_ip;
         }
         break;
      }

      ip++;
   }
}


/**
 * Sets up interference between thread payload registers and the virtual GRFs
 * to be allocated for program temporaries.
 *
 * We want to be able to reallocate the payload for our virtual GRFs, notably
 * because the setup coefficients for a full set of 16 FS inputs takes up 8 of
 * our 128 registers.
 *
 * The layout of the payload registers is:
 *
 * 0..payload.num_regs-1: fixed function setup (including bary coordinates).
 * payload.num_regs..payload.num_regs+curb_read_length-1: uniform data
 * payload.num_regs+curb_read_length..first_non_payload_grf-1: setup coefficients.
 *
 * And we have payload_node_count nodes covering these registers in order
 * (note that in SIMD16, a node is two registers).
 */
void
fs_visitor::setup_payload_interference(struct ra_graph *g,
                                       int payload_node_count,
                                       int first_payload_node)
{
   int payload_last_use_ip[payload_node_count];
   calculate_payload_ranges(payload_node_count, payload_last_use_ip);

   for (int i = 0; i < payload_node_count; i++) {
      if (payload_last_use_ip[i] == -1)
         continue;

      /* Mark the payload node as interfering with any virtual grf that is
       * live between the start of the program and our last use of the payload
       * node.
       */
      for (unsigned j = 0; j < this->alloc.count; j++) {
         /* Note that we use a <= comparison, unlike virtual_grf_interferes(),
          * in order to not have to worry about the uniform issue described in
          * calculate_live_intervals().
          */
         if (this->virtual_grf_start[j] <= payload_last_use_ip[i]) {
            ra_add_node_interference(g, first_payload_node + i, j);
         }
      }
   }

   for (int i = 0; i < payload_node_count; i++) {
      /* Mark each payload node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
      if (devinfo->gen <= 5 && dispatch_width >= 16) {
         /* We have to divide by 2 here because we only have even numbered
          * registers.  Some of the payload registers will be odd, but
          * that's ok because their physical register numbers have already
          * been assigned.  The only thing this is used for is interference.
          */
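         /* For instance (illustrative): payload nodes 0 and 1 both land on
          * physical register 0, nodes 2 and 3 on register 1, and so on.
          */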
         ra_set_node_reg(g, first_payload_node + i, i / 2);
      } else {
         ra_set_node_reg(g, first_payload_node + i, i);
      }
   }
}

/**
 * Sets the mrf_used array to indicate which MRFs are used by the shader IR
 *
 * This is used in assign_regs() to decide which of the GRFs that we use as
 * MRFs on gen7 are available for normal register allocation, and in register
 * spilling to see if we can actually use MRFs to do spills without
 * overwriting normal MRF contents.
 */
static void
get_used_mrfs(fs_visitor *v, bool *mrf_used)
{
   int reg_width = v->dispatch_width / 8;

   memset(mrf_used, 0, BRW_MAX_MRF(v->devinfo->gen) * sizeof(bool));

   foreach_block_and_inst(block, fs_inst, inst, v->cfg) {
      if (inst->dst.file == MRF) {
         int reg = inst->dst.nr & ~BRW_MRF_COMPR4;
         mrf_used[reg] = true;
         if (reg_width == 2) {
            if (inst->dst.nr & BRW_MRF_COMPR4) {
               mrf_used[reg + 4] = true;
            } else {
               mrf_used[reg + 1] = true;
            }
         }
      }

      if (inst->mlen > 0) {
         for (int i = 0; i < v->implied_mrf_writes(inst); i++) {
            mrf_used[inst->base_mrf + i] = true;
         }
      }
   }
}

/**
 * Sets interference between virtual GRFs and usage of the high GRFs for SEND
 * messages (treated as MRFs in code generation).
 */
static void
setup_mrf_hack_interference(fs_visitor *v, struct ra_graph *g,
                            int first_mrf_node, int *first_used_mrf)
{
   bool mrf_used[BRW_MAX_MRF(v->devinfo->gen)];
   get_used_mrfs(v, mrf_used);

   *first_used_mrf = BRW_MAX_MRF(v->devinfo->gen);
   for (int i = 0; i < BRW_MAX_MRF(v->devinfo->gen); i++) {
      /* Mark each MRF reg node as being allocated to its physical register.
       *
       * The alternative would be to have per-physical-register classes, which
       * would just be silly.
       */
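      /* Illustrative values: assuming GEN7_MRF_HACK_START is g112 on a
       * 128-GRF file, MRF m0 is stood in for by g112, m1 by g113, and so on.
       */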
      ra_set_node_reg(g, first_mrf_node + i, GEN7_MRF_HACK_START + i);

      /* Since we don't have any live/dead analysis on the MRFs, just mark all
       * that are used as conflicting with all virtual GRFs.
       */
      if (mrf_used[i]) {
         if (i < *first_used_mrf)
            *first_used_mrf = i;

         for (unsigned j = 0; j < v->alloc.count; j++) {
            ra_add_node_interference(g, first_mrf_node + i, j);
         }
      }
   }
}

bool
fs_visitor::assign_regs(bool allow_spilling, bool spill_all)
{
   /* Most of this allocation was written for a reg_width of 1
    * (dispatch_width == 8).  In extending to SIMD16, the code was
    * left in place and it was converted to have the hardware
    * registers it's allocating be contiguous physical pairs of regs
    * for reg_width == 2.
    */
   int reg_width = dispatch_width / 8;
   unsigned hw_reg_mapping[this->alloc.count];
   int payload_node_count = ALIGN(this->first_non_payload_grf, reg_width);
   int rsi = _mesa_logbase2(reg_width); /* Which compiler->fs_reg_sets[] to use */
   calculate_live_intervals();

   int node_count = this->alloc.count;
   int first_payload_node = node_count;
   node_count += payload_node_count;
   int first_mrf_hack_node = node_count;
   if (devinfo->gen >= 7)
      node_count += BRW_MAX_GRF - GEN7_MRF_HACK_START;
   struct ra_graph *g =
      ra_alloc_interference_graph(compiler->fs_reg_sets[rsi].regs, node_count);

   for (unsigned i = 0; i < this->alloc.count; i++) {
      unsigned size = this->alloc.sizes[i];
      int c;

      assert(size <= ARRAY_SIZE(compiler->fs_reg_sets[rsi].classes) &&
             "Register allocation relies on split_virtual_grfs()");
      c = compiler->fs_reg_sets[rsi].classes[size - 1];

      /* Special case: on pre-GEN6 hardware that supports PLN, the
       * second operand of a PLN instruction needs to be an
       * even-numbered register, so we have a special register class
       * wm_aligned_pairs_class to handle this case.  pre-GEN6 always
       * uses this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL] as the
       * second operand of a PLN instruction (since it doesn't support
       * any other interpolation modes).  So all we need to do is find
       * that register and set it to the appropriate class.
       */
      if (compiler->fs_reg_sets[rsi].aligned_pairs_class >= 0 &&
          this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL].file == VGRF &&
          this->delta_xy[BRW_BARYCENTRIC_PERSPECTIVE_PIXEL].nr == i) {
         c = compiler->fs_reg_sets[rsi].aligned_pairs_class;
      }

      ra_set_node_class(g, i, c);

      for (unsigned j = 0; j < i; j++) {
         if (virtual_grf_interferes(i, j)) {
            ra_add_node_interference(g, i, j);
         }
      }
   }

   /* Certain instructions can't safely use the same register for their
    * sources and destination.  Add interference.
    */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->dst.file == VGRF && inst->has_source_and_destination_hazard()) {
         for (unsigned i = 0; i < 3; i++) {
            if (inst->src[i].file == VGRF) {
               ra_add_node_interference(g, inst->dst.nr, inst->src[i].nr);
            }
         }
      }
   }

   setup_payload_interference(g, payload_node_count, first_payload_node);
   if (devinfo->gen >= 7) {
      int first_used_mrf = BRW_MAX_MRF(devinfo->gen);
      setup_mrf_hack_interference(this, g, first_mrf_hack_node,
                                  &first_used_mrf);

      foreach_block_and_inst(block, fs_inst, inst, cfg) {
         /* When we do send-from-GRF for FB writes, we need to ensure that
          * the last write instruction sends from a high register.  This is
          * because the vertex fetcher wants to start filling the low
          * payload registers while the pixel data port is still working on
          * writing out the memory.  If we don't do this, we get rendering
          * artifacts.
          *
          * We could just do "something high".  Instead, we just pick the
          * highest register that works.
          */
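         /* For example (illustrative): for a size-2 EOT payload with 128
          * GRFs, class_to_ra_reg_range[2] - 1 is the last RA register of the
          * size-2 class, which maps to the pair starting at g126.
          */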
         if (inst->eot) {
            int size = alloc.sizes[inst->src[0].nr];
            int reg = compiler->fs_reg_sets[rsi].class_to_ra_reg_range[size] - 1;

            /* If something happened to spill, we want to push the EOT send
             * register early enough in the register file that we don't
             * conflict with any used MRF hack registers.
             */
            reg -= BRW_MAX_MRF(devinfo->gen) - first_used_mrf;

            ra_set_node_reg(g, inst->src[0].nr, reg);
            break;
         }
      }
   }

   if (dispatch_width > 8) {
      /* In 16-wide dispatch we have an issue where a compressed
       * instruction is actually two instructions executed simultaneously.
       * It's actually ok to have the source and destination registers be
       * the same.  In this case, each instruction overwrites its own
       * source and there's no problem.  The real problem here is if the
       * source and destination registers are off by one.  Then you can end
       * up in a scenario where the first instruction overwrites the
       * source of the second instruction.  Since the compiler doesn't know
       * about this level of granularity, we simply make the source and
       * destination interfere.
       */
      foreach_block_and_inst(block, fs_inst, inst, cfg) {
         if (inst->dst.file != VGRF)
            continue;

         for (int i = 0; i < inst->sources; ++i) {
            if (inst->src[i].file == VGRF) {
               ra_add_node_interference(g, inst->dst.nr, inst->src[i].nr);
            }
         }
      }
   }

   /* Debug of register spilling: Go spill everything. */
   if (unlikely(spill_all)) {
      int reg = choose_spill_reg(g);

      if (reg != -1) {
         spill_reg(reg);
         ralloc_free(g);
         return false;
      }
   }

   if (!ra_allocate(g)) {
      /* Failed to allocate registers.  Spill a reg, and the caller will
       * loop back into here to try again.
       */
      int reg = choose_spill_reg(g);

      if (reg == -1) {
         fail("no register to spill:\n");
         dump_instructions(NULL);
      } else if (allow_spilling) {
         spill_reg(reg);
      }

      ralloc_free(g);

      return false;
   }

   /* Get the chosen virtual registers for each node, and map virtual
    * regs in the register classes back down to real hardware reg
    * numbers.
    */
   this->grf_used = payload_node_count;
   for (unsigned i = 0; i < this->alloc.count; i++) {
      int reg = ra_get_node_reg(g, i);

      hw_reg_mapping[i] = compiler->fs_reg_sets[rsi].ra_reg_to_grf[reg];
      this->grf_used = MAX2(this->grf_used,
                            hw_reg_mapping[i] + this->alloc.sizes[i]);
   }

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      assign_reg(hw_reg_mapping, &inst->dst);
      for (int i = 0; i < inst->sources; i++) {
         assign_reg(hw_reg_mapping, &inst->src[i]);
      }
   }

   this->alloc.count = this->grf_used;

   ralloc_free(g);

   return true;
}

namespace {
   /**
    * Maximum spill block size we expect to encounter in 32B units.
    *
    * This is somewhat arbitrary and doesn't necessarily limit the maximum
    * variable size that can be spilled -- A higher value will allow a
    * variable of a given size to be spilled more efficiently with a smaller
    * number of scratch messages, but will increase the likelihood of a
    * collision between the MRFs reserved for spilling and other MRFs used by
    * the program (and possibly increase GRF register pressure on platforms
    * without hardware MRFs), which could cause register allocation to fail.
    *
    * For the moment reserve just enough space so a register of 32 bit
    * component type and natural region width can be spilled without splitting
    * into multiple (force_writemask_all) scratch messages.
    */
   unsigned
   spill_max_size(const backend_shader *s)
   {
      /* FINISHME - On Gen7+ it should be possible to avoid this limit
       *            altogether by spilling directly from the temporary GRF
       *            allocated to hold the result of the instruction (and the
       *            scratch write header).
       */
      /* FINISHME - The shader's dispatch width probably belongs in
       *            backend_shader (or some nonexistent fs_shader class?)
       *            rather than in the visitor class.
       */
      return static_cast<const fs_visitor *>(s)->dispatch_width / 8;
   }

   /**
    * First MRF register available for spilling.
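    *
    * Worked example (illustrative, assuming BRW_MAX_MRF of 16 on Gen7): in
    * SIMD16, spill_max_size() is 16 / 8 == 2, so spilling starts at MRF
    * 16 - 2 - 1 == 13, matching the "MRFs 13-15" note in spill_reg().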
    */
   unsigned
   spill_base_mrf(const backend_shader *s)
   {
      return BRW_MAX_MRF(s->devinfo->gen) - spill_max_size(s) - 1;
   }
}

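/* Read previously spilled data back from scratch space into dst.  count is
 * in 32B registers.  For example (illustrative): a SIMD16 float source that
 * was spilled as two components has reg_size == 2 and count == 4, so two
 * read messages are emitted.
 */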
static void
emit_unspill(const fs_builder &bld, fs_reg dst,
             uint32_t spill_offset, unsigned count)
{
   const gen_device_info *devinfo = bld.shader->devinfo;
   const unsigned reg_size = dst.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      /* The Gen7 descriptor-based offset is 12 bits of HWORD units.  Because
       * the Gen7-style scratch block read is hardwired to BTI 255, on Gen9+
       * it would cause the DC to do an IA-coherent read, which largely
       * outweighs the slight advantage from not having to provide the address
       * as part of the message header, so we're better off using plain old
       * oword block reads.
       */
      bool gen7_read = (devinfo->gen >= 7 && devinfo->gen < 9 &&
                        spill_offset < (1 << 12) * REG_SIZE);
      fs_inst *unspill_inst = bld.emit(gen7_read ?
                                       SHADER_OPCODE_GEN7_SCRATCH_READ :
                                       SHADER_OPCODE_GEN4_SCRATCH_READ,
                                       dst);
      unspill_inst->offset = spill_offset;

      if (!gen7_read) {
         unspill_inst->base_mrf = spill_base_mrf(bld.shader);
         unspill_inst->mlen = 1; /* header contains offset */
      }

      dst.offset += reg_size * REG_SIZE;
      spill_offset += reg_size * REG_SIZE;
   }
}

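/* Write src out to scratch space, one message per 32B register group.  A
 * quick sanity check (illustrative): in SIMD16 a 32-bit source has
 * reg_size == 2, so each scratch write carries mlen == 3 registers, one
 * header plus two of data.
 */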
static void
emit_spill(const fs_builder &bld, fs_reg src,
           uint32_t spill_offset, unsigned count)
{
   const unsigned reg_size = src.component_size(bld.dispatch_width()) /
                             REG_SIZE;
   assert(count % reg_size == 0);

   for (unsigned i = 0; i < count / reg_size; i++) {
      fs_inst *spill_inst =
         bld.emit(SHADER_OPCODE_GEN4_SCRATCH_WRITE, bld.null_reg_f(), src);
      src.offset += reg_size * REG_SIZE;
      spill_inst->offset = spill_offset + i * reg_size * REG_SIZE;
      spill_inst->mlen = 1 + reg_size; /* header, value */
      spill_inst->base_mrf = spill_base_mrf(bld.shader);
   }
}

int
fs_visitor::choose_spill_reg(struct ra_graph *g)
{
   float loop_scale = 1.0;
   float spill_costs[this->alloc.count];
   bool no_spill[this->alloc.count];

   for (unsigned i = 0; i < this->alloc.count; i++) {
      spill_costs[i] = 0.0;
      no_spill[i] = false;
   }

   /* Calculate costs for spilling nodes.  Call it a cost of 1 per
    * spill/unspill we'll have to do, and guess that the insides of
    * loops run 10 times.
    */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF)
            spill_costs[inst->src[i].nr] += loop_scale;
      }

      if (inst->dst.file == VGRF)
         spill_costs[inst->dst.nr] += DIV_ROUND_UP(inst->size_written, REG_SIZE)
                                      * loop_scale;

      switch (inst->opcode) {

      case BRW_OPCODE_DO:
         loop_scale *= 10;
         break;

      case BRW_OPCODE_WHILE:
         loop_scale /= 10;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
         if (inst->src[0].file == VGRF)
            no_spill[inst->src[0].nr] = true;
         break;

      case SHADER_OPCODE_GEN4_SCRATCH_READ:
      case SHADER_OPCODE_GEN7_SCRATCH_READ:
         if (inst->dst.file == VGRF)
            no_spill[inst->dst.nr] = true;
         break;

      default:
         break;
      }
   }

   for (unsigned i = 0; i < this->alloc.count; i++) {
      if (!no_spill[i])
         ra_set_node_spill_cost(g, i, spill_costs[i]);
   }

   return ra_get_best_spill_node(g);
}

void
fs_visitor::spill_reg(int spill_reg)
{
   int size = alloc.sizes[spill_reg];
   unsigned int spill_offset = last_scratch;
   assert(ALIGN(spill_offset, 16) == spill_offset); /* oword read/write req. */

   /* Spills may use MRFs 13-15 in the SIMD16 case.  Our texturing is done
    * using up to 11 MRFs starting from either m1 or m2, and fb writes can use
    * up to m13 (gen6+ simd16: 2 header + 8 color + 2 src0alpha + 2 omask) or
    * m15 (gen4-5 simd16: 2 header + 8 color + 1 aads + 2 src depth + 2 dst
    * depth), starting from m1.  In summary: We may not be able to spill in
    * SIMD16 mode, because we'd stomp the FB writes.
    */
   if (!spilled_any_registers) {
      bool mrf_used[BRW_MAX_MRF(devinfo->gen)];
      get_used_mrfs(this, mrf_used);

      for (int i = spill_base_mrf(this); i < BRW_MAX_MRF(devinfo->gen); i++) {
         if (mrf_used[i]) {
            fail("Register spilling not supported with m%d used", i);
            return;
         }
      }

      spilled_any_registers = true;
   }

   last_scratch += size * REG_SIZE;

   /* Generate spill/unspill instructions for the objects being
    * spilled.  Right now, we spill or unspill the whole thing to a
    * virtual grf of the same size.  For most instructions, though, we
    * could just spill/unspill the GRF being accessed.
    */
   foreach_block_and_inst (block, fs_inst, inst, cfg) {
      const fs_builder ibld = fs_builder(this, block, inst);

      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == VGRF &&
             inst->src[i].nr == spill_reg) {
            int count = regs_read(inst, i);
            int subset_spill_offset = spill_offset +
               ROUND_DOWN_TO(inst->src[i].offset, REG_SIZE);
            fs_reg unspill_dst(VGRF, alloc.allocate(count));

            inst->src[i].nr = unspill_dst.nr;
            inst->src[i].offset %= REG_SIZE;

            /* We read the largest power-of-two divisor of the register count
             * (because only POT scratch read blocks are allowed by the
             * hardware) up to the maximum supported block size.
             */
            const unsigned width =
               MIN2(32, 1u << (ffs(MAX2(1, count) * 8) - 1));
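            /* E.g. (illustrative): count == 3 gives ffs(24) - 1 == 3 and a
             * width of 8 channels, while count == 4 hits the 32-channel cap.
             */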

            /* Set exec_all() on unspill messages under the (rather
             * pessimistic) assumption that there is no one-to-one
             * correspondence between channels of the spilled variable in
             * scratch space and the scratch read message, which operates on
             * 32 bit channels.  It shouldn't hurt in any case because the
             * unspill destination is a block-local temporary.
             */
            emit_unspill(ibld.exec_all().group(width, 0),
                         unspill_dst, subset_spill_offset, count);
         }
      }

      if (inst->dst.file == VGRF &&
          inst->dst.nr == spill_reg) {
         int subset_spill_offset = spill_offset +
            ROUND_DOWN_TO(inst->dst.offset, REG_SIZE);
         fs_reg spill_src(VGRF, alloc.allocate(regs_written(inst)));

         inst->dst.nr = spill_src.nr;
         inst->dst.offset %= REG_SIZE;

         /* If we're immediately spilling the register, we should not use
          * destination dependency hints.  Doing so will cause the GPU to
          * try to read and write the register at the same time and may
          * hang the GPU.
          */
         inst->no_dd_clear = false;
         inst->no_dd_check = false;

         /* Calculate the execution width of the scratch messages (which work
          * in terms of 32 bit components so we have a fixed number of eight
          * channels per spilled register).  We attempt to write one
          * exec_size-wide component of the variable at a time without
          * exceeding the maximum number of (fake) MRF registers reserved for
          * spills.
          */
         const unsigned width = 8 * MIN2(
            DIV_ROUND_UP(inst->dst.component_size(inst->exec_size), REG_SIZE),
            spill_max_size(this));
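         /* E.g. (illustrative): a 32-bit SIMD16 destination spans
          * 64 bytes == 2 GRFs per component, and SIMD16 spill_max_size() is
          * also 2, so width == 16 channels.
          */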

         /* Spills should only write data initialized by the instruction for
          * whichever channels are enabled in the execution mask.  If that's
          * not possible we'll have to emit a matching unspill before the
          * instruction and set force_writemask_all on the spill.
          */
         const bool per_channel =
            inst->dst.is_contiguous() && type_sz(inst->dst.type) == 4 &&
            inst->exec_size == width;

         /* Builder used to emit the scratch messages. */
         const fs_builder ubld = ibld.exec_all(!per_channel).group(width, 0);

         /* If our write is going to affect just part of the
          * regs_written(inst), then we need to unspill the destination since
          * we write back out all of the regs_written().  If the original
          * instruction had force_writemask_all set and is not a partial
          * write, there should be no need for the unspill since the
          * instruction will be overwriting the whole destination in any case.
          */
         if (inst->is_partial_write() ||
             (!inst->force_writemask_all && !per_channel))
            emit_unspill(ubld, spill_src, subset_spill_offset,
                         regs_written(inst));

         emit_spill(ubld.at(block, inst->next), spill_src,
                    subset_spill_offset, regs_written(inst));
      }
   }

   invalidate_live_intervals();
}
    993