/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"

/*
 * Implements a pass that lowers vector phi nodes to scalar phi nodes when
 * we don't think it will hurt anything.
 */
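
/*
 * As a rough sketch (illustrative pseudo-IR, not exact NIR syntax), a
 * vector phi such as
 *
 *    vec4 ssa_5 = phi block_1: ssa_2, block_2: ssa_4
 *
 * becomes one scalar phi per component, fed by movs inserted into the
 * predecessors to pick off each component (phi sources cannot swizzle),
 * plus a vecN that recombines the scalar results after the phis:
 *
 *    vec1 ssa_6 = mov ssa_2.x        (inserted at the end of block_1)
 *    vec1 ssa_7 = mov ssa_4.x        (inserted at the end of block_2)
 *    vec1 ssa_8 = phi block_1: ssa_6, block_2: ssa_7
 *    ...
 *    vec4 ssa_N = vec4 ssa_8, ..., ...
 *
 * Copy propagation is expected to clean up the redundant movs and vecN
 * instructions afterwards.
 */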

struct lower_phis_to_scalar_state {
   void *mem_ctx;
   void *dead_ctx;

   /* Hash table marking which phi nodes are scalarizable.  The keys are
    * pointers to phi instructions; an entry's data is NULL if the phi is
    * not scalarizable and non-NULL if it is.
    */
   struct hash_table *phi_table;
};

static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state);

static bool
is_phi_src_scalarizable(nir_phi_src *src,
                        struct lower_phis_to_scalar_state *state)
{
   /* Don't know what to do with non-ssa sources */
   if (!src->src.is_ssa)
      return false;

   nir_instr *src_instr = src->src.ssa->parent_instr;
   switch (src_instr->type) {
   case nir_instr_type_alu: {
      nir_alu_instr *src_alu = nir_instr_as_alu(src_instr);

      /* ALU operations with output_size == 0 should be scalarized.  We
       * will also see a bunch of vecN operations from scalarizing ALU
       * operations and, since they can easily be copy-propagated, they
       * are ok too.
       */
      return nir_op_infos[src_alu->op].output_size == 0 ||
             src_alu->op == nir_op_vec2 ||
             src_alu->op == nir_op_vec3 ||
             src_alu->op == nir_op_vec4;
   }

   case nir_instr_type_phi:
      /* A phi is scalarizable if we're going to lower it */
      return should_lower_phi(nir_instr_as_phi(src_instr), state);

   case nir_instr_type_load_const:
   case nir_instr_type_ssa_undef:
      /* These are trivially scalarizable */
      return true;

   case nir_instr_type_intrinsic: {
      nir_intrinsic_instr *src_intrin = nir_instr_as_intrinsic(src_instr);

      switch (src_intrin->intrinsic) {
      case nir_intrinsic_load_var:
         return src_intrin->variables[0]->var->data.mode == nir_var_shader_in ||
                src_intrin->variables[0]->var->data.mode == nir_var_uniform;

      case nir_intrinsic_interp_var_at_centroid:
      case nir_intrinsic_interp_var_at_sample:
      case nir_intrinsic_interp_var_at_offset:
      case nir_intrinsic_load_uniform:
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
      case nir_intrinsic_load_input:
         return true;
      default:
         break;
      }
      }

      /* Any other intrinsic is not scalarizable. */
      return false;
   }

   default:
      /* We can't scalarize this type of instruction */
      return false;
   }
}

/**
 * Determines if the given phi node should be lowered.  The only phi nodes
 * we will scalarize at the moment are those where all of the sources are
 * scalarizable.
 *
 * The reason for this comes down to coalescing.  Since phi sources can't
 * swizzle, swizzles on phis have to be resolved by inserting a mov right
 * before the phi.  The choice then becomes between movs to pick off
 * components for a scalar phi or potentially movs to recombine components
 * for a vector phi.  The problem is that the movs generated to pick off
 * the components are almost uncoalescable.  We can't coalesce them in NIR
 * because we need them to pick off components and we can't coalesce them
 * in the backend because the source register is a vector and the
 * destination is a scalar that may be used at other places in the program.
 * On the other hand, if we have a bunch of scalars going into a vector
 * phi, the situation is much better.  In this case, if the SSA def is
 * generated in the predecessor block to the corresponding phi source, the
 * backend code will be an ALU op into a temporary and then a mov into the
 * given vector component;  this move can almost certainly be coalesced
 * away.
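 *
 * As a rough illustration (pseudo-IR, not exact NIR syntax), the
 * problematic "pick off a component" case looks like
 *
 *    vec1 ssa_7 = mov ssa_3.y
 *    vec1 ssa_8 = phi block_1: ssa_7, ...
 *
 * where ssa_7 can be coalesced neither in NIR (it exists purely to pick
 * off a component) nor in the backend (ssa_3 is a vector, ssa_8 a scalar),
 * which is why we only lower phis whose sources are all scalarizable.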
 */
static bool
should_lower_phi(nir_phi_instr *phi, struct lower_phis_to_scalar_state *state)
{
   /* Already scalar */
   if (phi->dest.ssa.num_components == 1)
      return false;

   struct hash_entry *entry = _mesa_hash_table_search(state->phi_table, phi);
   if (entry)
      return entry->data != NULL;

   /* Insert an entry and mark it as scalarizable for now. That way
    * we don't recurse forever and a cycle in the dependence graph
    * won't automatically make us fail to scalarize.
    */
   entry = _mesa_hash_table_insert(state->phi_table, phi, (void *)(intptr_t)1);

   bool scalarizable = true;

   nir_foreach_phi_src(src, phi) {
      scalarizable = is_phi_src_scalarizable(src, state);
      if (!scalarizable)
         break;
   }

   /* The hash table entry for 'phi' may have changed while we were
    * recursing through the dependence graph, so we need to look it up
    * again before setting its data.
    */
   entry = _mesa_hash_table_search(state->phi_table, phi);
   assert(entry);

   entry->data = (void *)(intptr_t)scalarizable;

   return scalarizable;
}

static bool
lower_phis_to_scalar_block(nir_block *block,
                           struct lower_phis_to_scalar_state *state)
{
   bool progress = false;

   /* Find the last phi node in the block */
   nir_phi_instr *last_phi = NULL;
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      last_phi = nir_instr_as_phi(instr);
   }

   /* We have to handle the phi nodes in their own pass due to the way
    * we're modifying the linked list of instructions.
    */
   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_phi)
         break;

      nir_phi_instr *phi = nir_instr_as_phi(instr);

      if (!should_lower_phi(phi, state))
         continue;

      unsigned bit_size = phi->dest.ssa.bit_size;

      /* Create a vecN operation to combine the results.  Most of these
       * will be redundant, but copy propagation should clean them up for
       * us.  No need to add the complexity here.
       */
      nir_op vec_op;
      switch (phi->dest.ssa.num_components) {
      case 2: vec_op = nir_op_vec2; break;
      case 3: vec_op = nir_op_vec3; break;
      case 4: vec_op = nir_op_vec4; break;
      default: unreachable("Invalid number of components");
      }

      nir_alu_instr *vec = nir_alu_instr_create(state->mem_ctx, vec_op);
      nir_ssa_dest_init(&vec->instr, &vec->dest.dest,
                        phi->dest.ssa.num_components,
                        bit_size, NULL);
      vec->dest.write_mask = (1 << phi->dest.ssa.num_components) - 1;

      for (unsigned i = 0; i < phi->dest.ssa.num_components; i++) {
         nir_phi_instr *new_phi = nir_phi_instr_create(state->mem_ctx);
         nir_ssa_dest_init(&new_phi->instr, &new_phi->dest, 1,
                           phi->dest.ssa.bit_size, NULL);

         vec->src[i].src = nir_src_for_ssa(&new_phi->dest.ssa);

         nir_foreach_phi_src(src, phi) {
            /* We need to insert a mov to grab the i'th component of src */
            nir_alu_instr *mov = nir_alu_instr_create(state->mem_ctx,
                                                      nir_op_imov);
            nir_ssa_dest_init(&mov->instr, &mov->dest.dest, 1, bit_size, NULL);
            mov->dest.write_mask = 1;
            nir_src_copy(&mov->src[0].src, &src->src, state->mem_ctx);
            mov->src[0].swizzle[0] = i;

            /* Insert at the end of the predecessor but before the jump */
            nir_instr *pred_last_instr = nir_block_last_instr(src->pred);
            if (pred_last_instr && pred_last_instr->type == nir_instr_type_jump)
               nir_instr_insert_before(pred_last_instr, &mov->instr);
            else
               nir_instr_insert_after_block(src->pred, &mov->instr);

            nir_phi_src *new_src = ralloc(new_phi, nir_phi_src);
            new_src->pred = src->pred;
            new_src->src = nir_src_for_ssa(&mov->dest.dest.ssa);

            exec_list_push_tail(&new_phi->srcs, &new_src->node);
         }

         nir_instr_insert_before(&phi->instr, &new_phi->instr);
      }

      nir_instr_insert_after(&last_phi->instr, &vec->instr);

      nir_ssa_def_rewrite_uses(&phi->dest.ssa,
                               nir_src_for_ssa(&vec->dest.dest.ssa));

      ralloc_steal(state->dead_ctx, phi);
      nir_instr_remove(&phi->instr);

      progress = true;

      /* We're using the safe iterator and inserting all the newly
       * scalarized phi nodes before their non-scalarized versions, so
       * that's ok.  However, we are also inserting vec operations after
       * the last phi node, so once we get here we can't trust even the
       * safe iterator to stop properly.  We have to break manually.
       */
      if (instr == &last_phi->instr)
         break;
   }

   return progress;
}

static bool
lower_phis_to_scalar_impl(nir_function_impl *impl)
{
   struct lower_phis_to_scalar_state state;
   bool progress = false;

   state.mem_ctx = ralloc_parent(impl);
   state.dead_ctx = ralloc_context(NULL);
   state.phi_table = _mesa_hash_table_create(state.dead_ctx, _mesa_hash_pointer,
                                             _mesa_key_pointer_equal);

   nir_foreach_block(block, impl) {
      progress = lower_phis_to_scalar_block(block, &state) || progress;
   }

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);

   ralloc_free(state.dead_ctx);
   return progress;
}

/** A pass that lowers vector phi nodes to scalar
 *
 * This pass loops through the blocks and looks for vector phi nodes it can
 * lower to scalar phi nodes.  Not all phi nodes are lowered.  For instance,
 * if one of the sources is a non-scalarizable vector, then we don't bother
 * lowering because that would generate hard-to-coalesce movs.
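 *
 * As a usage sketch (illustrative; exact call sites vary by driver), this
 * pass is typically run from a driver's NIR optimization loop so that copy
 * propagation and DCE can clean up the movs and vecN instructions it
 * creates, e.g.:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       progress |= nir_lower_phis_to_scalar(shader);
 *       progress |= nir_copy_prop(shader);
 *       progress |= nir_opt_dce(shader);
 *    } while (progress);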
 */
bool
nir_lower_phis_to_scalar(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress = lower_phis_to_scalar_impl(function->impl) || progress;
   }

   return progress;
}