/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "compiler/v3d_compiler.h"
#include "compiler/nir/nir_builder.h"

/**
 * Walks the NIR generated by TGSI-to-NIR or GLSL-to-NIR to lower its I/O
 * intrinsics into something amenable to the V3D architecture.
 *
 * Currently, it splits VS inputs and uniforms into scalars, drops any
 * non-position outputs in coordinate shaders, and fixes up the addressing on
 * indirect uniform loads.  FS input and VS output scalarization is handled by
 * nir_lower_io_to_scalar().
 */
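
/*
 * As a rough sketch of the uniform lowering below (the NIR syntax here is
 * illustrative, not exact printer output), an indirect vec4 load from vec4
 * slot 1:
 *
 *    vec4 ssa_1 = load_uniform (ssa_0) (base=1)
 *
 * becomes scalar loads addressed in bytes, plus a vec4 to gather the
 * results (the shift is emitted once per component and left for later
 * cleanup passes to merge):
 *
 *    vec1 ssa_2 = ishl ssa_0, 4
 *    vec1 ssa_3 = load_uniform (ssa_2) (base=16)
 *    vec1 ssa_4 = load_uniform (ssa_2) (base=20)
 *    vec1 ssa_5 = load_uniform (ssa_2) (base=24)
 *    vec1 ssa_6 = load_uniform (ssa_2) (base=28)
 *    vec4 ssa_7 = vec4 ssa_3, ssa_4, ssa_5, ssa_6
 */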

static void
replace_intrinsic_with_vec(nir_builder *b, nir_intrinsic_instr *intr,
                           nir_ssa_def **comps)
{
        /* Batch things back together into a vector.  This will get split by
         * the later ALU scalarization pass.
         */
        nir_ssa_def *vec = nir_vec(b, comps, intr->num_components);

        /* Replace the old intrinsic with a reference to our reconstructed
         * vector.
         */
        nir_ssa_def_rewrite_uses(&intr->dest.ssa, nir_src_for_ssa(vec));
        nir_instr_remove(&intr->instr);
}
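
/* For a store to a VS output, drops the store when nothing downstream
 * consumes the slot: position and point size are always kept, while any
 * other varying is kept only if one of the linked FS's inputs reads it.
 */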
static void
v3d_nir_lower_output(struct v3d_compile *c, nir_builder *b,
                     nir_intrinsic_instr *intr)
{
        nir_variable *output_var = NULL;
        nir_foreach_variable(var, &c->s->outputs) {
                if (var->data.driver_location == nir_intrinsic_base(intr)) {
                        output_var = var;
                        break;
                }
        }
        assert(output_var);

        if (c->vs_key) {
                int slot = output_var->data.location;
                bool used = false;

                switch (slot) {
                case VARYING_SLOT_PSIZ:
                case VARYING_SLOT_POS:
                        used = true;
                        break;

                default:
                        for (int i = 0; i < c->vs_key->num_fs_inputs; i++) {
                                if (v3d_slot_get_slot(c->vs_key->fs_inputs[i]) == slot) {
                                        used = true;
                                        break;
                                }
                        }
                        break;
                }

                if (!used)
                        nir_instr_remove(&intr->instr);
        }
}
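
/* Splits a (possibly vector) load_uniform into one scalar load per
 * component, rewriting the offsets from vec4 slots to bytes, and then
 * recombines the results into a vector for the original intrinsic's users.
 */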
static void
v3d_nir_lower_uniform(struct v3d_compile *c, nir_builder *b,
                      nir_intrinsic_instr *intr)
{
        b->cursor = nir_before_instr(&intr->instr);

        /* Generate scalar loads equivalent to the original vector. */
        nir_ssa_def *dests[4];
        for (unsigned i = 0; i < intr->num_components; i++) {
                nir_intrinsic_instr *intr_comp =
                        nir_intrinsic_instr_create(c->s, intr->intrinsic);
                intr_comp->num_components = 1;
                nir_ssa_dest_init(&intr_comp->instr, &intr_comp->dest, 1, 32, NULL);

                /* Convert the uniform offset to bytes: each vec4 slot is 16
                 * bytes and each scalar component 4.  If the indirect offset
                 * happens to be a constant, constant-folding will clean up
                 * the shift for us.
                 */
                nir_intrinsic_set_base(intr_comp,
                                       nir_intrinsic_base(intr) * 16 +
                                       i * 4);

                intr_comp->src[0] =
                        nir_src_for_ssa(nir_ishl(b, intr->src[0].ssa,
                                                 nir_imm_int(b, 4)));

                dests[i] = &intr_comp->dest.ssa;

                nir_builder_instr_insert(b, &intr_comp->instr);
        }

        replace_intrinsic_with_vec(b, intr, dests);
}
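
/* Dispatches a single instruction to the matching lowering; intrinsics not
 * handled here are left for later passes.
 */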
static void
v3d_nir_lower_io_instr(struct v3d_compile *c, nir_builder *b,
                       struct nir_instr *instr)
{
        if (instr->type != nir_instr_type_intrinsic)
                return;
        nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);

        switch (intr->intrinsic) {
        case nir_intrinsic_load_input:
                break;

        case nir_intrinsic_store_output:
                v3d_nir_lower_output(c, b, intr);
                break;

        case nir_intrinsic_load_uniform:
                v3d_nir_lower_uniform(c, b, intr);
                break;

        case nir_intrinsic_load_user_clip_plane:
        default:
                break;
        }
}
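
/* Runs the lowering across one function implementation.  Because
 * instructions are added and removed, only block indices and dominance
 * information are preserved.
 */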
static bool
v3d_nir_lower_io_impl(struct v3d_compile *c, nir_function_impl *impl)
{
        nir_builder b;
        nir_builder_init(&b, impl);

        nir_foreach_block(block, impl) {
                nir_foreach_instr_safe(instr, block)
                        v3d_nir_lower_io_instr(c, &b, instr);
        }

        nir_metadata_preserve(impl, nir_metadata_block_index |
                              nir_metadata_dominance);

        return true;
}
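
/* Entry point: lowers the I/O intrinsics in every function of the shader. */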
void
v3d_nir_lower_io(nir_shader *s, struct v3d_compile *c)
{
        nir_foreach_function(function, s) {
                if (function->impl)
                        v3d_nir_lower_io_impl(c, function->impl);
        }
}
    177