/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * \file lower_packed_varyings.cpp
 *
 * This lowering pass generates GLSL code that manually packs varyings into
 * vec4 slots, for the benefit of back-ends that don't support packed varyings
 * natively.
 *
 * For example, the following shader:
 *
 *   out mat3x2 foo;  // location=4, location_frac=0
 *   out vec3 bar[2]; // location=5, location_frac=2
 *
 *   main()
 *   {
 *     ...
 *   }
 *
 * Is rewritten to:
 *
 *   mat3x2 foo;
 *   vec3 bar[2];
 *   out vec4 packed4; // location=4, location_frac=0
 *   out vec4 packed5; // location=5, location_frac=0
 *   out vec4 packed6; // location=6, location_frac=0
 *
 *   main()
 *   {
 *     ...
 *     packed4.xy = foo[0];
 *     packed4.zw = foo[1];
 *     packed5.xy = foo[2];
 *     packed5.zw = bar[0].xy;
 *     packed6.x = bar[0].z;
 *     packed6.yzw = bar[1];
 *   }
 *
 * This lowering pass properly handles "double parking" of a varying vector
 * across two varying slots.  For example, in the code above, two of the
 * components of bar[0] are stored in packed5, and the remaining component is
 * stored in packed6.
 *
 * Note that in theory, the extra instructions may cause some loss of
 * performance.  However, hopefully in most cases the performance loss will
 * either be absorbed by a later optimization pass, or it will be offset by
 * memory bandwidth savings (because fewer varyings are used).
 *
 * This lowering pass also packs flat floats, ints, and uints together, by
 * using ivec4 as the base type of flat "varyings", and using appropriate
 * casts to convert floats and uints into ints.
 *
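 * For example (an illustrative sketch, not the literal output of this pass),
 * a flat int i and a flat float f sharing one slot would be packed roughly
 * as:
 *
 *   packedN.x = i;
 *   packedN.y = floatBitsToInt(f);
 *
 * with the inverse intBitsToFloat() cast applied when unpacking.
 *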
 * This lowering pass also handles varyings whose type is a struct or an array
 * of struct.  Structs are packed in order and with no gaps, so there may be a
 * performance penalty due to structure elements being double-parked.
 *
 * Lowering of geometry shader inputs is slightly more complex, since geometry
 * inputs are always arrays, so we need to lower arrays to arrays.  For
 * example, the following input:
 *
 *   in struct Foo {
 *     float f;
 *     vec3 v;
 *     vec2 a[2];
 *   } arr[3];         // location=4, location_frac=0
 *
 * Would get lowered like this if it occurred in a fragment shader:
 *
 *   struct Foo {
 *     float f;
 *     vec3 v;
 *     vec2 a[2];
 *   } arr[3];
 *   in vec4 packed4;  // location=4, location_frac=0
 *   in vec4 packed5;  // location=5, location_frac=0
 *   in vec4 packed6;  // location=6, location_frac=0
 *   in vec4 packed7;  // location=7, location_frac=0
 *   in vec4 packed8;  // location=8, location_frac=0
 *   in vec4 packed9;  // location=9, location_frac=0
 *
 *   main()
 *   {
 *     arr[0].f = packed4.x;
 *     arr[0].v = packed4.yzw;
 *     arr[0].a[0] = packed5.xy;
 *     arr[0].a[1] = packed5.zw;
 *     arr[1].f = packed6.x;
 *     arr[1].v = packed6.yzw;
 *     arr[1].a[0] = packed7.xy;
 *     arr[1].a[1] = packed7.zw;
 *     arr[2].f = packed8.x;
 *     arr[2].v = packed8.yzw;
 *     arr[2].a[0] = packed9.xy;
 *     arr[2].a[1] = packed9.zw;
 *     ...
 *   }
 *
 * But it would get lowered like this if it occurred in a geometry shader:
 *
 *   struct Foo {
 *     float f;
 *     vec3 v;
 *     vec2 a[2];
 *   } arr[3];
 *   in vec4 packed4[3];  // location=4, location_frac=0
 *   in vec4 packed5[3];  // location=5, location_frac=0
 *
 *   main()
 *   {
 *     arr[0].f = packed4[0].x;
 *     arr[0].v = packed4[0].yzw;
 *     arr[0].a[0] = packed5[0].xy;
 *     arr[0].a[1] = packed5[0].zw;
 *     arr[1].f = packed4[1].x;
 *     arr[1].v = packed4[1].yzw;
 *     arr[1].a[0] = packed5[1].xy;
 *     arr[1].a[1] = packed5[1].zw;
 *     arr[2].f = packed4[2].x;
 *     arr[2].v = packed4[2].yzw;
 *     arr[2].a[0] = packed5[2].xy;
 *     arr[2].a[1] = packed5[2].zw;
 *     ...
 *   }
 */

#include "glsl_symbol_table.h"
#include "ir.h"
#include "ir_builder.h"
#include "ir_optimization.h"
#include "program/prog_instruction.h"

using namespace ir_builder;

namespace {

/**
 * Visitor that performs varying packing.  For each varying declared in the
 * shader, this visitor determines whether it needs to be packed.  If so, it
 * demotes it to an ordinary global, creates new packed varyings, and
 * generates assignments to convert between the original varying and the
 * packed varying.
 */
class lower_packed_varyings_visitor
{
public:
   lower_packed_varyings_visitor(void *mem_ctx,
                                 unsigned locations_used,
                                 const uint8_t *components,
                                 ir_variable_mode mode,
                                 unsigned gs_input_vertices,
                                 exec_list *out_instructions,
                                 exec_list *out_variables,
                                 bool disable_varying_packing,
                                 bool xfb_enabled);

   void run(struct gl_linked_shader *shader);

private:
   void bitwise_assign_pack(ir_rvalue *lhs, ir_rvalue *rhs);
   void bitwise_assign_unpack(ir_rvalue *lhs, ir_rvalue *rhs);
   unsigned lower_rvalue(ir_rvalue *rvalue, unsigned fine_location,
                         ir_variable *unpacked_var, const char *name,
                         bool gs_input_toplevel, unsigned vertex_index);
   unsigned lower_arraylike(ir_rvalue *rvalue, unsigned array_size,
                            unsigned fine_location,
                            ir_variable *unpacked_var, const char *name,
                            bool gs_input_toplevel, unsigned vertex_index);
   ir_dereference *get_packed_varying_deref(unsigned location,
                                            ir_variable *unpacked_var,
                                            const char *name,
                                            unsigned vertex_index);
   bool needs_lowering(ir_variable *var);

   /**
    * Memory context used to allocate new instructions for the shader.
    */
   void * const mem_ctx;

   /**
    * Number of generic varying slots which are used by this shader.  This is
    * used to allocate temporary intermediate data structures.  If any varying
    * used by this shader has a location greater than or equal to
    * VARYING_SLOT_VAR0 + locations_used, an assertion will fire.
    */
   const unsigned locations_used;

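   /**
    * For each generic varying slot, the number of vector components in use
    * in that slot (supplied by the caller); this determines the width of the
    * packed varying created for the slot.  See get_packed_varying_deref().
    */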
   const uint8_t *components;

   /**
    * Array of pointers to the packed varyings that have been created for each
    * generic varying slot.  NULL entries in this array indicate varying slots
    * for which a packed varying has not been created yet.
    */
   ir_variable **packed_varyings;

   /**
    * Type of varying which is being lowered in this pass (either
    * ir_var_shader_in or ir_var_shader_out).
    */
   const ir_variable_mode mode;

   /**
    * If we are currently lowering geometry shader inputs, the number of input
    * vertices the geometry shader accepts.  Otherwise zero.
    */
   const unsigned gs_input_vertices;

   /**
    * Exec list into which the visitor should insert the packing instructions.
    * The caller provides this list and should insert the instructions into
    * the appropriate place in the shader once the visitor has finished
    * running.
    */
   exec_list *out_instructions;

   /**
    * Exec list into which the visitor should insert any new variables.
    */
   exec_list *out_variables;

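   /**
    * Flags from the caller: when disable_varying_packing is set, only
    * varyings that must still be packed (e.g. those used only by transform
    * feedback) are lowered, and xfb_enabled indicates whether transform
    * feedback is in use.  See needs_lowering() for how they interact.
    */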
   bool disable_varying_packing;
   bool xfb_enabled;
};

} /* anonymous namespace */

lower_packed_varyings_visitor::lower_packed_varyings_visitor(
      void *mem_ctx, unsigned locations_used, const uint8_t *components,
      ir_variable_mode mode,
      unsigned gs_input_vertices, exec_list *out_instructions,
      exec_list *out_variables, bool disable_varying_packing,
      bool xfb_enabled)
   : mem_ctx(mem_ctx),
     locations_used(locations_used),
     components(components),
     packed_varyings((ir_variable **)
                     rzalloc_array_size(mem_ctx, sizeof(*packed_varyings),
                                        locations_used)),
     mode(mode),
     gs_input_vertices(gs_input_vertices),
     out_instructions(out_instructions),
     out_variables(out_variables),
     disable_varying_packing(disable_varying_packing),
     xfb_enabled(xfb_enabled)
{
}

void
lower_packed_varyings_visitor::run(struct gl_linked_shader *shader)
{
   foreach_in_list(ir_instruction, node, shader->ir) {
      ir_variable *var = node->as_variable();
      if (var == NULL)
         continue;

      if (var->data.mode != this->mode ||
          var->data.location < VARYING_SLOT_VAR0 ||
          !this->needs_lowering(var))
         continue;

      /* This lowering pass is only capable of packing floats and ints
       * together when their interpolation mode is "flat".  Treat integers as
       * being flat when the interpolation mode is none.
       */
      assert(var->data.interpolation == INTERP_MODE_FLAT ||
             var->data.interpolation == INTERP_MODE_NONE ||
             !var->type->contains_integer());

      /* Clone the variable for the program resource list before it gets
       * modified and lost.
       */
      if (!shader->packed_varyings)
         shader->packed_varyings = new (shader) exec_list;

      shader->packed_varyings->push_tail(var->clone(shader, NULL));

      /* Change the old varying into an ordinary global. */
      assert(var->data.mode != ir_var_temporary);
      var->data.mode = ir_var_auto;

      /* Create a reference to the old varying. */
      ir_dereference_variable *deref
         = new(this->mem_ctx) ir_dereference_variable(var);

      /* Recursively pack or unpack it. */
      this->lower_rvalue(deref, var->data.location * 4 + var->data.location_frac, var,
                         var->name, this->gs_input_vertices != 0, 0);
   }
}

#define SWIZZLE_ZWZW MAKE_SWIZZLE4(SWIZZLE_Z, SWIZZLE_W, SWIZZLE_Z, SWIZZLE_W)

/**
 * Make an ir_assignment from \c rhs to \c lhs, performing appropriate
 * bitcasts if necessary to match up types.
 *
 * This function is called when packing varyings.
 */
void
lower_packed_varyings_visitor::bitwise_assign_pack(ir_rvalue *lhs,
                                                   ir_rvalue *rhs)
{
   if (lhs->type->base_type != rhs->type->base_type) {
      /* Since we only mix types in flat varyings, and we always store flat
       * varyings as type ivec4, we need only produce conversions from (uint
       * or float) to int.
       */
      assert(lhs->type->base_type == GLSL_TYPE_INT);
      switch (rhs->type->base_type) {
      case GLSL_TYPE_UINT:
         rhs = new(this->mem_ctx)
            ir_expression(ir_unop_u2i, lhs->type, rhs);
         break;
      case GLSL_TYPE_FLOAT:
         rhs = new(this->mem_ctx)
            ir_expression(ir_unop_bitcast_f2i, lhs->type, rhs);
         break;
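      /* Each 64-bit element occupies two 32-bit components, so a two-element
       * 64-bit vector unpacks into a full ivec4: element x fills .xy
       * (writemask 0x3) and element y fills .zw (writemask 0xc).
       */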
      case GLSL_TYPE_DOUBLE:
         assert(rhs->type->vector_elements <= 2);
         if (rhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "pack", ir_var_temporary);

            assert(lhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, u2i(expr(ir_unop_unpack_double_2x32, swizzle_x(rhs->clone(mem_ctx, NULL)))), 0x3));
            this->out_instructions->push_tail(
                  assign(t, u2i(expr(ir_unop_unpack_double_2x32, swizzle_y(rhs))), 0xc));
            rhs = deref(t).val;
         } else {
            rhs = u2i(expr(ir_unop_unpack_double_2x32, rhs));
         }
         break;
      case GLSL_TYPE_INT64:
         assert(rhs->type->vector_elements <= 2);
         if (rhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "pack", ir_var_temporary);

            assert(lhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
               assign(t, expr(ir_unop_unpack_int_2x32, swizzle_x(rhs->clone(mem_ctx, NULL))), 0x3));
            this->out_instructions->push_tail(
               assign(t, expr(ir_unop_unpack_int_2x32, swizzle_y(rhs)), 0xc));
            rhs = deref(t).val;
         } else {
            rhs = expr(ir_unop_unpack_int_2x32, rhs);
         }
         break;
      case GLSL_TYPE_UINT64:
         assert(rhs->type->vector_elements <= 2);
         if (rhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "pack", ir_var_temporary);

            assert(lhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, u2i(expr(ir_unop_unpack_uint_2x32, swizzle_x(rhs->clone(mem_ctx, NULL)))), 0x3));
            this->out_instructions->push_tail(
                  assign(t, u2i(expr(ir_unop_unpack_uint_2x32, swizzle_y(rhs))), 0xc));
            rhs = deref(t).val;
         } else {
            rhs = u2i(expr(ir_unop_unpack_uint_2x32, rhs));
         }
         break;
      case GLSL_TYPE_SAMPLER:
         rhs = u2i(expr(ir_unop_unpack_sampler_2x32, rhs));
         break;
      case GLSL_TYPE_IMAGE:
         rhs = u2i(expr(ir_unop_unpack_image_2x32, rhs));
         break;
      default:
         assert(!"Unexpected type conversion while lowering varyings");
         break;
      }
   }
   this->out_instructions->push_tail(new(this->mem_ctx) ir_assignment(lhs, rhs));
}


/**
 * Make an ir_assignment from \c rhs to \c lhs, performing appropriate
 * bitcasts if necessary to match up types.
 *
 * This function is called when unpacking varyings.
 */
void
lower_packed_varyings_visitor::bitwise_assign_unpack(ir_rvalue *lhs,
                                                     ir_rvalue *rhs)
{
   if (lhs->type->base_type != rhs->type->base_type) {
      /* Since we only mix types in flat varyings, and we always store flat
       * varyings as type ivec4, we need only produce conversions from int to
       * (uint or float).
       */
      assert(rhs->type->base_type == GLSL_TYPE_INT);
      switch (lhs->type->base_type) {
      case GLSL_TYPE_UINT:
         rhs = new(this->mem_ctx)
            ir_expression(ir_unop_i2u, lhs->type, rhs);
         break;
      case GLSL_TYPE_FLOAT:
         rhs = new(this->mem_ctx)
            ir_expression(ir_unop_bitcast_i2f, lhs->type, rhs);
         break;
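      /* The inverse of the pack case: each pair of 32-bit components is
       * fused back into one 64-bit element, .xy into element x (writemask
       * 0x1) and .zw into element y (writemask 0x2).
       */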
      case GLSL_TYPE_DOUBLE:
         assert(lhs->type->vector_elements <= 2);
         if (lhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "unpack", ir_var_temporary);
            assert(rhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_double_2x32, i2u(swizzle_xy(rhs->clone(mem_ctx, NULL)))), 0x1));
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_double_2x32, i2u(swizzle(rhs->clone(mem_ctx, NULL), SWIZZLE_ZWZW, 2))), 0x2));
            rhs = deref(t).val;
         } else {
            rhs = expr(ir_unop_pack_double_2x32, i2u(rhs));
         }
         break;
      case GLSL_TYPE_INT64:
         assert(lhs->type->vector_elements <= 2);
         if (lhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "unpack", ir_var_temporary);
            assert(rhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_int_2x32, swizzle_xy(rhs->clone(mem_ctx, NULL))), 0x1));
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_int_2x32, swizzle(rhs->clone(mem_ctx, NULL), SWIZZLE_ZWZW, 2)), 0x2));
            rhs = deref(t).val;
         } else {
            rhs = expr(ir_unop_pack_int_2x32, rhs);
         }
         break;
      case GLSL_TYPE_UINT64:
         assert(lhs->type->vector_elements <= 2);
         if (lhs->type->vector_elements == 2) {
            ir_variable *t = new(mem_ctx) ir_variable(lhs->type, "unpack", ir_var_temporary);
            assert(rhs->type->vector_elements == 4);
            this->out_variables->push_tail(t);
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_uint_2x32, i2u(swizzle_xy(rhs->clone(mem_ctx, NULL)))), 0x1));
            this->out_instructions->push_tail(
                  assign(t, expr(ir_unop_pack_uint_2x32, i2u(swizzle(rhs->clone(mem_ctx, NULL), SWIZZLE_ZWZW, 2))), 0x2));
            rhs = deref(t).val;
         } else {
            rhs = expr(ir_unop_pack_uint_2x32, i2u(rhs));
         }
         break;
      case GLSL_TYPE_SAMPLER:
         rhs = new(mem_ctx)
            ir_expression(ir_unop_pack_sampler_2x32, lhs->type, i2u(rhs));
         break;
      case GLSL_TYPE_IMAGE:
         rhs = new(mem_ctx)
            ir_expression(ir_unop_pack_image_2x32, lhs->type, i2u(rhs));
         break;
      default:
         assert(!"Unexpected type conversion while lowering varyings");
         break;
      }
   }
   this->out_instructions->push_tail(new(this->mem_ctx) ir_assignment(lhs, rhs));
}

/**
 * Recursively pack or unpack the given varying (or portion of a varying) by
 * traversing all of its constituent vectors.
 *
 * \param fine_location is the location where the first constituent vector
 * should be packed--the word "fine" indicates that this location is expressed
 * in multiples of a float, rather than multiples of a vec4 as is used
 * elsewhere in Mesa.
 *
 * \param gs_input_toplevel should be set to true if we are lowering geometry
 * shader inputs, and we are currently lowering the whole input variable
 * (i.e. we are lowering the array whose index selects the vertex).
 *
 * \param vertex_index: if we are lowering geometry shader inputs, and the
 * level of the array that we are currently lowering is *not* the top level,
 * then this indicates which vertex we are currently lowering.  Otherwise it
 * is ignored.
 *
 * \return the location where the next constituent vector (after this one)
 * should be packed.
 */
unsigned
lower_packed_varyings_visitor::lower_rvalue(ir_rvalue *rvalue,
                                            unsigned fine_location,
                                            ir_variable *unpacked_var,
                                            const char *name,
                                            bool gs_input_toplevel,
                                            unsigned vertex_index)
{
   unsigned dmul = rvalue->type->is_64bit() ? 2 : 1;
   /* When gs_input_toplevel is set, we should be looking at a geometry shader
    * input array.
    */
   assert(!gs_input_toplevel || rvalue->type->is_array());

   if (rvalue->type->is_record()) {
      for (unsigned i = 0; i < rvalue->type->length; i++) {
         if (i != 0)
            rvalue = rvalue->clone(this->mem_ctx, NULL);
         const char *field_name = rvalue->type->fields.structure[i].name;
         ir_dereference_record *dereference_record = new(this->mem_ctx)
            ir_dereference_record(rvalue, field_name);
         char *deref_name
            = ralloc_asprintf(this->mem_ctx, "%s.%s", name, field_name);
         fine_location = this->lower_rvalue(dereference_record, fine_location,
                                            unpacked_var, deref_name, false,
                                            vertex_index);
      }
      return fine_location;
   } else if (rvalue->type->is_array()) {
      /* Arrays are packed/unpacked by considering each array element in
       * sequence.
       */
      return this->lower_arraylike(rvalue, rvalue->type->array_size(),
                                   fine_location, unpacked_var, name,
                                   gs_input_toplevel, vertex_index);
   } else if (rvalue->type->is_matrix()) {
      /* Matrices are packed/unpacked by considering each column vector in
       * sequence.
       */
      return this->lower_arraylike(rvalue, rvalue->type->matrix_columns,
                                   fine_location, unpacked_var, name,
                                   false, vertex_index);
   } else if (rvalue->type->vector_elements * dmul +
              fine_location % 4 > 4) {
      /* This vector is going to be "double parked" across two varying slots,
       * so handle it as two separate assignments.  For doubles, a dvec3 or
       * dvec4 can end up spread over three slots, but that second split
       * happens in a later recursive call; here we always split into two.
       */
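      /* For instance, a vec3 whose fine_location % 4 == 2 splits into an
       * .xy write that fills the rest of the current slot and a .z write
       * that starts the next one (bar[0] in the file header example).
       */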
      unsigned left_components, right_components;
      unsigned left_swizzle_values[4] = { 0, 0, 0, 0 };
      unsigned right_swizzle_values[4] = { 0, 0, 0, 0 };
      char left_swizzle_name[4] = { 0, 0, 0, 0 };
      char right_swizzle_name[4] = { 0, 0, 0, 0 };

      left_components = 4 - fine_location % 4;
      if (rvalue->type->is_64bit()) {
         /* We might actually end up with 0 left components! */
         left_components /= 2;
      }
      right_components = rvalue->type->vector_elements - left_components;

      for (unsigned i = 0; i < left_components; i++) {
         left_swizzle_values[i] = i;
         left_swizzle_name[i] = "xyzw"[i];
      }
      for (unsigned i = 0; i < right_components; i++) {
         right_swizzle_values[i] = i + left_components;
         right_swizzle_name[i] = "xyzw"[i + left_components];
      }
      ir_swizzle *left_swizzle = new(this->mem_ctx)
         ir_swizzle(rvalue, left_swizzle_values, left_components);
      ir_swizzle *right_swizzle = new(this->mem_ctx)
         ir_swizzle(rvalue->clone(this->mem_ctx, NULL), right_swizzle_values,
                    right_components);
      char *left_name
         = ralloc_asprintf(this->mem_ctx, "%s.%s", name, left_swizzle_name);
      char *right_name
         = ralloc_asprintf(this->mem_ctx, "%s.%s", name, right_swizzle_name);
      if (left_components)
         fine_location = this->lower_rvalue(left_swizzle, fine_location,
                                            unpacked_var, left_name, false,
                                            vertex_index);
      else
         /* Top up the fine location to the next slot */
         fine_location++;
      return this->lower_rvalue(right_swizzle, fine_location, unpacked_var,
                                right_name, false, vertex_index);
   } else {
      /* No special handling is necessary; pack the rvalue into the
       * varying.
       */
      unsigned swizzle_values[4] = { 0, 0, 0, 0 };
      unsigned components = rvalue->type->vector_elements * dmul;
      unsigned location = fine_location / 4;
      unsigned location_frac = fine_location % 4;
      for (unsigned i = 0; i < components; ++i)
         swizzle_values[i] = i + location_frac;
      ir_dereference *packed_deref =
         this->get_packed_varying_deref(location, unpacked_var, name,
                                        vertex_index);
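      /* Record which vertex stream each packed component belongs to, two
       * bits per component (streams 0-3).
       */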
      if (unpacked_var->data.stream != 0) {
         assert(unpacked_var->data.stream < 4);
         ir_variable *packed_var = packed_deref->variable_referenced();
         for (unsigned i = 0; i < components; ++i) {
            packed_var->data.stream |=
               unpacked_var->data.stream << (2 * (location_frac + i));
         }
      }
      ir_swizzle *swizzle = new(this->mem_ctx)
         ir_swizzle(packed_deref, swizzle_values, components);
      if (this->mode == ir_var_shader_out) {
         this->bitwise_assign_pack(swizzle, rvalue);
      } else {
         this->bitwise_assign_unpack(rvalue, swizzle);
      }
      return fine_location + components;
   }
}

/**
 * Recursively pack or unpack a varying for which we need to iterate over its
 * constituent elements, accessing each one using an ir_dereference_array.
 * This takes care of both arrays and matrices, since ir_dereference_array
 * treats a matrix like an array of its column vectors.
 *
 * \param gs_input_toplevel should be set to true if we are lowering geometry
 * shader inputs, and we are currently lowering the whole input variable
 * (i.e. we are lowering the array whose index selects the vertex).
 *
 * \param vertex_index: if we are lowering geometry shader inputs, and the
 * level of the array that we are currently lowering is *not* the top level,
 * then this indicates which vertex we are currently lowering.  Otherwise it
 * is ignored.
 */
unsigned
lower_packed_varyings_visitor::lower_arraylike(ir_rvalue *rvalue,
                                               unsigned array_size,
                                               unsigned fine_location,
                                               ir_variable *unpacked_var,
                                               const char *name,
                                               bool gs_input_toplevel,
                                               unsigned vertex_index)
{
   for (unsigned i = 0; i < array_size; i++) {
      if (i != 0)
         rvalue = rvalue->clone(this->mem_ctx, NULL);
      ir_constant *constant = new(this->mem_ctx) ir_constant(i);
      ir_dereference_array *dereference_array = new(this->mem_ctx)
         ir_dereference_array(rvalue, constant);
      if (gs_input_toplevel) {
         /* Geometry shader inputs are a special case.  Instead of storing
          * each element of the array at a different location, all elements
          * are at the same location, but with a different vertex index.
          */
         (void) this->lower_rvalue(dereference_array, fine_location,
                                   unpacked_var, name, false, i);
      } else {
         char *subscripted_name
            = ralloc_asprintf(this->mem_ctx, "%s[%d]", name, i);
         fine_location =
            this->lower_rvalue(dereference_array, fine_location,
                               unpacked_var, subscripted_name,
                               false, vertex_index);
      }
   }
   return fine_location;
}

/**
 * Retrieve the packed varying corresponding to the given varying location.
 * If no packed varying has been created for the given varying location yet,
 * create it and add it to the shader before returning it.
 *
 * The newly created varying inherits its interpolation parameters from \c
 * unpacked_var.  Its base type is an int vector if we are lowering a flat
 * varying, a float vector otherwise, with as many components as the slot
 * uses.
 *
 * \param vertex_index: if we are lowering geometry shader inputs, then this
 * indicates which vertex we are currently lowering.  Otherwise it is ignored.
 */
ir_dereference *
lower_packed_varyings_visitor::get_packed_varying_deref(
      unsigned location, ir_variable *unpacked_var, const char *name,
      unsigned vertex_index)
{
   unsigned slot = location - VARYING_SLOT_VAR0;
   assert(slot < locations_used);
   if (this->packed_varyings[slot] == NULL) {
      char *packed_name = ralloc_asprintf(this->mem_ctx, "packed:%s", name);
      const glsl_type *packed_type;
      assert(components[slot] != 0);
      if (unpacked_var->is_interpolation_flat())
         packed_type = glsl_type::get_instance(GLSL_TYPE_INT, components[slot], 1);
      else
         packed_type = glsl_type::get_instance(GLSL_TYPE_FLOAT, components[slot], 1);
      if (this->gs_input_vertices != 0) {
         packed_type =
            glsl_type::get_array_instance(packed_type,
                                          this->gs_input_vertices);
      }
      ir_variable *packed_var = new(this->mem_ctx)
         ir_variable(packed_type, packed_name, this->mode);
      if (this->gs_input_vertices != 0) {
         /* Prevent update_array_sizes() from messing with the size of the
          * array.
          */
         packed_var->data.max_array_access = this->gs_input_vertices - 1;
      }
      packed_var->data.centroid = unpacked_var->data.centroid;
      packed_var->data.sample = unpacked_var->data.sample;
      packed_var->data.patch = unpacked_var->data.patch;
      packed_var->data.interpolation =
         packed_type->without_array() == glsl_type::ivec4_type
         ? unsigned(INTERP_MODE_FLAT) : unpacked_var->data.interpolation;
      packed_var->data.location = location;
      packed_var->data.precision = unpacked_var->data.precision;
      packed_var->data.always_active_io = unpacked_var->data.always_active_io;
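      /* The high bit appears to flag that data.stream now holds packed
       * per-component stream IDs (two bits each, filled in by lower_rvalue())
       * rather than a single stream number.
       */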
      packed_var->data.stream = 1u << 31;
      unpacked_var->insert_before(packed_var);
      this->packed_varyings[slot] = packed_var;
   } else {
      /* For geometry shader inputs, only update the packed variable name the
       * first time we visit each component.
       */
      if (this->gs_input_vertices == 0 || vertex_index == 0) {
         ir_variable *var = this->packed_varyings[slot];

         if (var->is_name_ralloced())
            ralloc_asprintf_append((char **) &var->name, ",%s", name);
         else
            var->name = ralloc_asprintf(var, "%s,%s", var->name, name);
      }
   }

   ir_dereference *deref = new(this->mem_ctx)
      ir_dereference_variable(this->packed_varyings[slot]);
   if (this->gs_input_vertices != 0) {
      /* When lowering GS inputs, the packed variable is an array, so we need
       * to dereference it using vertex_index.
       */
      ir_constant *constant = new(this->mem_ctx) ir_constant(vertex_index);
      deref = new(this->mem_ctx) ir_dereference_array(deref, constant);
   }
   return deref;
}

bool
lower_packed_varyings_visitor::needs_lowering(ir_variable *var)
{
   /* Varyings composed of vec4s, varyings with explicitly assigned
    * locations, and varyings marked as must_be_shader_input (which might be
    * used by interpolateAt* functions) shouldn't be lowered.  Everything
    * else can be.
    */
   if (var->data.explicit_location || var->data.must_be_shader_input)
      return false;

   /* Override disable_varying_packing if the var is only used by transform
    * feedback.  Also override it if transform feedback is enabled and the
    * variable is an array, struct or matrix, as the elements of these types
    * will always have the same interpolation and therefore are safe to pack.
    */
   const glsl_type *type = var->type;
   if (disable_varying_packing && !var->data.is_xfb_only &&
       !((type->is_array() || type->is_record() || type->is_matrix()) &&
         xfb_enabled))
      return false;

   type = type->without_array();
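   /* A plain (non-64-bit) vec4 already fills a slot exactly, so packing it
    * would gain nothing.
    */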
   if (type->vector_elements == 4 && !type->is_64bit())
      return false;
   return true;
}


/**
 * Visitor that splices varying packing code before every use of EmitVertex()
 * in a geometry shader.
 */
class lower_packed_varyings_gs_splicer : public ir_hierarchical_visitor
{
public:
   explicit lower_packed_varyings_gs_splicer(void *mem_ctx,
                                             const exec_list *instructions);

   virtual ir_visitor_status visit_leave(ir_emit_vertex *ev);

private:
   /**
    * Memory context used to allocate new instructions for the shader.
    */
   void * const mem_ctx;

   /**
    * Instructions that should be spliced into place before each EmitVertex()
    * call.
    */
   const exec_list *instructions;
};


lower_packed_varyings_gs_splicer::lower_packed_varyings_gs_splicer(
      void *mem_ctx, const exec_list *instructions)
   : mem_ctx(mem_ctx), instructions(instructions)
{
}


ir_visitor_status
lower_packed_varyings_gs_splicer::visit_leave(ir_emit_vertex *ev)
{
   foreach_in_list(ir_instruction, ir, this->instructions) {
      ev->insert_before(ir->clone(this->mem_ctx, NULL));
   }
   return visit_continue;
}

/**
 * Visitor that splices varying packing code before every return.
 */
class lower_packed_varyings_return_splicer : public ir_hierarchical_visitor
{
public:
   explicit lower_packed_varyings_return_splicer(void *mem_ctx,
                                                 const exec_list *instructions);

   virtual ir_visitor_status visit_leave(ir_return *ret);

private:
   /**
    * Memory context used to allocate new instructions for the shader.
    */
   void * const mem_ctx;

   /**
    * Instructions that should be spliced into place before each return.
    */
   const exec_list *instructions;
};


lower_packed_varyings_return_splicer::lower_packed_varyings_return_splicer(
      void *mem_ctx, const exec_list *instructions)
   : mem_ctx(mem_ctx), instructions(instructions)
{
}


ir_visitor_status
lower_packed_varyings_return_splicer::visit_leave(ir_return *ret)
{
   foreach_in_list(ir_instruction, ir, this->instructions) {
      ret->insert_before(ir->clone(this->mem_ctx, NULL));
   }
   return visit_continue;
}

void
lower_packed_varyings(void *mem_ctx, unsigned locations_used,
                      const uint8_t *components,
                      ir_variable_mode mode, unsigned gs_input_vertices,
                      gl_linked_shader *shader, bool disable_varying_packing,
                      bool xfb_enabled)
{
   exec_list *instructions = shader->ir;
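   /* The packing/unpacking code is spliced around the body of main(), so
    * look up its (parameterless) signature.
    */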
   ir_function *main_func = shader->symbols->get_function("main");
   exec_list void_parameters;
   ir_function_signature *main_func_sig
      = main_func->matching_signature(NULL, &void_parameters, false);
   exec_list new_instructions, new_variables;
   lower_packed_varyings_visitor visitor(mem_ctx,
                                         locations_used,
                                         components,
                                         mode,
                                         gs_input_vertices,
                                         &new_instructions,
                                         &new_variables,
                                         disable_varying_packing,
                                         xfb_enabled);
   visitor.run(shader);
   if (mode == ir_var_shader_out) {
      if (shader->Stage == MESA_SHADER_GEOMETRY) {
         /* For geometry shaders, outputs need to be lowered before each call
          * to EmitVertex().
          */
         lower_packed_varyings_gs_splicer splicer(mem_ctx, &new_instructions);

         /* Add all the variables in first. */
         main_func_sig->body.get_head_raw()->insert_before(&new_variables);

         /* Now update all the EmitVertex instances. */
         splicer.run(instructions);
      } else {
         /* For other shader types, outputs need to be lowered before each
          * return statement and at the end of main().
          */

         lower_packed_varyings_return_splicer splicer(mem_ctx, &new_instructions);

         main_func_sig->body.get_head_raw()->insert_before(&new_variables);

         splicer.run(instructions);

         /* Lower outputs at the end of main() if the last instruction is not
          * a return statement.
          */
         if (((ir_instruction *) instructions->get_tail())->ir_type != ir_type_return) {
            main_func_sig->body.append_list(&new_instructions);
         }
      }
   } else {
      /* Shader inputs need to be lowered at the beginning of main(). */
      main_func_sig->body.get_head_raw()->insert_before(&new_instructions);
      main_func_sig->body.get_head_raw()->insert_before(&new_variables);
   }
}