/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vtn_private.h"
#include "nir/nir_vla.h"

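/* Pre-pass instruction handler.  This walks the SPIR-V once, before any NIR
 * is emitted, and records the CFG-relevant instructions: functions, labels,
 * and merge/branch instructions.  The instructions inside each block are
 * left alone here and handled later by vtn_emit_cf_list().
 */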
static bool
vtn_cfg_handle_prepass_instruction(struct vtn_builder *b, SpvOp opcode,
                                   const uint32_t *w, unsigned count)
{
   switch (opcode) {
   case SpvOpFunction: {
      assert(b->func == NULL);
      b->func = rzalloc(b, struct vtn_function);

      list_inithead(&b->func->body);
      b->func->control = w[3];

      MAYBE_UNUSED const struct glsl_type *result_type =
         vtn_value(b, w[1], vtn_value_type_type)->type->type;
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_function);
      val->func = b->func;

      const struct glsl_type *func_type =
         vtn_value(b, w[4], vtn_value_type_type)->type->type;

      assert(glsl_get_function_return_type(func_type) == result_type);

      nir_function *func =
         nir_function_create(b->shader, ralloc_strdup(b->shader, val->name));

      func->num_params = glsl_get_length(func_type);
      func->params = ralloc_array(b->shader, nir_parameter, func->num_params);
      for (unsigned i = 0; i < func->num_params; i++) {
         const struct glsl_function_param *param =
            glsl_get_function_param(func_type, i);
         func->params[i].type = param->type;
         if (param->in) {
            if (param->out) {
               func->params[i].param_type = nir_parameter_inout;
            } else {
               func->params[i].param_type = nir_parameter_in;
            }
         } else {
            if (param->out) {
               func->params[i].param_type = nir_parameter_out;
            } else {
               assert(!"Parameter is neither in nor out");
            }
         }
      }

      func->return_type = glsl_get_function_return_type(func_type);

      b->func->impl = nir_function_impl_create(func);

      b->func_param_idx = 0;
      break;
   }

   case SpvOpFunctionEnd:
      b->func->end = w;
      b->func = NULL;
      break;

   case SpvOpFunctionParameter: {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_access_chain);

      struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;

      assert(b->func_param_idx < b->func->impl->num_params);
      nir_variable *param = b->func->impl->params[b->func_param_idx++];

      assert(param->type == type->type);

      /* Name the parameter so it shows up nicely in NIR */
      param->name = ralloc_strdup(param, val->name);

      struct vtn_variable *vtn_var = rzalloc(b, struct vtn_variable);
      vtn_var->type = type;
      vtn_var->var = param;
      vtn_var->chain.var = vtn_var;
      vtn_var->chain.length = 0;

      struct vtn_type *without_array = type;
      while (glsl_type_is_array(without_array->type))
         without_array = without_array->array_element;

      if (glsl_type_is_image(without_array->type)) {
         vtn_var->mode = vtn_variable_mode_image;
         param->interface_type = without_array->type;
      } else if (glsl_type_is_sampler(without_array->type)) {
         vtn_var->mode = vtn_variable_mode_sampler;
         param->interface_type = without_array->type;
      } else {
         vtn_var->mode = vtn_variable_mode_param;
      }

      val->access_chain = &vtn_var->chain;
      break;
   }

   case SpvOpLabel: {
      assert(b->block == NULL);
      b->block = rzalloc(b, struct vtn_block);
      b->block->node.type = vtn_cf_node_type_block;
      b->block->label = w;
      vtn_push_value(b, w[1], vtn_value_type_block)->block = b->block;

      if (b->func->start_block == NULL) {
         /* This is the first block encountered for this function.  In this
          * case, we set the start block and add it to the list of
          * implemented functions that we'll walk later.
          */
         b->func->start_block = b->block;
         exec_list_push_tail(&b->functions, &b->func->node);
      }
      break;
   }

   case SpvOpSelectionMerge:
   case SpvOpLoopMerge:
      assert(b->block && b->block->merge == NULL);
      b->block->merge = w;
      break;

   case SpvOpBranch:
   case SpvOpBranchConditional:
   case SpvOpSwitch:
   case SpvOpKill:
   case SpvOpReturn:
   case SpvOpReturnValue:
   case SpvOpUnreachable:
      assert(b->block && b->block->branch == NULL);
      b->block->branch = w;
      b->block = NULL;
      break;

   default:
      /* Continue on as per normal */
      return true;
   }

   return true;
}

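/* Records one (value, label) mapping of an OpSwitch.  All values that
 * target the same block share a single vtn_case; cases whose target is the
 * break block are dropped entirely since they would just break.
 */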
static void
vtn_add_case(struct vtn_builder *b, struct vtn_switch *swtch,
             struct vtn_block *break_block,
             uint32_t block_id, uint32_t val, bool is_default)
{
   struct vtn_block *case_block =
      vtn_value(b, block_id, vtn_value_type_block)->block;

   /* Don't create dummy cases that just break */
   if (case_block == break_block)
      return;

   if (case_block->switch_case == NULL) {
      struct vtn_case *c = ralloc(b, struct vtn_case);

      list_inithead(&c->body);
      c->start_block = case_block;
      c->fallthrough = NULL;
      nir_array_init(&c->values, b);
      c->is_default = false;
      c->visited = false;

      list_addtail(&c->link, &swtch->cases);

      case_block->switch_case = c;
   }

   if (is_default) {
      case_block->switch_case->is_default = true;
   } else {
      nir_array_add(&case_block->switch_case->values, uint32_t, val);
   }
}

/* This function performs a depth-first search of the cases and puts them
 * in fall-through order.
 */
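/* The ordering matters because of how the switch is later emitted: each
 * case becomes an if predicated on "matches || fall", and falling through
 * simply leaves the fall flag set.  That only works if a case that falls
 * through is emitted immediately before its fallthrough target, which is
 * exactly the invariant this DFS establishes.
 */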
static void
vtn_order_case(struct vtn_switch *swtch, struct vtn_case *cse)
{
   if (cse->visited)
      return;

   cse->visited = true;

   list_del(&cse->link);

   if (cse->fallthrough) {
      vtn_order_case(swtch, cse->fallthrough);

      /* If we have a fall-through, place this case right before the case it
       * falls through to.  This ensures that fallthroughs come one after
       * the other.  These two can never get separated because that would
       * imply something else falling through to the same case.  Also, this
       * can't break ordering because the DFS ensures that this case is
       * visited before anything that falls through to it.
       */
      list_addtail(&cse->link, &cse->fallthrough->link);
   } else {
      list_add(&cse->link, &swtch->cases);
   }
}

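/* Classifies a branch to the given block relative to the enclosing
 * constructs: a switch or loop break, a loop continue, a fallthrough into
 * another case (which is also recorded on the current case as a side
 * effect), or a plain branch (vtn_branch_type_none).
 */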
static enum vtn_branch_type
vtn_get_branch_type(struct vtn_block *block,
                    struct vtn_case *swcase, struct vtn_block *switch_break,
                    struct vtn_block *loop_break, struct vtn_block *loop_cont)
{
   if (block->switch_case) {
      /* This branch is actually a fallthrough */
      assert(swcase->fallthrough == NULL ||
             swcase->fallthrough == block->switch_case);
      swcase->fallthrough = block->switch_case;
      return vtn_branch_type_switch_fallthrough;
   } else if (block == loop_break) {
      return vtn_branch_type_loop_break;
   } else if (block == loop_cont) {
      return vtn_branch_type_loop_continue;
   } else if (block == switch_break) {
      return vtn_branch_type_switch_break;
   } else {
      return vtn_branch_type_none;
   }
}

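/* Walks the blocks starting at start and ending just before end (or at a
 * block that breaks, continues, or returns) and builds the structured CF
 * list for them.  Loops, ifs, and switches are recognized from their merge
 * and branch instructions, and we recurse into this function for their
 * bodies.  The switch_break, loop_break, and loop_cont blocks identify
 * branch targets that terminate the current construct.
 */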
static void
vtn_cfg_walk_blocks(struct vtn_builder *b, struct list_head *cf_list,
                    struct vtn_block *start, struct vtn_case *switch_case,
                    struct vtn_block *switch_break,
                    struct vtn_block *loop_break, struct vtn_block *loop_cont,
                    struct vtn_block *end)
{
   struct vtn_block *block = start;
   while (block != end) {
      if (block->merge && (*block->merge & SpvOpCodeMask) == SpvOpLoopMerge &&
          !block->loop) {
         struct vtn_loop *loop = ralloc(b, struct vtn_loop);

         loop->node.type = vtn_cf_node_type_loop;
         list_inithead(&loop->body);
         list_inithead(&loop->cont_body);
         loop->control = block->merge[3];

         list_addtail(&loop->node.link, cf_list);
         block->loop = loop;

         struct vtn_block *new_loop_break =
            vtn_value(b, block->merge[1], vtn_value_type_block)->block;
         struct vtn_block *new_loop_cont =
            vtn_value(b, block->merge[2], vtn_value_type_block)->block;

         /* Note: This recursive call will start with the current block as
          * its start block.  If we weren't careful, we would get here
          * again and end up in infinite recursion.  This is why we set
          * block->loop above and check for it before creating one.  This
          * way, we only create the loop once and the second call that
          * tries to handle this loop goes to the cases below and gets
          * handled as a regular block.
          *
          * Note: When we make the recursive walk calls, we pass NULL for
          * the switch break since you have to break out of the loop first.
          * We do, however, still pass the current switch case because it's
          * possible that the merge block for the loop is the start of
          * another case.
          */
         vtn_cfg_walk_blocks(b, &loop->body, block, switch_case, NULL,
                             new_loop_break, new_loop_cont, NULL);
         vtn_cfg_walk_blocks(b, &loop->cont_body, new_loop_cont, NULL, NULL,
                             new_loop_break, NULL, block);

         block = new_loop_break;
         continue;
      }

      assert(block->node.link.next == NULL);
      list_addtail(&block->node.link, cf_list);

      switch (*block->branch & SpvOpCodeMask) {
      case SpvOpBranch: {
         struct vtn_block *branch_block =
            vtn_value(b, block->branch[1], vtn_value_type_block)->block;

         block->branch_type = vtn_get_branch_type(branch_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);

         if (block->branch_type != vtn_branch_type_none)
            return;

         block = branch_block;
         continue;
      }

      case SpvOpReturn:
      case SpvOpReturnValue:
         block->branch_type = vtn_branch_type_return;
         return;

      case SpvOpKill:
         block->branch_type = vtn_branch_type_discard;
         return;

      case SpvOpBranchConditional: {
         struct vtn_block *then_block =
            vtn_value(b, block->branch[2], vtn_value_type_block)->block;
         struct vtn_block *else_block =
            vtn_value(b, block->branch[3], vtn_value_type_block)->block;

         struct vtn_if *if_stmt = ralloc(b, struct vtn_if);

         if_stmt->node.type = vtn_cf_node_type_if;
         if_stmt->condition = block->branch[1];
         list_inithead(&if_stmt->then_body);
         list_inithead(&if_stmt->else_body);

         list_addtail(&if_stmt->node.link, cf_list);

         if (block->merge &&
             (*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge) {
            if_stmt->control = block->merge[2];
         }

         if_stmt->then_type = vtn_get_branch_type(then_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);
         if_stmt->else_type = vtn_get_branch_type(else_block,
                                                  switch_case, switch_break,
                                                  loop_break, loop_cont);

         if (if_stmt->then_type == vtn_branch_type_none &&
             if_stmt->else_type == vtn_branch_type_none) {
            /* Neither side of the if is something we can short-circuit. */
            assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
            struct vtn_block *merge_block =
               vtn_value(b, block->merge[1], vtn_value_type_block)->block;

            vtn_cfg_walk_blocks(b, &if_stmt->then_body, then_block,
                                switch_case, switch_break,
                                loop_break, loop_cont, merge_block);
            vtn_cfg_walk_blocks(b, &if_stmt->else_body, else_block,
                                switch_case, switch_break,
                                loop_break, loop_cont, merge_block);

            enum vtn_branch_type merge_type =
               vtn_get_branch_type(merge_block, switch_case, switch_break,
                                   loop_break, loop_cont);
            if (merge_type == vtn_branch_type_none) {
               block = merge_block;
               continue;
            } else {
               return;
            }
         } else if (if_stmt->then_type != vtn_branch_type_none &&
                    if_stmt->else_type != vtn_branch_type_none) {
            /* Both sides were short-circuited.  We're done here. */
            return;
         } else {
            /* Exactly one side of the branch could be short-circuited.
             * We set the branch up as a predicated break/continue and we
             * continue on with the other side as if it were what comes
             * after the if.
             */
            if (if_stmt->then_type == vtn_branch_type_none) {
               block = then_block;
            } else {
               block = else_block;
            }
            continue;
         }
         unreachable("Should have returned or continued");
      }

      case SpvOpSwitch: {
         assert((*block->merge & SpvOpCodeMask) == SpvOpSelectionMerge);
         struct vtn_block *break_block =
            vtn_value(b, block->merge[1], vtn_value_type_block)->block;

         struct vtn_switch *swtch = ralloc(b, struct vtn_switch);

         swtch->node.type = vtn_cf_node_type_switch;
         swtch->selector = block->branch[1];
         list_inithead(&swtch->cases);

         list_addtail(&swtch->node.link, cf_list);

         /* First, we go through and record all of the cases. */
         const uint32_t *branch_end =
            block->branch + (block->branch[0] >> SpvWordCountShift);

         vtn_add_case(b, swtch, break_block, block->branch[2], 0, true);
         for (const uint32_t *w = block->branch + 3; w < branch_end; w += 2)
            vtn_add_case(b, swtch, break_block, w[1], w[0], false);

         /* Now, we go through and walk the blocks.  While we walk through
          * the blocks, we also gather the much-needed fall-through
          * information.
          */
         list_for_each_entry(struct vtn_case, cse, &swtch->cases, link) {
            assert(cse->start_block != break_block);
            vtn_cfg_walk_blocks(b, &cse->body, cse->start_block, cse,
                                break_block, NULL, loop_cont, NULL);
         }

         /* Finally, we walk over all of the cases one more time and put
          * them in fall-through order.
          */
         for (const uint32_t *w = block->branch + 2; w < branch_end; w += 2) {
            struct vtn_block *case_block =
               vtn_value(b, *w, vtn_value_type_block)->block;

            if (case_block == break_block)
               continue;

            assert(case_block->switch_case);

            vtn_order_case(swtch, case_block->switch_case);
         }

         enum vtn_branch_type branch_type =
            vtn_get_branch_type(break_block, switch_case, NULL,
                                loop_break, loop_cont);

         if (branch_type != vtn_branch_type_none) {
            /* It is possible that the break is actually the continue block
             * for the containing loop.  In this case, we need to bail and let
             * the loop parsing code handle the continue properly.
             */
            assert(branch_type == vtn_branch_type_loop_continue);
            return;
         }

         block = break_block;
         continue;
      }

      case SpvOpUnreachable:
         return;

      default:
         unreachable("Unhandled opcode");
      }
   }
}

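/* Entry point of the CFG pass: run the pre-pass over the SPIR-V to record
 * blocks and branches, then build the structured control-flow tree for
 * each implemented function.
 */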
void
vtn_build_cfg(struct vtn_builder *b, const uint32_t *words, const uint32_t *end)
{
   vtn_foreach_instruction(b, words, end,
                           vtn_cfg_handle_prepass_instruction);

   foreach_list_typed(struct vtn_function, func, node, &b->functions) {
      vtn_cfg_walk_blocks(b, &func->body, func->start_block,
                          NULL, NULL, NULL, NULL, NULL);
   }
}

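/* First of the two phi passes.  Schematically, a phi such as
 *
 *    %x = OpPhi %type %a %blockA %b %blockB
 *
 * becomes a fresh local variable plus a load of that variable at the phi
 * site; the second pass below then stores %a and %b to the variable at
 * the ends of %blockA and %blockB respectively.
 */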
static bool
vtn_handle_phis_first_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode == SpvOpLabel)
      return true; /* Nothing to do */

   /* If this isn't a phi node, stop. */
   if (opcode != SpvOpPhi)
      return false;

   /* For handling phi nodes, we do a poor-man's out-of-ssa on the spot.
    * For each phi, we create a variable with the appropriate type and
    * do a load from that variable.  Then, in a second pass, we add
    * stores to that variable to each of the predecessor blocks.
    *
    * We could do something more intelligent here.  However, in order to
    * handle loops and things properly, we really need dominance
    * information.  It would end up basically being the into-SSA
    * algorithm all over again.  It's easier if we just let
    * lower_vars_to_ssa do that for us instead of repeating it here.
    */
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
   nir_variable *phi_var =
      nir_local_variable_create(b->nb.impl, type->type, "phi");
   _mesa_hash_table_insert(b->phi_table, w, phi_var);

   val->ssa = vtn_local_load(b, nir_deref_var_create(b, phi_var));

   return true;
}

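/* Second phi pass: for each OpPhi, set the cursor to the end of every
 * predecessor block and store the corresponding source value to the phi's
 * temporary variable.  nir_lower_vars_to_ssa can then rebuild real phis.
 */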
static bool
vtn_handle_phi_second_pass(struct vtn_builder *b, SpvOp opcode,
                           const uint32_t *w, unsigned count)
{
   if (opcode != SpvOpPhi)
      return true;

   struct hash_entry *phi_entry = _mesa_hash_table_search(b->phi_table, w);
   assert(phi_entry);
   nir_variable *phi_var = phi_entry->data;

   for (unsigned i = 3; i < count; i += 2) {
      struct vtn_block *pred =
         vtn_value(b, w[i + 1], vtn_value_type_block)->block;

      b->nb.cursor = nir_after_instr(&pred->end_nop->instr);

      struct vtn_ssa_value *src = vtn_ssa_value(b, w[i]);

      vtn_local_store(b, src, nir_deref_var_create(b, phi_var));
   }

   return true;
}

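/* Emits the NIR for a non-trivial branch type: loop breaks and continues
 * become NIR jumps, returns become a return jump, discards become an
 * intrinsic, and a switch break is lowered to storing false in the
 * switch's fall variable.
 */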
static void
vtn_emit_branch(struct vtn_builder *b, enum vtn_branch_type branch_type,
                nir_variable *switch_fall_var, bool *has_switch_break)
{
   switch (branch_type) {
   case vtn_branch_type_switch_break:
      nir_store_var(&b->nb, switch_fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);
      *has_switch_break = true;
      break;
   case vtn_branch_type_switch_fallthrough:
      break; /* Nothing to do */
   case vtn_branch_type_loop_break:
      nir_jump(&b->nb, nir_jump_break);
      break;
   case vtn_branch_type_loop_continue:
      nir_jump(&b->nb, nir_jump_continue);
      break;
   case vtn_branch_type_return:
      nir_jump(&b->nb, nir_jump_return);
      break;
   case vtn_branch_type_discard: {
      nir_intrinsic_instr *discard =
         nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_discard);
      nir_builder_instr_insert(&b->nb, &discard->instr);
      break;
   }
   default:
      unreachable("Invalid branch type");
   }
}

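/* Emits NIR for a structured CF list built by vtn_cfg_walk_blocks().  This
 * is where blocks, ifs, loops, and switches actually turn into NIR
 * instructions and control flow, with phis handled by the two passes
 * above.
 */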
static void
vtn_emit_cf_list(struct vtn_builder *b, struct list_head *cf_list,
                 nir_variable *switch_fall_var, bool *has_switch_break,
                 vtn_instruction_handler handler)
{
   list_for_each_entry(struct vtn_cf_node, node, cf_list, link) {
      switch (node->type) {
      case vtn_cf_node_type_block: {
         struct vtn_block *block = (struct vtn_block *)node;

         const uint32_t *block_start = block->label;
         const uint32_t *block_end = block->merge ? block->merge :
                                                    block->branch;

         block_start = vtn_foreach_instruction(b, block_start, block_end,
                                               vtn_handle_phis_first_pass);

         vtn_foreach_instruction(b, block_start, block_end, handler);

         block->end_nop = nir_intrinsic_instr_create(b->nb.shader,
                                                     nir_intrinsic_nop);
         nir_builder_instr_insert(&b->nb, &block->end_nop->instr);

         if ((*block->branch & SpvOpCodeMask) == SpvOpReturnValue) {
            struct vtn_ssa_value *src = vtn_ssa_value(b, block->branch[1]);
            vtn_local_store(b, src,
                            nir_deref_var_create(b, b->impl->return_var));
         }

         if (block->branch_type != vtn_branch_type_none) {
            vtn_emit_branch(b, block->branch_type,
                            switch_fall_var, has_switch_break);
         }

         break;
      }

      case vtn_cf_node_type_if: {
         struct vtn_if *vtn_if = (struct vtn_if *)node;

         nir_if *if_stmt = nir_if_create(b->shader);
         if_stmt->condition =
            nir_src_for_ssa(vtn_ssa_value(b, vtn_if->condition)->def);
         nir_cf_node_insert(b->nb.cursor, &if_stmt->cf_node);

         bool sw_break = false;

         b->nb.cursor = nir_after_cf_list(&if_stmt->then_list);
         if (vtn_if->then_type == vtn_branch_type_none) {
            vtn_emit_cf_list(b, &vtn_if->then_body,
                             switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->then_type, switch_fall_var, &sw_break);
         }

         b->nb.cursor = nir_after_cf_list(&if_stmt->else_list);
         if (vtn_if->else_type == vtn_branch_type_none) {
            vtn_emit_cf_list(b, &vtn_if->else_body,
                             switch_fall_var, &sw_break, handler);
         } else {
            vtn_emit_branch(b, vtn_if->else_type, switch_fall_var, &sw_break);
         }

         b->nb.cursor = nir_after_cf_node(&if_stmt->cf_node);

         /* If we encountered a switch break somewhere inside of the if,
          * then it would have been handled correctly by calling
          * emit_cf_list or emit_branch for the interior.  However, we
          * need to predicate everything following on whether or not we're
          * still going.
          */
         if (sw_break) {
            *has_switch_break = true;

            nir_if *switch_if = nir_if_create(b->shader);
            switch_if->condition =
               nir_src_for_ssa(nir_load_var(&b->nb, switch_fall_var));
            nir_cf_node_insert(b->nb.cursor, &switch_if->cf_node);

            b->nb.cursor = nir_after_cf_list(&if_stmt->then_list);
         }
         break;
      }

      case vtn_cf_node_type_loop: {
         struct vtn_loop *vtn_loop = (struct vtn_loop *)node;

         nir_loop *loop = nir_loop_create(b->shader);
         nir_cf_node_insert(b->nb.cursor, &loop->cf_node);

         b->nb.cursor = nir_after_cf_list(&loop->body);
         vtn_emit_cf_list(b, &vtn_loop->body, NULL, NULL, handler);

         if (!list_empty(&vtn_loop->cont_body)) {
            /* If we have a non-trivial continue body then we need to put
             * it at the beginning of the loop with a flag to ensure that
             * it doesn't get executed in the first iteration.
             */
            nir_variable *do_cont =
               nir_local_variable_create(b->nb.impl, glsl_bool_type(), "cont");

            b->nb.cursor = nir_before_cf_node(&loop->cf_node);
            nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_FALSE), 1);

            b->nb.cursor = nir_before_cf_list(&loop->body);
            nir_if *cont_if = nir_if_create(b->shader);
            cont_if->condition = nir_src_for_ssa(nir_load_var(&b->nb, do_cont));
            nir_cf_node_insert(b->nb.cursor, &cont_if->cf_node);

            b->nb.cursor = nir_after_cf_list(&cont_if->then_list);
            vtn_emit_cf_list(b, &vtn_loop->cont_body, NULL, NULL, handler);

            b->nb.cursor = nir_after_cf_node(&cont_if->cf_node);
            nir_store_var(&b->nb, do_cont, nir_imm_int(&b->nb, NIR_TRUE), 1);

            b->has_loop_continue = true;
         }

         b->nb.cursor = nir_after_cf_node(&loop->cf_node);
         break;
      }

      case vtn_cf_node_type_switch: {
         struct vtn_switch *vtn_switch = (struct vtn_switch *)node;

         /* First, we create a variable to keep track of whether or not the
          * switch is still going at any given point.  Any switch breaks
          * will set this variable to false.
          */
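         /* Schematically, a switch over sel with two cases and a default
          * is emitted as
          *
          *    fall = false;
          *    if ((sel == v0) || fall) { fall = true; <case 0> }
          *    if ((sel == v1) || fall) { fall = true; <case 1> }
          *    if (!any || fall)        { fall = true; <default> }
          *
          * where a switch break inside a case simply stores false to fall.
          */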
         nir_variable *fall_var =
            nir_local_variable_create(b->nb.impl, glsl_bool_type(), "fall");
         nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_FALSE), 1);

         /* Next, we gather up all of the conditions.  We have to do this
          * up-front because we also need to build an "any" condition so
          * that we can use !any for default.
          */
         const int num_cases = list_length(&vtn_switch->cases);
         NIR_VLA(nir_ssa_def *, conditions, num_cases);

         nir_ssa_def *sel = vtn_ssa_value(b, vtn_switch->selector)->def;
         /* An accumulation of all conditions.  Used for the default */
         nir_ssa_def *any = NULL;

         int i = 0;
         list_for_each_entry(struct vtn_case, cse, &vtn_switch->cases, link) {
            if (cse->is_default) {
               conditions[i++] = NULL;
               continue;
            }

            nir_ssa_def *cond = NULL;
            nir_array_foreach(&cse->values, uint32_t, val) {
               nir_ssa_def *is_val =
                  nir_ieq(&b->nb, sel, nir_imm_int(&b->nb, *val));

               cond = cond ? nir_ior(&b->nb, cond, is_val) : is_val;
            }

            any = any ? nir_ior(&b->nb, any, cond) : cond;
            conditions[i++] = cond;
         }
         assert(i == num_cases);

         /* Now we can walk the list of cases and actually emit code */
         i = 0;
         list_for_each_entry(struct vtn_case, cse, &vtn_switch->cases, link) {
            /* Figure out the condition */
            nir_ssa_def *cond = conditions[i++];
            if (cse->is_default) {
               assert(cond == NULL);
               cond = nir_inot(&b->nb, any);
            }
            /* Take fallthrough into account */
            cond = nir_ior(&b->nb, cond, nir_load_var(&b->nb, fall_var));

            nir_if *case_if = nir_if_create(b->nb.shader);
            case_if->condition = nir_src_for_ssa(cond);
            nir_cf_node_insert(b->nb.cursor, &case_if->cf_node);

            bool has_break = false;
            b->nb.cursor = nir_after_cf_list(&case_if->then_list);
            nir_store_var(&b->nb, fall_var, nir_imm_int(&b->nb, NIR_TRUE), 1);
            vtn_emit_cf_list(b, &cse->body, fall_var, &has_break, handler);
            (void)has_break; /* We don't care */

            b->nb.cursor = nir_after_cf_node(&case_if->cf_node);
         }
         assert(i == num_cases);

         break;
      }

      default:
         unreachable("Invalid CF node type");
      }
   }
}

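/* Emits a complete vtn_function: sets up the builder at the end of the
 * function body, emits the structured CF list, then runs the second phi
 * pass and, if loop continues were emitted, repairs SSA.
 */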
void
vtn_function_emit(struct vtn_builder *b, struct vtn_function *func,
                  vtn_instruction_handler instruction_handler)
{
   nir_builder_init(&b->nb, func->impl);
   b->nb.cursor = nir_after_cf_list(&func->impl->body);
   b->has_loop_continue = false;
   b->phi_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
                                          _mesa_key_pointer_equal);

   vtn_emit_cf_list(b, &func->body, NULL, NULL, instruction_handler);

   vtn_foreach_instruction(b, func->start_block->label, func->end,
                           vtn_handle_phi_second_pass);

   /* Continue blocks for loops get inserted before the body of the loop
    * but instructions in the continue may use SSA defs in the loop body.
    * Therefore, we need to repair SSA to insert the needed phi nodes.
    */
   if (b->has_loop_continue)
      nir_repair_ssa_impl(func->impl);
}
    795