/*
 * Copyright © 2014-2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef NIR_BUILDER_H
#define NIR_BUILDER_H

#include "nir_control_flow.h"

struct exec_list;

typedef struct nir_builder {
   nir_cursor cursor;

   /* Whether new ALU instructions will be marked "exact" */
   bool exact;

   nir_shader *shader;
   nir_function_impl *impl;
} nir_builder;

static inline void
nir_builder_init(nir_builder *build, nir_function_impl *impl)
{
   memset(build, 0, sizeof(*build));
   build->exact = false;
   build->impl = impl;
   build->shader = impl->function->shader;
}

static inline void
nir_builder_init_simple_shader(nir_builder *build, void *mem_ctx,
                               gl_shader_stage stage,
                               const nir_shader_compiler_options *options)
{
   build->shader = nir_shader_create(mem_ctx, stage, options, NULL);
   nir_function *func = nir_function_create(build->shader, "main");
   build->exact = false;
   build->impl = nir_function_impl_create(func);
   build->cursor = nir_after_cf_list(&build->impl->body);
}
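
/*
 * A minimal usage sketch (illustrative only; "impl" and "some_instr" are
 * placeholder names): passes typically wrap an existing function with
 * nir_builder_init() and then position the cursor before emitting code,
 * e.g.:
 *
 *    nir_builder b;
 *    nir_builder_init(&b, impl);
 *    b.cursor = nir_before_instr(some_instr);
 *    nir_ssa_def *zero = nir_imm_int(&b, 0);
 *
 * nir_builder_init_simple_shader() instead creates a fresh shader with a
 * single "main" function and leaves the cursor at the end of its body.
 */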

static inline void
nir_builder_instr_insert(nir_builder *build, nir_instr *instr)
{
   nir_instr_insert(build->cursor, instr);

   /* Move the cursor forward. */
   build->cursor = nir_after_instr(instr);
}

static inline nir_instr *
nir_builder_last_instr(nir_builder *build)
{
   assert(build->cursor.option == nir_cursor_after_instr);
   return build->cursor.instr;
}

static inline void
nir_builder_cf_insert(nir_builder *build, nir_cf_node *cf)
{
   nir_cf_node_insert(build->cursor, cf);
}

static inline bool
nir_builder_is_inside_cf(nir_builder *build, nir_cf_node *cf_node)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   for (nir_cf_node *n = &block->cf_node; n; n = n->parent) {
      if (n == cf_node)
         return true;
   }
   return false;
}

static inline nir_if *
nir_push_if(nir_builder *build, nir_ssa_def *condition)
{
   nir_if *nif = nir_if_create(build->shader);
   nif->condition = nir_src_for_ssa(condition);
   nir_builder_cf_insert(build, &nif->cf_node);
   build->cursor = nir_before_cf_list(&nif->then_list);
   return nif;
}

static inline nir_if *
nir_push_else(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }
   build->cursor = nir_before_cf_list(&nif->else_list);
   return nif;
}

static inline void
nir_pop_if(nir_builder *build, nir_if *nif)
{
   if (nif) {
      assert(nir_builder_is_inside_cf(build, &nif->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      nif = nir_cf_node_as_if(block->cf_node.parent);
   }
   build->cursor = nir_after_cf_node(&nif->cf_node);
}

static inline nir_ssa_def *
nir_if_phi(nir_builder *build, nir_ssa_def *then_def, nir_ssa_def *else_def)
{
   nir_block *block = nir_cursor_current_block(build->cursor);
   nir_if *nif = nir_cf_node_as_if(nir_cf_node_prev(&block->cf_node));

   nir_phi_instr *phi = nir_phi_instr_create(build->shader);

   nir_phi_src *src = ralloc(phi, nir_phi_src);
   src->pred = nir_if_last_then_block(nif);
   src->src = nir_src_for_ssa(then_def);
   exec_list_push_tail(&phi->srcs, &src->node);

   src = ralloc(phi, nir_phi_src);
   src->pred = nir_if_last_else_block(nif);
   src->src = nir_src_for_ssa(else_def);
   exec_list_push_tail(&phi->srcs, &src->node);

   assert(then_def->num_components == else_def->num_components);
   assert(then_def->bit_size == else_def->bit_size);
   nir_ssa_dest_init(&phi->instr, &phi->dest,
                     then_def->num_components, then_def->bit_size, NULL);

   nir_builder_instr_insert(build, &phi->instr);

   return &phi->dest.ssa;
}
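
/*
 * A minimal control-flow sketch (assumes "b" is a positioned nir_builder and
 * "cond", "a", "c" are existing nir_ssa_defs):
 *
 *    nir_push_if(b, cond);
 *    nir_ssa_def *t = nir_fadd(b, a, c);
 *    nir_push_else(b, NULL);       // NULL: infer the nir_if from the cursor
 *    nir_ssa_def *e = nir_fsub(b, a, c);
 *    nir_pop_if(b, NULL);
 *    nir_ssa_def *r = nir_if_phi(b, t, e);   // merge the two values
 *
 * nir_fadd()/nir_fsub() are among the per-opcode wrappers generated in
 * nir_builder_opcodes.h, included further down.
 */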

static inline nir_loop *
nir_push_loop(nir_builder *build)
{
   nir_loop *loop = nir_loop_create(build->shader);
   nir_builder_cf_insert(build, &loop->cf_node);
   build->cursor = nir_before_cf_list(&loop->body);
   return loop;
}

static inline void
nir_pop_loop(nir_builder *build, nir_loop *loop)
{
   if (loop) {
      assert(nir_builder_is_inside_cf(build, &loop->cf_node));
   } else {
      nir_block *block = nir_cursor_current_block(build->cursor);
      loop = nir_cf_node_as_loop(block->cf_node.parent);
   }
   build->cursor = nir_after_cf_node(&loop->cf_node);
}
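
/*
 * Loop sketch: a NIR loop has no implicit exit condition, so the body
 * normally contains a conditional break (assumes "b" and "exit_cond"):
 *
 *    nir_loop *loop = nir_push_loop(b);
 *    ...build the loop body...
 *    nir_push_if(b, exit_cond);
 *    nir_jump(b, nir_jump_break);
 *    nir_pop_if(b, NULL);
 *    nir_pop_loop(b, loop);
 */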

/* Note that the undef is inserted at the start of the impl body rather than
 * at the cursor, so it dominates every possible use.
 */
static inline nir_ssa_def *
nir_ssa_undef(nir_builder *build, unsigned num_components, unsigned bit_size)
{
   nir_ssa_undef_instr *undef =
      nir_ssa_undef_instr_create(build->shader, num_components, bit_size);
   if (!undef)
      return NULL;

   nir_instr_insert(nir_before_cf_list(&build->impl->body), &undef->instr);

   return &undef->def;
}

static inline nir_ssa_def *
nir_build_imm(nir_builder *build, unsigned num_components,
              unsigned bit_size, nir_const_value value)
{
   nir_load_const_instr *load_const =
      nir_load_const_instr_create(build->shader, num_components, bit_size);
   if (!load_const)
      return NULL;

   load_const->value = value;

   nir_builder_instr_insert(build, &load_const->instr);

   return &load_const->def;
}

static inline nir_ssa_def *
nir_imm_float(nir_builder *build, float x)
{
   nir_const_value v;

   memset(&v, 0, sizeof(v));
   v.f32[0] = x;

   return nir_build_imm(build, 1, 32, v);
}

static inline nir_ssa_def *
nir_imm_double(nir_builder *build, double x)
{
   nir_const_value v;

   memset(&v, 0, sizeof(v));
   v.f64[0] = x;

   return nir_build_imm(build, 1, 64, v);
}

static inline nir_ssa_def *
nir_imm_vec4(nir_builder *build, float x, float y, float z, float w)
{
   nir_const_value v;

   memset(&v, 0, sizeof(v));
   v.f32[0] = x;
   v.f32[1] = y;
   v.f32[2] = z;
   v.f32[3] = w;

   return nir_build_imm(build, 4, 32, v);
}

static inline nir_ssa_def *
nir_imm_int(nir_builder *build, int x)
{
   nir_const_value v;

   memset(&v, 0, sizeof(v));
   v.i32[0] = x;

   return nir_build_imm(build, 1, 32, v);
}

static inline nir_ssa_def *
nir_imm_int64(nir_builder *build, int64_t x)
{
   nir_const_value v;

   memset(&v, 0, sizeof(v));
   v.i64[0] = x;

   return nir_build_imm(build, 1, 64, v);
}

static inline nir_ssa_def *
nir_imm_intN_t(nir_builder *build, uint64_t x, unsigned bit_size)
{
   nir_const_value v;

   memset(&v, 0, sizeof(v));
   assert(bit_size <= 64);
   v.i64[0] = x & (~0ull >> (64 - bit_size));

   return nir_build_imm(build, 1, bit_size, v);
}

static inline nir_ssa_def *
nir_imm_ivec4(nir_builder *build, int x, int y, int z, int w)
{
   nir_const_value v;

   memset(&v, 0, sizeof(v));
   v.i32[0] = x;
   v.i32[1] = y;
   v.i32[2] = z;
   v.i32[3] = w;

   return nir_build_imm(build, 4, 32, v);
}
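
/*
 * Immediate sketch: nir_imm_float(b, 1.0f) builds a single-component 32-bit
 * float constant, nir_imm_ivec4(b, 0, 1, 2, 3) a 4-component 32-bit integer
 * constant; all of these helpers bottom out in nir_build_imm().
 */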

static inline nir_ssa_def *
nir_build_alu(nir_builder *build, nir_op op, nir_ssa_def *src0,
              nir_ssa_def *src1, nir_ssa_def *src2, nir_ssa_def *src3)
{
   const nir_op_info *op_info = &nir_op_infos[op];
   nir_alu_instr *instr = nir_alu_instr_create(build->shader, op);
   if (!instr)
      return NULL;

   instr->exact = build->exact;

   instr->src[0].src = nir_src_for_ssa(src0);
   if (src1)
      instr->src[1].src = nir_src_for_ssa(src1);
   if (src2)
      instr->src[2].src = nir_src_for_ssa(src2);
   if (src3)
      instr->src[3].src = nir_src_for_ssa(src3);

   /* Guess the number of components the destination temporary should have
    * based on our input sizes, if it's not fixed for the op.
    */
   unsigned num_components = op_info->output_size;
   if (num_components == 0) {
      for (unsigned i = 0; i < op_info->num_inputs; i++) {
         if (op_info->input_sizes[i] == 0)
            num_components = MAX2(num_components,
                                  instr->src[i].src.ssa->num_components);
      }
   }
   assert(num_components != 0);

   /* Figure out the bitwidth based on the source bitwidth if the instruction
    * is variable-width.
    */
   unsigned bit_size = nir_alu_type_get_type_size(op_info->output_type);
   if (bit_size == 0) {
      for (unsigned i = 0; i < op_info->num_inputs; i++) {
         unsigned src_bit_size = instr->src[i].src.ssa->bit_size;
         if (nir_alu_type_get_type_size(op_info->input_types[i]) == 0) {
            if (bit_size)
               assert(src_bit_size == bit_size);
            else
               bit_size = src_bit_size;
         } else {
            assert(src_bit_size ==
               nir_alu_type_get_type_size(op_info->input_types[i]));
         }
      }
   }

   /* When in doubt, assume 32. */
   if (bit_size == 0)
      bit_size = 32;

   /* Make sure we don't swizzle from outside of our source vector (like if a
    * scalar value was passed into a multiply with a vector).
    */
   for (unsigned i = 0; i < op_info->num_inputs; i++) {
      for (unsigned j = instr->src[i].src.ssa->num_components; j < 4; j++) {
         instr->src[i].swizzle[j] = instr->src[i].src.ssa->num_components - 1;
      }
   }

   nir_ssa_dest_init(&instr->instr, &instr->dest.dest, num_components,
                     bit_size, NULL);
   instr->dest.write_mask = (1 << num_components) - 1;

   nir_builder_instr_insert(build, &instr->instr);

   return &instr->dest.dest.ssa;
}
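
/*
 * nir_builder_opcodes.h is generated; it defines one thin wrapper per opcode
 * that forwards to nir_build_alu() with the unused sources as NULL, e.g.
 * (sketch): nir_fadd(b, x, y) builds a nir_op_fadd and nir_fneg(b, x) a
 * nir_op_fneg.
 */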

#include "nir_builder_opcodes.h"

static inline nir_ssa_def *
nir_vec(nir_builder *build, nir_ssa_def **comp, unsigned num_components)
{
   switch (num_components) {
   case 4:
      return nir_vec4(build, comp[0], comp[1], comp[2], comp[3]);
   case 3:
      return nir_vec3(build, comp[0], comp[1], comp[2]);
   case 2:
      return nir_vec2(build, comp[0], comp[1]);
   case 1:
      return comp[0];
   default:
      unreachable("bad component count");
      return NULL;
   }
}

/**
 * Similar to nir_fmov, but takes a nir_alu_src instead of a nir_ssa_def.
 */
static inline nir_ssa_def *
nir_fmov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
{
   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_fmov);
   nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
                     nir_src_bit_size(src.src), NULL);
   mov->exact = build->exact;
   mov->dest.write_mask = (1 << num_components) - 1;
   mov->src[0] = src;
   nir_builder_instr_insert(build, &mov->instr);

   return &mov->dest.dest.ssa;
}

static inline nir_ssa_def *
nir_imov_alu(nir_builder *build, nir_alu_src src, unsigned num_components)
{
   nir_alu_instr *mov = nir_alu_instr_create(build->shader, nir_op_imov);
   nir_ssa_dest_init(&mov->instr, &mov->dest.dest, num_components,
                     nir_src_bit_size(src.src), NULL);
   mov->exact = build->exact;
   mov->dest.write_mask = (1 << num_components) - 1;
   mov->src[0] = src;
   nir_builder_instr_insert(build, &mov->instr);

   return &mov->dest.dest.ssa;
}

/**
 * Construct an fmov or imov that reswizzles the source's components.
 */
static inline nir_ssa_def *
nir_swizzle(nir_builder *build, nir_ssa_def *src, const unsigned swiz[4],
            unsigned num_components, bool use_fmov)
{
   nir_alu_src alu_src = { NIR_SRC_INIT };
   alu_src.src = nir_src_for_ssa(src);
   for (unsigned i = 0; i < num_components; i++)
      alu_src.swizzle[i] = swiz[i];

   return use_fmov ? nir_fmov_alu(build, alu_src, num_components) :
                     nir_imov_alu(build, alu_src, num_components);
}

/* Selects the right fdot given the number of components in each source. */
static inline nir_ssa_def *
nir_fdot(nir_builder *build, nir_ssa_def *src0, nir_ssa_def *src1)
{
   assert(src0->num_components == src1->num_components);
   switch (src0->num_components) {
   case 1: return nir_fmul(build, src0, src1);
   case 2: return nir_fdot2(build, src0, src1);
   case 3: return nir_fdot3(build, src0, src1);
   case 4: return nir_fdot4(build, src0, src1);
   default:
      unreachable("bad component size");
   }

   return NULL;
}

static inline nir_ssa_def *
nir_bany_inequal(nir_builder *b, nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (src0->num_components) {
   case 1: return nir_ine(b, src0, src1);
   case 2: return nir_bany_inequal2(b, src0, src1);
   case 3: return nir_bany_inequal3(b, src0, src1);
   case 4: return nir_bany_inequal4(b, src0, src1);
   default:
      unreachable("bad component size");
   }
}

static inline nir_ssa_def *
nir_bany(nir_builder *b, nir_ssa_def *src)
{
   return nir_bany_inequal(b, src, nir_imm_int(b, 0));
}

static inline nir_ssa_def *
nir_channel(nir_builder *b, nir_ssa_def *def, unsigned c)
{
   unsigned swizzle[4] = {c, c, c, c};
   return nir_swizzle(b, def, swizzle, 1, false);
}

static inline nir_ssa_def *
nir_channels(nir_builder *b, nir_ssa_def *def, unsigned mask)
{
   unsigned num_channels = 0, swizzle[4] = { 0, 0, 0, 0 };

   for (unsigned i = 0; i < 4; i++) {
      if ((mask & (1 << i)) == 0)
         continue;
      swizzle[num_channels++] = i;
   }

   return nir_swizzle(b, def, swizzle, num_channels, false);
}
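
/*
 * Swizzle sketch (assumes "b" and a vec4 "v"): nir_channel(b, v, 2) extracts
 * v.z as a scalar, nir_channels(b, v, 0x3) builds a vec2 of v.xy, and
 * nir_vec() reassembles an array of scalars into a vector.
 */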

/**
 * Turns a nir_src into a nir_ssa_def * so it can be passed to
 * nir_build_alu()-based builder calls.
 *
 * See nir_ssa_for_alu_src() for alu instructions.
 */
static inline nir_ssa_def *
nir_ssa_for_src(nir_builder *build, nir_src src, int num_components)
{
   if (src.is_ssa && src.ssa->num_components == num_components)
      return src.ssa;

   nir_alu_src alu = { NIR_SRC_INIT };
   alu.src = src;
   for (int j = 0; j < 4; j++)
      alu.swizzle[j] = j;

   return nir_imov_alu(build, alu, num_components);
}

/**
 * Similar to nir_ssa_for_src(), but for alu srcs, respecting the
 * nir_alu_src's swizzle.
 */
static inline nir_ssa_def *
nir_ssa_for_alu_src(nir_builder *build, nir_alu_instr *instr, unsigned srcn)
{
   static uint8_t trivial_swizzle[4] = { 0, 1, 2, 3 };
   nir_alu_src *src = &instr->src[srcn];
   unsigned num_components = nir_ssa_alu_instr_src_components(instr, srcn);

   if (src->src.is_ssa && (src->src.ssa->num_components == num_components) &&
       !src->abs && !src->negate &&
       (memcmp(src->swizzle, trivial_swizzle, num_components) == 0))
      return src->src.ssa;

   return nir_imov_alu(build, *src, num_components);
}

static inline nir_ssa_def *
nir_load_var(nir_builder *build, nir_variable *var)
{
   const unsigned num_components = glsl_get_vector_elements(var->type);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_var);
   load->num_components = num_components;
   load->variables[0] = nir_deref_var_create(load, var);
   nir_ssa_dest_init(&load->instr, &load->dest, num_components,
                     glsl_get_bit_size(var->type), NULL);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

static inline nir_ssa_def *
nir_load_deref_var(nir_builder *build, nir_deref_var *deref)
{
   const struct glsl_type *type = nir_deref_tail(&deref->deref)->type;
   const unsigned num_components = glsl_get_vector_elements(type);

   nir_intrinsic_instr *load =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_load_var);
   load->num_components = num_components;
   load->variables[0] = nir_deref_var_clone(deref, load);
   nir_ssa_dest_init(&load->instr, &load->dest, num_components,
                     glsl_get_bit_size(type), NULL);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

static inline void
nir_store_var(nir_builder *build, nir_variable *var, nir_ssa_def *value,
              unsigned writemask)
{
   const unsigned num_components = glsl_get_vector_elements(var->type);

   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_var);
   store->num_components = num_components;
   nir_intrinsic_set_write_mask(store, writemask);
   store->variables[0] = nir_deref_var_create(store, var);
   store->src[0] = nir_src_for_ssa(value);
   nir_builder_instr_insert(build, &store->instr);
}

static inline void
nir_store_deref_var(nir_builder *build, nir_deref_var *deref,
                    nir_ssa_def *value, unsigned writemask)
{
   const unsigned num_components =
      glsl_get_vector_elements(nir_deref_tail(&deref->deref)->type);

   nir_intrinsic_instr *store =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_store_var);
   store->num_components = num_components;
   store->const_index[0] = writemask & ((1 << num_components) - 1);
   store->variables[0] = nir_deref_var_clone(deref, store);
   store->src[0] = nir_src_for_ssa(value);
   nir_builder_instr_insert(build, &store->instr);
}

static inline void
nir_copy_deref_var(nir_builder *build, nir_deref_var *dest, nir_deref_var *src)
{
   assert(nir_deref_tail(&dest->deref)->type ==
          nir_deref_tail(&src->deref)->type);

   nir_intrinsic_instr *copy =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_var);
   copy->variables[0] = nir_deref_var_clone(dest, copy);
   copy->variables[1] = nir_deref_var_clone(src, copy);
   nir_builder_instr_insert(build, &copy->instr);
}

static inline void
nir_copy_var(nir_builder *build, nir_variable *dest, nir_variable *src)
{
   nir_intrinsic_instr *copy =
      nir_intrinsic_instr_create(build->shader, nir_intrinsic_copy_var);
   copy->variables[0] = nir_deref_var_create(copy, dest);
   copy->variables[1] = nir_deref_var_create(copy, src);
   nir_builder_instr_insert(build, &copy->instr);
}
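
/*
 * Variable access sketch (assumes "b" and a vec4 nir_variable "var"):
 *
 *    nir_ssa_def *val = nir_load_var(b, var);
 *    nir_store_var(b, var, nir_fneg(b, val), 0xf);   // write all components
 */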

/* Generic builder for system values. */
static inline nir_ssa_def *
nir_load_system_value(nir_builder *build, nir_intrinsic_op op, int index)
{
   nir_intrinsic_instr *load = nir_intrinsic_instr_create(build->shader, op);
   load->num_components = nir_intrinsic_infos[op].dest_components;
   load->const_index[0] = index;
   nir_ssa_dest_init(&load->instr, &load->dest,
                     nir_intrinsic_infos[op].dest_components, 32, NULL);
   nir_builder_instr_insert(build, &load->instr);
   return &load->dest.ssa;
}

/* Generate custom builders for system values. */
#define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
                  num_variables, num_indices, idx0, idx1, idx2, flags)
#define LAST_INTRINSIC(name)

#define DEFINE_SYSTEM_VALUE(name)                                        \
   static inline nir_ssa_def *                                           \
   nir_load_##name(nir_builder *build)                                   \
   {                                                                     \
      return nir_load_system_value(build, nir_intrinsic_load_##name, 0); \
   }

#include "nir_intrinsics.h"
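
/*
 * Sketch: the DEFINE_SYSTEM_VALUE() expansion above yields helpers such as
 * nir_load_vertex_id(b) or nir_load_local_invocation_id(b), each a single
 * call to nir_load_system_value() for the matching intrinsic.
 */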

static inline nir_ssa_def *
nir_load_barycentric(nir_builder *build, nir_intrinsic_op op,
                     unsigned interp_mode)
{
   nir_intrinsic_instr *bary = nir_intrinsic_instr_create(build->shader, op);
   nir_ssa_dest_init(&bary->instr, &bary->dest, 2, 32, NULL);
   nir_intrinsic_set_interp_mode(bary, interp_mode);
   nir_builder_instr_insert(build, &bary->instr);
   return &bary->dest.ssa;
}

static inline void
nir_jump(nir_builder *build, nir_jump_type jump_type)
{
   nir_jump_instr *jump = nir_jump_instr_create(build->shader, jump_type);
   nir_builder_instr_insert(build, &jump->instr);
}

static inline nir_ssa_def *
nir_compare_func(nir_builder *b, enum compare_func func,
                 nir_ssa_def *src0, nir_ssa_def *src1)
{
   switch (func) {
   case COMPARE_FUNC_NEVER:
      return nir_imm_int(b, 0);
   case COMPARE_FUNC_ALWAYS:
      return nir_imm_int(b, ~0);
   case COMPARE_FUNC_EQUAL:
      return nir_feq(b, src0, src1);
   case COMPARE_FUNC_NOTEQUAL:
      return nir_fne(b, src0, src1);
   case COMPARE_FUNC_GREATER:
      return nir_flt(b, src1, src0);
   case COMPARE_FUNC_GEQUAL:
      return nir_fge(b, src0, src1);
   case COMPARE_FUNC_LESS:
      return nir_flt(b, src0, src1);
   case COMPARE_FUNC_LEQUAL:
      return nir_fge(b, src1, src0);
   }
   unreachable("bad compare func");
}

#endif /* NIR_BUILDER_H */