Home | History | Annotate | Download | only in vc4
      1 /*
      2  * Copyright  2014 Broadcom
      3  *
      4  * Permission is hereby granted, free of charge, to any person obtaining a
      5  * copy of this software and associated documentation files (the "Software"),
      6  * to deal in the Software without restriction, including without limitation
      7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
      8  * and/or sell copies of the Software, and to permit persons to whom the
      9  * Software is furnished to do so, subject to the following conditions:
     10  *
     11  * The above copyright notice and this permission notice (including the next
     12  * paragraph) shall be included in all copies or substantial portions of the
     13  * Software.
     14  *
     15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
     18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
     19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
     20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
     21  * IN THE SOFTWARE.
     22  */
     23 
     24 #ifndef VC4_QIR_H
     25 #define VC4_QIR_H
     26 
     27 #include <assert.h>
     28 #include <stdio.h>
     29 #include <stdlib.h>
     30 #include <stdbool.h>
     31 #include <stdint.h>
     32 #include <string.h>
     33 
     34 #include "util/macros.h"
     35 #include "compiler/nir/nir.h"
     36 #include "util/list.h"
     37 #include "util/u_math.h"
     38 
     39 #include "vc4_screen.h"
     40 #include "vc4_qpu_defines.h"
     41 #include "vc4_qpu.h"
     42 #include "kernel/vc4_packet.h"
     43 #include "pipe/p_state.h"
     44 
     45 struct nir_builder;
     46 
     47 enum qfile {
     48         QFILE_NULL,
     49         QFILE_TEMP,
     50         QFILE_VARY,
     51         QFILE_UNIF,
     52         QFILE_VPM,
     53         QFILE_TLB_COLOR_WRITE,
     54         QFILE_TLB_COLOR_WRITE_MS,
     55         QFILE_TLB_Z_WRITE,
     56         QFILE_TLB_STENCIL_SETUP,
     57 
     58         /* If tex_s is written on its own without preceding t/r/b setup, it's
     59          * a direct memory access using the input value, without the sideband
     60          * uniform load.  We represent these in QIR as a separate write
     61          * destination so we can tell if the sideband uniform is present.
     62          */
     63         QFILE_TEX_S_DIRECT,
     64 
     65         QFILE_TEX_S,
     66         QFILE_TEX_T,
     67         QFILE_TEX_R,
     68         QFILE_TEX_B,
     69 
     70         /* Payload registers that aren't in the physical register file, so we
     71          * can just use the corresponding qpu_reg at qpu_emit time.
     72          */
     73         QFILE_FRAG_X,
     74         QFILE_FRAG_Y,
     75         QFILE_FRAG_REV_FLAG,
     76         QFILE_QPU_ELEMENT,
     77 
     78         /**
     79          * Stores an immediate value in the index field that will be used
     80          * directly by qpu_load_imm().
     81          */
     82         QFILE_LOAD_IMM,
     83 
     84         /**
     85          * Stores an immediate value in the index field that can be turned
     86          * into a small immediate field by qpu_encode_small_immediate().
     87          */
     88         QFILE_SMALL_IMM,
     89 };
     90 
     91 struct qreg {
     92         enum qfile file;
     93         uint32_t index;
     94         int pack;
     95 };
     96 
     97 static inline struct qreg qir_reg(enum qfile file, uint32_t index)
     98 {
     99         return (struct qreg){file, index};
    100 }
    101 
    102 enum qop {
    103         QOP_UNDEF,
    104         QOP_MOV,
    105         QOP_FMOV,
    106         QOP_MMOV,
    107         QOP_FADD,
    108         QOP_FSUB,
    109         QOP_FMUL,
    110         QOP_V8MULD,
    111         QOP_V8MIN,
    112         QOP_V8MAX,
    113         QOP_V8ADDS,
    114         QOP_V8SUBS,
    115         QOP_MUL24,
    116         QOP_FMIN,
    117         QOP_FMAX,
    118         QOP_FMINABS,
    119         QOP_FMAXABS,
    120         QOP_ADD,
    121         QOP_SUB,
    122         QOP_SHL,
    123         QOP_SHR,
    124         QOP_ASR,
    125         QOP_MIN,
    126         QOP_MIN_NOIMM,
    127         QOP_MAX,
    128         QOP_AND,
    129         QOP_OR,
    130         QOP_XOR,
    131         QOP_NOT,
    132 
    133         QOP_FTOI,
    134         QOP_ITOF,
    135         QOP_RCP,
    136         QOP_RSQ,
    137         QOP_EXP2,
    138         QOP_LOG2,
    139         QOP_VW_SETUP,
    140         QOP_VR_SETUP,
    141         QOP_TLB_COLOR_READ,
    142         QOP_MS_MASK,
    143         QOP_VARY_ADD_C,
    144 
    145         QOP_FRAG_Z,
    146         QOP_FRAG_W,
    147 
    148         /**
    149          * Signal of texture read being necessary and then reading r4 into
    150          * the destination
    151          */
    152         QOP_TEX_RESULT,
    153 
    154         /**
    155          * Insert the signal for switching threads in a threaded fragment
    156          * shader.  No value can be live in an accumulator across a thrsw.
    157          *
    158          * At the QPU level, this will have several delay slots before the
    159          * switch happens.  Those slots are the responsibility of the
    160          * scheduler.
    161          */
    162         QOP_THRSW,
    163 
    164         /* 32-bit immediate loaded to each SIMD channel */
    165         QOP_LOAD_IMM,
    166 
    167         /* 32-bit immediate divided into 16 2-bit unsigned int values and
    168          * loaded to each corresponding SIMD channel.
    169          */
    170         QOP_LOAD_IMM_U2,
    171         /* 32-bit immediate divided into 16 2-bit signed int values and
    172          * loaded to each corresponding SIMD channel.
    173          */
    174         QOP_LOAD_IMM_I2,
    175 
    176         QOP_ROT_MUL,
    177 
    178         /* Jumps to block->successor[0] if the qinst->cond (as a
    179          * QPU_COND_BRANCH_*) passes, or block->successor[1] if not.  Note
    180          * that block->successor[1] may be unset if the condition is ALWAYS.
    181          */
    182         QOP_BRANCH,
    183 
    184         /* Emits an ADD from src[0] to src[1], where src[0] must be a
    185          * QOP_LOAD_IMM result and src[1] is a QUNIFORM_UNIFORMS_ADDRESS,
    186          * required by the kernel as part of its branch validation.
    187          */
    188         QOP_UNIFORMS_RESET,
    189 };
    190 
    191 struct queued_qpu_inst {
    192         struct list_head link;
    193         uint64_t inst;
    194 };
    195 
    196 struct qinst {
    197         struct list_head link;
    198 
    199         enum qop op;
    200         struct qreg dst;
    201         struct qreg src[3];
    202         bool sf;
    203         bool cond_is_exec_mask;
    204         uint8_t cond;
    205 };
    206 
    207 enum qstage {
    208         /**
    209          * Coordinate shader, runs during binning, before the VS, and just
    210          * outputs position.
    211          */
    212         QSTAGE_COORD,
    213         QSTAGE_VERT,
    214         QSTAGE_FRAG,
    215 };
    216 
    217 enum quniform_contents {
    218         /**
    219          * Indicates that a constant 32-bit value is copied from the program's
    220          * uniform contents.
    221          */
    222         QUNIFORM_CONSTANT,
    223         /**
    224          * Indicates that the program's uniform contents are used as an index
    225          * into the GL uniform storage.
    226          */
    227         QUNIFORM_UNIFORM,
    228 
    229         /** @{
    230          * Scaling factors from clip coordinates to relative to the viewport
    231          * center.
    232          *
    233          * This is used by the coordinate and vertex shaders to produce the
    234          * 32-bit entry consisting of 2 16-bit fields with 12.4 signed fixed
    235          * point offsets from the viewport ccenter.
    236          */
    237         QUNIFORM_VIEWPORT_X_SCALE,
    238         QUNIFORM_VIEWPORT_Y_SCALE,
    239         /** @} */
    240 
    241         QUNIFORM_VIEWPORT_Z_OFFSET,
    242         QUNIFORM_VIEWPORT_Z_SCALE,
    243 
    244         QUNIFORM_USER_CLIP_PLANE,
    245 
    246         /**
    247          * A reference to a texture config parameter 0 uniform.
    248          *
    249          * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
    250          * defines texture type, miplevels, and such.  It will be found as a
    251          * parameter to the first QOP_TEX_[STRB] instruction in a sequence.
    252          */
    253         QUNIFORM_TEXTURE_CONFIG_P0,
    254 
    255         /**
    256          * A reference to a texture config parameter 1 uniform.
    257          *
    258          * This is a uniform implicitly loaded with a QPU_W_TMU* write, which
    259          * defines texture width, height, filters, and wrap modes.  It will be
    260          * found as a parameter to the second QOP_TEX_[STRB] instruction in a
    261          * sequence.
    262          */
    263         QUNIFORM_TEXTURE_CONFIG_P1,
    264 
    265         /** A reference to a texture config parameter 2 cubemap stride uniform */
    266         QUNIFORM_TEXTURE_CONFIG_P2,
    267 
    268         QUNIFORM_TEXTURE_FIRST_LEVEL,
    269 
    270         QUNIFORM_TEXTURE_MSAA_ADDR,
    271 
    272         QUNIFORM_UBO_ADDR,
    273 
    274         QUNIFORM_TEXRECT_SCALE_X,
    275         QUNIFORM_TEXRECT_SCALE_Y,
    276 
    277         QUNIFORM_TEXTURE_BORDER_COLOR,
    278 
    279         QUNIFORM_BLEND_CONST_COLOR_X,
    280         QUNIFORM_BLEND_CONST_COLOR_Y,
    281         QUNIFORM_BLEND_CONST_COLOR_Z,
    282         QUNIFORM_BLEND_CONST_COLOR_W,
    283         QUNIFORM_BLEND_CONST_COLOR_RGBA,
    284         QUNIFORM_BLEND_CONST_COLOR_AAAA,
    285 
    286         QUNIFORM_STENCIL,
    287 
    288         QUNIFORM_ALPHA_REF,
    289         QUNIFORM_SAMPLE_MASK,
    290 
    291         /* Placeholder uniform that will be updated by the kernel when used by
    292          * an instruction writing to QPU_W_UNIFORMS_ADDRESS.
    293          */
    294         QUNIFORM_UNIFORMS_ADDRESS,
    295 };
    296 
    297 struct vc4_varying_slot {
    298         uint8_t slot;
    299         uint8_t swizzle;
    300 };
    301 
    302 struct vc4_compiler_ubo_range {
    303         /**
    304          * offset in bytes from the start of the ubo where this range is
    305          * uploaded.
    306          *
    307          * Only set once used is set.
    308          */
    309         uint32_t dst_offset;
    310 
    311         /**
    312          * offset in bytes from the start of the gallium uniforms where the
    313          * data comes from.
    314          */
    315         uint32_t src_offset;
    316 
    317         /** size in bytes of this ubo range */
    318         uint32_t size;
    319 
    320         /**
    321          * Set if this range is used by the shader for indirect uniforms
    322          * access.
    323          */
    324         bool used;
    325 };
    326 
    327 struct vc4_key {
    328         struct vc4_uncompiled_shader *shader_state;
    329         struct {
    330                 enum pipe_format format;
    331                 uint8_t swizzle[4];
    332                 union {
    333                         struct {
    334                                 unsigned compare_mode:1;
    335                                 unsigned compare_func:3;
    336                                 unsigned wrap_s:3;
    337                                 unsigned wrap_t:3;
    338                                 bool force_first_level:1;
    339                         };
    340                         struct {
    341                                 uint16_t msaa_width, msaa_height;
    342                         };
    343                 };
    344         } tex[VC4_MAX_TEXTURE_SAMPLERS];
    345         uint8_t ucp_enables;
    346 };
    347 
    348 struct vc4_fs_key {
    349         struct vc4_key base;
    350         enum pipe_format color_format;
    351         bool depth_enabled;
    352         bool stencil_enabled;
    353         bool stencil_twoside;
    354         bool stencil_full_writemasks;
    355         bool is_points;
    356         bool is_lines;
    357         bool point_coord_upper_left;
    358         bool light_twoside;
    359         bool msaa;
    360         bool sample_coverage;
    361         bool sample_alpha_to_coverage;
    362         bool sample_alpha_to_one;
    363         uint8_t alpha_test_func;
    364         uint8_t logicop_func;
    365         uint32_t point_sprite_mask;
    366 
    367         struct pipe_rt_blend_state blend;
    368 };
    369 
    370 struct vc4_vs_key {
    371         struct vc4_key base;
    372 
    373         const struct vc4_fs_inputs *fs_inputs;
    374         enum pipe_format attr_formats[8];
    375         bool is_coord;
    376         bool per_vertex_point_size;
    377         bool clamp_color;
    378 };
    379 
    380 /** A basic block of QIR intructions. */
    381 struct qblock {
    382         struct list_head link;
    383 
    384         struct list_head instructions;
    385         struct list_head qpu_inst_list;
    386 
    387         struct set *predecessors;
    388         struct qblock *successors[2];
    389 
    390         int index;
    391 
    392         /* Instruction IPs for the first and last instruction of the block.
    393          * Set by vc4_qpu_schedule.c.
    394          */
    395         uint32_t start_qpu_ip;
    396         uint32_t end_qpu_ip;
    397 
    398         /* Instruction IP for the branch instruction of the block.  Set by
    399          * vc4_qpu_schedule.c.
    400          */
    401         uint32_t branch_qpu_ip;
    402 
    403         /** @{ used by vc4_qir_live_variables.c */
    404         BITSET_WORD *def;
    405         BITSET_WORD *use;
    406         BITSET_WORD *live_in;
    407         BITSET_WORD *live_out;
    408         int start_ip, end_ip;
    409         /** @} */
    410 };
    411 
    412 struct vc4_compile {
    413         struct vc4_context *vc4;
    414         nir_shader *s;
    415         nir_function_impl *impl;
    416         struct exec_list *cf_node_list;
    417 
    418         /**
    419          * Mapping from nir_register * or nir_ssa_def * to array of struct
    420          * qreg for the values.
    421          */
    422         struct hash_table *def_ht;
    423 
    424         /* For each temp, the instruction generating its value. */
    425         struct qinst **defs;
    426         uint32_t defs_array_size;
    427 
    428         /**
    429          * Inputs to the shader, arranged by TGSI declaration order.
    430          *
    431          * Not all fragment shader QFILE_VARY reads are present in this array.
    432          */
    433         struct qreg *inputs;
    434         struct qreg *outputs;
    435         bool msaa_per_sample_output;
    436         struct qreg color_reads[VC4_MAX_SAMPLES];
    437         struct qreg sample_colors[VC4_MAX_SAMPLES];
    438         uint32_t inputs_array_size;
    439         uint32_t outputs_array_size;
    440         uint32_t uniforms_array_size;
    441 
    442         struct vc4_compiler_ubo_range *ubo_ranges;
    443         uint32_t ubo_ranges_array_size;
    444         /** Number of uniform areas declared in ubo_ranges. */
    445         uint32_t num_uniform_ranges;
    446         /** Number of uniform areas used for indirect addressed loads. */
    447         uint32_t num_ubo_ranges;
    448         uint32_t next_ubo_dst_offset;
    449 
    450         /* State for whether we're executing on each channel currently.  0 if
    451          * yes, otherwise a block number + 1 that the channel jumped to.
    452          */
    453         struct qreg execute;
    454 
    455         struct qreg line_x, point_x, point_y;
    456         /** boolean (~0 -> true) if the fragment has been discarded. */
    457         struct qreg discard;
    458         struct qreg payload_FRAG_Z;
    459         struct qreg payload_FRAG_W;
    460 
    461         uint8_t vattr_sizes[8];
    462 
    463         /**
    464          * Array of the VARYING_SLOT_* of all FS QFILE_VARY reads.
    465          *
    466          * This includes those that aren't part of the VPM varyings, like
    467          * point/line coordinates.
    468          */
    469         struct vc4_varying_slot *input_slots;
    470         uint32_t num_input_slots;
    471         uint32_t input_slots_array_size;
    472 
    473         /**
    474          * An entry per outputs[] in the VS indicating what the VARYING_SLOT_*
    475          * of the output is.  Used to emit from the VS in the order that the
    476          * FS needs.
    477          */
    478         struct vc4_varying_slot *output_slots;
    479 
    480         struct pipe_shader_state *shader_state;
    481         struct vc4_key *key;
    482         struct vc4_fs_key *fs_key;
    483         struct vc4_vs_key *vs_key;
    484 
    485         /* Live ranges of temps. */
    486         int *temp_start, *temp_end;
    487 
    488         uint32_t *uniform_data;
    489         enum quniform_contents *uniform_contents;
    490         uint32_t uniform_array_size;
    491         uint32_t num_uniforms;
    492         uint32_t num_outputs;
    493         uint32_t num_texture_samples;
    494         uint32_t output_position_index;
    495         uint32_t output_color_index;
    496         uint32_t output_point_size_index;
    497         uint32_t output_sample_mask_index;
    498 
    499         struct qreg undef;
    500         enum qstage stage;
    501         uint32_t num_temps;
    502 
    503         struct list_head blocks;
    504         int next_block_index;
    505         struct qblock *cur_block;
    506         struct qblock *loop_cont_block;
    507         struct qblock *loop_break_block;
    508         struct qblock *last_top_block;
    509 
    510         struct list_head qpu_inst_list;
    511 
    512         /* Pre-QPU-scheduled instruction containing the last THRSW */
    513         uint64_t *last_thrsw;
    514 
    515         uint64_t *qpu_insts;
    516         uint32_t qpu_inst_count;
    517         uint32_t qpu_inst_size;
    518         uint32_t num_inputs;
    519 
    520         /**
    521          * Number of inputs from num_inputs remaining to be queued to the read
    522          * FIFO in the VS/CS.
    523          */
    524         uint32_t num_inputs_remaining;
    525 
    526         /* Number of inputs currently in the read FIFO for the VS/CS */
    527         uint32_t num_inputs_in_fifo;
    528 
    529         /** Next offset in the VPM to read from in the VS/CS */
    530         uint32_t vpm_read_offset;
    531 
    532         uint32_t program_id;
    533         uint32_t variant_id;
    534 
    535         /* Set to compile program in threaded FS mode, where SIG_THREAD_SWITCH
    536          * is used to hide texturing latency at the cost of limiting ourselves
    537          * to the bottom half of physical reg space.
    538          */
    539         bool fs_threaded;
    540 
    541         bool last_thrsw_at_top_level;
    542 
    543         bool failed;
    544 };
    545 
    546 /* Special nir_load_input intrinsic index for loading the current TLB
    547  * destination color.
    548  */
    549 #define VC4_NIR_TLB_COLOR_READ_INPUT		2000000000
    550 
    551 #define VC4_NIR_MS_MASK_OUTPUT			2000000000
    552 
    553 struct vc4_compile *qir_compile_init(void);
    554 void qir_compile_destroy(struct vc4_compile *c);
    555 struct qblock *qir_new_block(struct vc4_compile *c);
    556 void qir_set_emit_block(struct vc4_compile *c, struct qblock *block);
    557 void qir_link_blocks(struct qblock *predecessor, struct qblock *successor);
    558 struct qblock *qir_entry_block(struct vc4_compile *c);
    559 struct qblock *qir_exit_block(struct vc4_compile *c);
    560 struct qinst *qir_inst(enum qop op, struct qreg dst,
    561                        struct qreg src0, struct qreg src1);
    562 void qir_remove_instruction(struct vc4_compile *c, struct qinst *qinst);
    563 struct qreg qir_uniform(struct vc4_compile *c,
    564                         enum quniform_contents contents,
    565                         uint32_t data);
    566 void qir_schedule_instructions(struct vc4_compile *c);
    567 void qir_reorder_uniforms(struct vc4_compile *c);
    568 void qir_emit_uniform_stream_resets(struct vc4_compile *c);
    569 
    570 struct qreg qir_emit_def(struct vc4_compile *c, struct qinst *inst);
    571 struct qinst *qir_emit_nondef(struct vc4_compile *c, struct qinst *inst);
    572 
    573 struct qreg qir_get_temp(struct vc4_compile *c);
    574 void qir_calculate_live_intervals(struct vc4_compile *c);
    575 int qir_get_nsrc(struct qinst *inst);
    576 int qir_get_non_sideband_nsrc(struct qinst *inst);
    577 int qir_get_tex_uniform_src(struct qinst *inst);
    578 bool qir_reg_equals(struct qreg a, struct qreg b);
    579 bool qir_has_side_effects(struct vc4_compile *c, struct qinst *inst);
    580 bool qir_has_side_effect_reads(struct vc4_compile *c, struct qinst *inst);
    581 bool qir_has_uniform_read(struct qinst *inst);
    582 bool qir_is_mul(struct qinst *inst);
    583 bool qir_is_raw_mov(struct qinst *inst);
    584 bool qir_is_tex(struct qinst *inst);
    585 bool qir_has_implicit_tex_uniform(struct qinst *inst);
    586 bool qir_is_float_input(struct qinst *inst);
    587 bool qir_depends_on_flags(struct qinst *inst);
    588 bool qir_writes_r4(struct qinst *inst);
    589 struct qreg qir_follow_movs(struct vc4_compile *c, struct qreg reg);
    590 uint8_t qir_channels_written(struct qinst *inst);
    591 
    592 void qir_dump(struct vc4_compile *c);
    593 void qir_dump_inst(struct vc4_compile *c, struct qinst *inst);
    594 const char *qir_get_stage_name(enum qstage stage);
    595 
    596 void qir_validate(struct vc4_compile *c);
    597 
    598 void qir_optimize(struct vc4_compile *c);
    599 bool qir_opt_algebraic(struct vc4_compile *c);
    600 bool qir_opt_coalesce_ff_writes(struct vc4_compile *c);
    601 bool qir_opt_constant_folding(struct vc4_compile *c);
    602 bool qir_opt_copy_propagation(struct vc4_compile *c);
    603 bool qir_opt_dead_code(struct vc4_compile *c);
    604 bool qir_opt_peephole_sf(struct vc4_compile *c);
    605 bool qir_opt_small_immediates(struct vc4_compile *c);
    606 bool qir_opt_vpm(struct vc4_compile *c);
    607 void vc4_nir_lower_blend(nir_shader *s, struct vc4_compile *c);
    608 void vc4_nir_lower_io(nir_shader *s, struct vc4_compile *c);
    609 nir_ssa_def *vc4_nir_get_swizzled_channel(struct nir_builder *b,
    610                                           nir_ssa_def **srcs, int swiz);
    611 void vc4_nir_lower_txf_ms(nir_shader *s, struct vc4_compile *c);
    612 void qir_lower_uniforms(struct vc4_compile *c);
    613 
    614 uint32_t qpu_schedule_instructions(struct vc4_compile *c);
    615 
    616 void qir_SF(struct vc4_compile *c, struct qreg src);
    617 
    618 static inline struct qreg
    619 qir_uniform_ui(struct vc4_compile *c, uint32_t ui)
    620 {
    621         return qir_uniform(c, QUNIFORM_CONSTANT, ui);
    622 }
    623 
    624 static inline struct qreg
    625 qir_uniform_f(struct vc4_compile *c, float f)
    626 {
    627         return qir_uniform(c, QUNIFORM_CONSTANT, fui(f));
    628 }
    629 
    630 #define QIR_ALU0(name)                                                   \
    631 static inline struct qreg                                                \
    632 qir_##name(struct vc4_compile *c)                                        \
    633 {                                                                        \
    634         return qir_emit_def(c, qir_inst(QOP_##name, c->undef,            \
    635                                         c->undef, c->undef));            \
    636 }                                                                        \
    637 static inline struct qinst *                                             \
    638 qir_##name##_dest(struct vc4_compile *c, struct qreg dest)               \
    639 {                                                                        \
    640         return qir_emit_nondef(c, qir_inst(QOP_##name, dest,             \
    641                                            c->undef, c->undef));         \
    642 }
    643 
    644 #define QIR_ALU1(name)                                                   \
    645 static inline struct qreg                                                \
    646 qir_##name(struct vc4_compile *c, struct qreg a)                         \
    647 {                                                                        \
    648         return qir_emit_def(c, qir_inst(QOP_##name, c->undef,            \
    649                                         a, c->undef));                   \
    650 }                                                                        \
    651 static inline struct qinst *                                             \
    652 qir_##name##_dest(struct vc4_compile *c, struct qreg dest,               \
    653                   struct qreg a)                                         \
    654 {                                                                        \
    655         return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a,          \
    656                                            c->undef));                   \
    657 }
    658 
    659 #define QIR_ALU2(name)                                                   \
    660 static inline struct qreg                                                \
    661 qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b)          \
    662 {                                                                        \
    663         return qir_emit_def(c, qir_inst(QOP_##name, c->undef, a, b));    \
    664 }                                                                        \
    665 static inline struct qinst *                                             \
    666 qir_##name##_dest(struct vc4_compile *c, struct qreg dest,               \
    667                   struct qreg a, struct qreg b)                          \
    668 {                                                                        \
    669         return qir_emit_nondef(c, qir_inst(QOP_##name, dest, a, b));     \
    670 }
    671 
    672 #define QIR_NODST_1(name)                                               \
    673 static inline struct qinst *                                            \
    674 qir_##name(struct vc4_compile *c, struct qreg a)                        \
    675 {                                                                       \
    676         return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef,        \
    677                                            a, c->undef));               \
    678 }
    679 
    680 #define QIR_NODST_2(name)                                               \
    681 static inline struct qinst *                                            \
    682 qir_##name(struct vc4_compile *c, struct qreg a, struct qreg b)         \
    683 {                                                                       \
    684         return qir_emit_nondef(c, qir_inst(QOP_##name, c->undef,        \
    685                                            a, b));                      \
    686 }
    687 
    688 #define QIR_PAYLOAD(name)                                                \
    689 static inline struct qreg                                                \
    690 qir_##name(struct vc4_compile *c)                                        \
    691 {                                                                        \
    692         struct qreg *payload = &c->payload_##name;                       \
    693         if (payload->file != QFILE_NULL)                                 \
    694                 return *payload;                                         \
    695         *payload = qir_get_temp(c);                                      \
    696         struct qinst *inst = qir_inst(QOP_##name, *payload,              \
    697                                       c->undef, c->undef);               \
    698         struct qblock *entry = qir_entry_block(c);                       \
    699         list_add(&inst->link, &entry->instructions);                     \
    700         c->defs[payload->index] = inst;                                  \
    701         return *payload;                                                 \
    702 }
    703 
    704 QIR_ALU1(MOV)
    705 QIR_ALU1(FMOV)
    706 QIR_ALU1(MMOV)
    707 QIR_ALU2(FADD)
    708 QIR_ALU2(FSUB)
    709 QIR_ALU2(FMUL)
    710 QIR_ALU2(V8MULD)
    711 QIR_ALU2(V8MIN)
    712 QIR_ALU2(V8MAX)
    713 QIR_ALU2(V8ADDS)
    714 QIR_ALU2(V8SUBS)
    715 QIR_ALU2(MUL24)
    716 QIR_ALU2(FMIN)
    717 QIR_ALU2(FMAX)
    718 QIR_ALU2(FMINABS)
    719 QIR_ALU2(FMAXABS)
    720 QIR_ALU1(FTOI)
    721 QIR_ALU1(ITOF)
    722 
    723 QIR_ALU2(ADD)
    724 QIR_ALU2(SUB)
    725 QIR_ALU2(SHL)
    726 QIR_ALU2(SHR)
    727 QIR_ALU2(ASR)
    728 QIR_ALU2(MIN)
    729 QIR_ALU2(MIN_NOIMM)
    730 QIR_ALU2(MAX)
    731 QIR_ALU2(AND)
    732 QIR_ALU2(OR)
    733 QIR_ALU2(XOR)
    734 QIR_ALU1(NOT)
    735 
    736 QIR_ALU1(RCP)
    737 QIR_ALU1(RSQ)
    738 QIR_ALU1(EXP2)
    739 QIR_ALU1(LOG2)
    740 QIR_ALU1(VARY_ADD_C)
    741 QIR_PAYLOAD(FRAG_Z)
    742 QIR_PAYLOAD(FRAG_W)
    743 QIR_ALU0(TEX_RESULT)
    744 QIR_ALU0(TLB_COLOR_READ)
    745 QIR_NODST_1(MS_MASK)
    746 
    747 static inline struct qreg
    748 qir_SEL(struct vc4_compile *c, uint8_t cond, struct qreg src0, struct qreg src1)
    749 {
    750         struct qreg t = qir_get_temp(c);
    751         qir_MOV_dest(c, t, src1);
    752         qir_MOV_dest(c, t, src0)->cond = cond;
    753         return t;
    754 }
    755 
    756 static inline struct qreg
    757 qir_UNPACK_8_F(struct vc4_compile *c, struct qreg src, int i)
    758 {
    759         struct qreg t = qir_FMOV(c, src);
    760         c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
    761         return t;
    762 }
    763 
    764 static inline struct qreg
    765 qir_UNPACK_8_I(struct vc4_compile *c, struct qreg src, int i)
    766 {
    767         struct qreg t = qir_MOV(c, src);
    768         c->defs[t.index]->src[0].pack = QPU_UNPACK_8A + i;
    769         return t;
    770 }
    771 
    772 static inline struct qreg
    773 qir_UNPACK_16_F(struct vc4_compile *c, struct qreg src, int i)
    774 {
    775         struct qreg t = qir_FMOV(c, src);
    776         c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
    777         return t;
    778 }
    779 
    780 static inline struct qreg
    781 qir_UNPACK_16_I(struct vc4_compile *c, struct qreg src, int i)
    782 {
    783         struct qreg t = qir_MOV(c, src);
    784         c->defs[t.index]->src[0].pack = QPU_UNPACK_16A + i;
    785         return t;
    786 }
    787 
    788 static inline void
    789 qir_PACK_8_F(struct vc4_compile *c, struct qreg dest, struct qreg val, int chan)
    790 {
    791         assert(!dest.pack);
    792         dest.pack = QPU_PACK_MUL_8A + chan;
    793         qir_emit_nondef(c, qir_inst(QOP_MMOV, dest, val, c->undef));
    794 }
    795 
    796 static inline struct qreg
    797 qir_PACK_8888_F(struct vc4_compile *c, struct qreg val)
    798 {
    799         struct qreg dest = qir_MMOV(c, val);
    800         c->defs[dest.index]->dst.pack = QPU_PACK_MUL_8888;
    801         return dest;
    802 }
    803 
    804 static inline struct qreg
    805 qir_POW(struct vc4_compile *c, struct qreg x, struct qreg y)
    806 {
    807         return qir_EXP2(c, qir_FMUL(c,
    808                                     y,
    809                                     qir_LOG2(c, x)));
    810 }
    811 
    812 static inline void
    813 qir_VPM_WRITE(struct vc4_compile *c, struct qreg val)
    814 {
    815         qir_MOV_dest(c, qir_reg(QFILE_VPM, 0), val);
    816 }
    817 
    818 static inline struct qreg
    819 qir_LOAD_IMM(struct vc4_compile *c, uint32_t val)
    820 {
    821         return qir_emit_def(c, qir_inst(QOP_LOAD_IMM, c->undef,
    822                                         qir_reg(QFILE_LOAD_IMM, val), c->undef));
    823 }
    824 
    825 static inline struct qreg
    826 qir_LOAD_IMM_U2(struct vc4_compile *c, uint32_t val)
    827 {
    828         return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_U2, c->undef,
    829                                         qir_reg(QFILE_LOAD_IMM, val),
    830                                         c->undef));
    831 }
    832 
    833 static inline struct qreg
    834 qir_LOAD_IMM_I2(struct vc4_compile *c, uint32_t val)
    835 {
    836         return qir_emit_def(c, qir_inst(QOP_LOAD_IMM_I2, c->undef,
    837                                         qir_reg(QFILE_LOAD_IMM, val),
    838                                         c->undef));
    839 }
    840 
    841 /** Shifts the multiply output to the right by rot channels */
    842 static inline struct qreg
    843 qir_ROT_MUL(struct vc4_compile *c, struct qreg val, uint32_t rot)
    844 {
    845         return qir_emit_def(c, qir_inst(QOP_ROT_MUL, c->undef,
    846                                         val,
    847                                         qir_reg(QFILE_LOAD_IMM,
    848                                                 QPU_SMALL_IMM_MUL_ROT + rot)));
    849 }
    850 
    851 static inline struct qinst *
    852 qir_MOV_cond(struct vc4_compile *c, uint8_t cond,
    853              struct qreg dest, struct qreg src)
    854 {
    855         struct qinst *mov = qir_MOV_dest(c, dest, src);
    856         mov->cond = cond;
    857         return mov;
    858 }
    859 
    860 static inline struct qinst *
    861 qir_BRANCH(struct vc4_compile *c, uint8_t cond)
    862 {
    863         struct qinst *inst = qir_inst(QOP_BRANCH, c->undef, c->undef, c->undef);
    864         inst->cond = cond;
    865         qir_emit_nondef(c, inst);
    866         return inst;
    867 }
    868 
    869 #define qir_for_each_block(block, c)                                    \
    870         list_for_each_entry(struct qblock, block, &c->blocks, link)
    871 
    872 #define qir_for_each_block_rev(block, c)                                \
    873         list_for_each_entry_rev(struct qblock, block, &c->blocks, link)
    874 
    875 /* Loop over the non-NULL members of the successors array. */
    876 #define qir_for_each_successor(succ, block)                             \
    877         for (struct qblock *succ = block->successors[0];                \
    878              succ != NULL;                                              \
    879              succ = (succ == block->successors[1] ? NULL :              \
    880                      block->successors[1]))
    881 
    882 #define qir_for_each_inst(inst, block)                                  \
    883         list_for_each_entry(struct qinst, inst, &block->instructions, link)
    884 
    885 #define qir_for_each_inst_rev(inst, block)                                  \
    886         list_for_each_entry_rev(struct qinst, inst, &block->instructions, link)
    887 
    888 #define qir_for_each_inst_safe(inst, block)                             \
    889         list_for_each_entry_safe(struct qinst, inst, &block->instructions, link)
    890 
    891 #define qir_for_each_inst_inorder(inst, c)                              \
    892         qir_for_each_block(_block, c)                                   \
    893                 qir_for_each_inst_safe(inst, _block)
    894 
    895 #endif /* VC4_QIR_H */
    896