/*
 * Copyright 2014 Connor Abbott
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0 (at) gmail.com)
 *
 */

#ifndef NIR_H
#define NIR_H

#include "util/hash_table.h"
#include "compiler/glsl/list.h"
#include "GL/gl.h" /* GLenum */
#include "util/list.h"
#include "util/ralloc.h"
#include "util/set.h"
#include "util/bitset.h"
#include "util/macros.h"
#include "compiler/nir_types.h"
#include "compiler/shader_enums.h"
#include "compiler/shader_info.h"
#include <stdio.h>

#ifndef NDEBUG
#include "util/debug.h"
#endif /* NDEBUG */

#include "nir_opcodes.h"

#if defined(_WIN32) && !defined(snprintf)
#define snprintf _snprintf
#endif

#ifdef __cplusplus
extern "C" {
#endif

struct gl_program;
struct gl_shader_program;

#define NIR_FALSE 0u
#define NIR_TRUE (~0u)

/** Defines a cast function
 *
 * This macro defines a cast function from in_type to out_type where
 * out_type is some structure type that contains a field of type in_type.
 *
 * Note that you have to be a bit careful as the generated cast function
 * destroys constness.
 */
#define NIR_DEFINE_CAST(name, in_type, out_type, field,   \
                        type_field, type_value)           \
static inline out_type *                                  \
name(const in_type *parent)                               \
{                                                         \
   assert(parent && parent->type_field == type_value);    \
   return exec_node_data(out_type, parent, field);        \
}

struct nir_function;
struct nir_shader;
struct nir_instr;


/**
 * Description of built-in state associated with a uniform
 *
 * \sa nir_variable::state_slots
 */
typedef struct {
   int tokens[5];
   int swizzle;
} nir_state_slot;

typedef enum {
   nir_var_shader_in       = (1 << 0),
   nir_var_shader_out      = (1 << 1),
   nir_var_global          = (1 << 2),
   nir_var_local           = (1 << 3),
   nir_var_uniform         = (1 << 4),
   nir_var_shader_storage  = (1 << 5),
   nir_var_system_value    = (1 << 6),
   nir_var_param           = (1 << 7),
   nir_var_shared          = (1 << 8),
   nir_var_all             = ~0,
} nir_variable_mode;
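/*
 * Example (illustrative sketch): the modes above are distinct bits, so code
 * that filters variables can test several modes with a single mask.  The
 * "var" below is a hypothetical nir_variable pointer (the type is defined
 * later in this header):
 *
 *    if (var->data.mode & (nir_var_shader_in | nir_var_shader_out))
 *       ...variable is part of the shader interface...
 */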
/**
 * Rounding modes.
 */
typedef enum {
   nir_rounding_mode_undef = 0,
   nir_rounding_mode_rtne  = 1, /* round to nearest even */
   nir_rounding_mode_ru    = 2, /* round up */
   nir_rounding_mode_rd    = 3, /* round down */
   nir_rounding_mode_rtz   = 4, /* round towards zero */
} nir_rounding_mode;

typedef union {
   float f32[4];
   double f64[4];
   int8_t i8[4];
   uint8_t u8[4];
   int16_t i16[4];
   uint16_t u16[4];
   int32_t i32[4];
   uint32_t u32[4];
   int64_t i64[4];
   uint64_t u64[4];
} nir_const_value;

typedef struct nir_constant {
   /**
    * Value of the constant.
    *
    * The field used to back the values supplied by the constant is determined
    * by the type associated with the \c nir_variable.  Constants may be
    * scalars, vectors, or matrices.
    */
   nir_const_value values[4];

   /* we could get this from the var->type but it makes clone *much* easier
    * to not have to care about the type.
    */
   unsigned num_elements;

   /* Array elements / Structure Fields */
   struct nir_constant **elements;
} nir_constant;
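/*
 * Example (illustrative sketch; the exact mapping is an assumption about how
 * NIR producers typically fill this in): a constant "vec2(1.0, 2.0)" would
 * live in values[0].f32[0] == 1.0f and values[0].f32[1] == 2.0f, with
 * matrices using one nir_const_value per column.  num_elements and elements
 * are only used for arrays and structures.
 */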
/**
 * \brief Layout qualifiers for gl_FragDepth.
 *
 * The AMD/ARB_conservative_depth extensions allow gl_FragDepth to be
 * redeclared with a layout qualifier.
 */
typedef enum {
   nir_depth_layout_none, /**< No depth layout is specified. */
   nir_depth_layout_any,
   nir_depth_layout_greater,
   nir_depth_layout_less,
   nir_depth_layout_unchanged
} nir_depth_layout;

/**
 * Either a uniform, global variable, shader input, or shader output. Based on
 * ir_variable - it should be easy to translate between the two.
 */

typedef struct nir_variable {
   struct exec_node node;

   /**
    * Declared type of the variable
    */
   const struct glsl_type *type;

   /**
    * Declared name of the variable
    */
   char *name;

   struct nir_variable_data {
      /**
       * Storage class of the variable.
       *
       * \sa nir_variable_mode
       */
      nir_variable_mode mode;

      /**
       * Is the variable read-only?
       *
       * This is set for variables declared as \c const, shader inputs,
       * and uniforms.
       */
      unsigned read_only:1;
      unsigned centroid:1;
      unsigned sample:1;
      unsigned patch:1;
      unsigned invariant:1;

      /**
       * When separate shader programs are enabled, only inputs/outputs
       * between the stages of a multi-stage separate program can be safely
       * removed from the shader interface. Other inputs/outputs must remain
       * active.
       *
       * This is also used to make sure xfb varyings that are unused by the
       * fragment shader are not removed.
       */
      unsigned always_active_io:1;

      /**
       * Interpolation mode for shader inputs / outputs
       *
       * \sa glsl_interp_mode
       */
      unsigned interpolation:2;

      /**
       * \name ARB_fragment_coord_conventions
       * @{
       */
      unsigned origin_upper_left:1;
      unsigned pixel_center_integer:1;
      /*@}*/

      /**
       * If non-zero, then this variable may be packed along with other
       * variables into a single varying slot, so this offset should be
       * applied when accessing components. For example, an offset of 1 means
       * that the x component of this variable is actually stored in
       * component y of the location specified by \c location.
       */
      unsigned location_frac:2;

      /**
       * If true, this variable represents an array of scalars that should
       * be tightly packed. In other words, consecutive array elements
       * should be stored one component apart, rather than one slot apart.
       */
      unsigned compact:1;

      /**
       * Whether this is a fragment shader output implicitly initialized with
       * the previous contents of the specified render target at the
       * framebuffer location corresponding to this shader invocation.
       */
      unsigned fb_fetch_output:1;

      /**
       * \brief Layout qualifier for gl_FragDepth.
       *
       * This is not equal to \c nir_depth_layout_none if and only if this
       * variable is \c gl_FragDepth and a layout qualifier is specified.
       */
      nir_depth_layout depth_layout;

      /**
       * Storage location of the base of this variable
       *
       * The precise meaning of this field depends on the nature of the
       * variable.
       *
       *   - Vertex shader input: one of the values from \c gl_vert_attrib.
       *   - Vertex shader output: one of the values from \c gl_varying_slot.
       *   - Geometry shader input: one of the values from \c gl_varying_slot.
       *   - Geometry shader output: one of the values from \c gl_varying_slot.
       *   - Fragment shader input: one of the values from \c gl_varying_slot.
       *   - Fragment shader output: one of the values from \c gl_frag_result.
       *   - Uniforms: Per-stage uniform slot number for default uniform block.
       *   - Uniforms: Index within the uniform block definition for UBO members.
       *   - Non-UBO Uniforms: uniform slot number.
       *   - Other: This field is not currently used.
       *
       * If the variable is a uniform, shader input, or shader output, and the
       * slot has not been assigned, the value will be -1.
       */
      int location;

      /**
       * The actual location of the variable in the IR. Only valid for inputs
       * and outputs.
       */
      unsigned int driver_location;

      /**
       * Vertex stream output identifier.
       *
       * For packed outputs, bit 31 is set and bits [2*i+1,2*i] indicate the
       * stream of the i-th component.
       */
      unsigned stream;

      /**
       * Output index for dual source blending.
       */
      int index;

      /**
       * Descriptor set binding for sampler or UBO.
       */
      int descriptor_set;

      /**
       * Initial binding point for a sampler or UBO.
       *
       * For array types, this represents the binding point for the first
       * element.
       */
      int binding;

      /**
       * Location an atomic counter is stored at.
       */
      unsigned offset;

      /**
       * ARB_shader_image_load_store qualifiers.
       */
      struct {
         bool read_only; /**< "readonly" qualifier. */
         bool write_only; /**< "writeonly" qualifier. */
         bool coherent;
         bool _volatile;
         bool restrict_flag;

         /** Image internal format if specified explicitly, otherwise GL_NONE. */
         GLenum format;
      } image;
   } data;

   /**
    * Built-in state that backs this uniform
    *
    * Once set at variable creation, \c state_slots must remain invariant.
    * This is because, ideally, this array would be shared by all clones of
    * this variable in the IR tree. In other words, we'd really like for it
    * to be a fly-weight.
    *
    * If the variable is not a uniform, \c num_state_slots will be zero and
    * \c state_slots will be \c NULL.
    */
   /*@{*/
   unsigned num_state_slots;    /**< Number of state slots used */
   nir_state_slot *state_slots; /**< State descriptors. */
   /*@}*/
   /**
    * Constant expression assigned in the initializer of the variable
    *
    * This field should only be used temporarily by creators of NIR shaders
    * and then lower_constant_initializers can be used to get rid of them.
    * Most of the rest of NIR ignores this field or asserts that it's NULL.
    */
   nir_constant *constant_initializer;

   /**
    * For variables that are in an interface block or are an instance of an
    * interface block, this is the \c GLSL_TYPE_INTERFACE type for that block.
    *
    * \sa ir_variable::location
    */
   const struct glsl_type *interface_type;
} nir_variable;

#define nir_foreach_variable(var, var_list) \
   foreach_list_typed(nir_variable, var, node, var_list)

#define nir_foreach_variable_safe(var, var_list) \
   foreach_list_typed_safe(nir_variable, var, node, var_list)

static inline bool
nir_variable_is_global(const nir_variable *var)
{
   return var->data.mode != nir_var_local && var->data.mode != nir_var_param;
}

typedef struct nir_register {
   struct exec_node node;

   unsigned num_components;  /**< number of vector components */
   unsigned num_array_elems; /**< size of array (0 for no array) */

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;

   /** generic register index. */
   unsigned index;

   /** only for debug purposes, can be NULL */
   const char *name;

   /** whether this register is local (per-function) or global (per-shader) */
   bool is_global;

   /**
    * If this flag is set to true, then accessing channels >= num_components
    * is well-defined, and simply spills over to the next array element. This
    * is useful for backends that can do per-component accessing, in
    * particular scalar backends. By setting this flag and making
    * num_components equal to 1, structures can be packed tightly into
    * registers and then registers can be accessed per-component to get to
    * each structure member, even if it crosses vec4 boundaries.
    */
   bool is_packed;

   /** set of nir_srcs where this register is used (read from) */
   struct list_head uses;

   /** set of nir_dests where this register is defined (written to) */
   struct list_head defs;

   /** set of nir_ifs where this register is used as a condition */
   struct list_head if_uses;
} nir_register;

#define nir_foreach_register(reg, reg_list) \
   foreach_list_typed(nir_register, reg, node, reg_list)
#define nir_foreach_register_safe(reg, reg_list) \
   foreach_list_typed_safe(nir_register, reg, node, reg_list)
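/*
 * Example (illustrative sketch): iterating one of the variable lists kept by
 * a shader.  "shader" is assumed to be a nir_shader pointer (the type is
 * defined later in this header); its ->uniforms list holds nir_variables:
 *
 *    nir_foreach_variable(var, &shader->uniforms) {
 *       if (var->data.read_only)
 *          ...;
 *    }
 *
 * Use the _safe variant when removing variables while iterating.
 */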
typedef enum {
   nir_instr_type_alu,
   nir_instr_type_call,
   nir_instr_type_tex,
   nir_instr_type_intrinsic,
   nir_instr_type_load_const,
   nir_instr_type_jump,
   nir_instr_type_ssa_undef,
   nir_instr_type_phi,
   nir_instr_type_parallel_copy,
} nir_instr_type;

typedef struct nir_instr {
   struct exec_node node;
   nir_instr_type type;
   struct nir_block *block;

   /** generic instruction index. */
   unsigned index;

   /* A temporary for optimization and analysis passes to use for storing
    * flags.  For instance, DCE uses this to store the "dead/live" info.
    */
   uint8_t pass_flags;
} nir_instr;

static inline nir_instr *
nir_instr_next(nir_instr *instr)
{
   struct exec_node *next = exec_node_get_next(&instr->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_instr, next, node);
}

static inline nir_instr *
nir_instr_prev(nir_instr *instr)
{
   struct exec_node *prev = exec_node_get_prev(&instr->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_instr, prev, node);
}

static inline bool
nir_instr_is_first(const nir_instr *instr)
{
   return exec_node_is_head_sentinel(exec_node_get_prev_const(&instr->node));
}

static inline bool
nir_instr_is_last(const nir_instr *instr)
{
   return exec_node_is_tail_sentinel(exec_node_get_next_const(&instr->node));
}

typedef struct nir_ssa_def {
   /** for debugging only, can be NULL */
   const char* name;

   /** generic SSA definition index. */
   unsigned index;

   /** Index into the live_in and live_out bitfields */
   unsigned live_index;

   nir_instr *parent_instr;

   /** set of nir_srcs where this SSA value is used (read from) */
   struct list_head uses;

   /** set of nir_ifs where this SSA value is used as a condition */
   struct list_head if_uses;

   uint8_t num_components;

   /* The bit-size of each channel; must be one of 8, 16, 32, or 64 */
   uint8_t bit_size;
} nir_ssa_def;

struct nir_src;

typedef struct {
   nir_register *reg;
   struct nir_src *indirect; /**< NULL for no indirect offset */
   unsigned base_offset;

   /* TODO use-def chain goes here */
} nir_reg_src;

typedef struct {
   nir_instr *parent_instr;
   struct list_head def_link;

   nir_register *reg;
   struct nir_src *indirect; /**< NULL for no indirect offset */
   unsigned base_offset;

   /* TODO def-use chain goes here */
} nir_reg_dest;

struct nir_if;

typedef struct nir_src {
   union {
      nir_instr *parent_instr;
      struct nir_if *parent_if;
   };

   struct list_head use_link;

   union {
      nir_reg_src reg;
      nir_ssa_def *ssa;
   };

   bool is_ssa;
} nir_src;

static inline nir_src
nir_src_init(void)
{
   nir_src src = { { NULL } };
   return src;
}

#define NIR_SRC_INIT nir_src_init()

#define nir_foreach_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->uses, use_link)

#define nir_foreach_if_use(src, reg_or_ssa_def) \
   list_for_each_entry(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)

#define nir_foreach_if_use_safe(src, reg_or_ssa_def) \
   list_for_each_entry_safe(nir_src, src, &(reg_or_ssa_def)->if_uses, use_link)
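/*
 * Example (illustrative sketch): the use lists above make def-use queries
 * cheap.  Checking whether an SSA def "def" (a hypothetical nir_ssa_def
 * pointer) has no readers at all:
 *
 *    bool unused = list_empty(&def->uses) && list_empty(&def->if_uses);
 *
 * Passes that rewrite or remove uses while iterating should use the _safe
 * variants of the macros above.
 */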
typedef struct {
   union {
      nir_reg_dest reg;
      nir_ssa_def ssa;
   };

   bool is_ssa;
} nir_dest;

static inline nir_dest
nir_dest_init(void)
{
   nir_dest dest = { { { NULL } } };
   return dest;
}

#define NIR_DEST_INIT nir_dest_init()

#define nir_foreach_def(dest, reg) \
   list_for_each_entry(nir_dest, dest, &(reg)->defs, reg.def_link)

#define nir_foreach_def_safe(dest, reg) \
   list_for_each_entry_safe(nir_dest, dest, &(reg)->defs, reg.def_link)

static inline nir_src
nir_src_for_ssa(nir_ssa_def *def)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = true;
   src.ssa = def;

   return src;
}

static inline nir_src
nir_src_for_reg(nir_register *reg)
{
   nir_src src = NIR_SRC_INIT;

   src.is_ssa = false;
   src.reg.reg = reg;
   src.reg.indirect = NULL;
   src.reg.base_offset = 0;

   return src;
}

static inline nir_dest
nir_dest_for_reg(nir_register *reg)
{
   nir_dest dest = NIR_DEST_INIT;

   dest.reg.reg = reg;

   return dest;
}

static inline unsigned
nir_src_bit_size(nir_src src)
{
   return src.is_ssa ? src.ssa->bit_size : src.reg.reg->bit_size;
}

static inline unsigned
nir_dest_bit_size(nir_dest dest)
{
   return dest.is_ssa ? dest.ssa.bit_size : dest.reg.reg->bit_size;
}

void nir_src_copy(nir_src *dest, const nir_src *src, void *instr_or_if);
void nir_dest_copy(nir_dest *dest, const nir_dest *src, nir_instr *instr);

typedef struct {
   nir_src src;

   /**
    * \name input modifiers
    */
   /*@{*/
   /**
    * For inputs interpreted as floating point, flips the sign bit. For
    * inputs interpreted as integers, performs the two's complement negation.
    */
   bool negate;

   /**
    * Clears the sign bit for floating point values, and computes the integer
    * absolute value for integers. Note that the negate modifier acts after
    * the absolute value modifier, therefore if both are set then all inputs
    * will become non-positive.
    */
   bool abs;
   /*@}*/

   /**
    * For each input component, says which component of the register it is
    * chosen from. Note that which elements of the swizzle are used and which
    * are ignored are based on the write mask for most opcodes - for example,
    * a statement like "foo.xzw = bar.zyx" would have a writemask of 1101b and
    * a swizzle of {2, x, 1, 0} where x means "don't care."
    */
   uint8_t swizzle[4];
} nir_alu_src;
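/*
 * Example (illustrative sketch): filling out an ALU source that reads
 * "bar.zyx" from an SSA def "bar" (a hypothetical nir_ssa_def pointer),
 * matching the "foo.xzw = bar.zyx" description above:
 *
 *    nir_alu_src asrc;
 *    asrc.src = nir_src_for_ssa(bar);
 *    asrc.negate = false;
 *    asrc.abs = false;
 *    asrc.swizzle[0] = 2;   component x reads bar.z
 *    asrc.swizzle[1] = 0;   don't care (masked out by the writemask)
 *    asrc.swizzle[2] = 1;   component z reads bar.y
 *    asrc.swizzle[3] = 0;   component w reads bar.x
 */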
typedef struct {
   nir_dest dest;

   /**
    * \name saturate output modifier
    *
    * Only valid for opcodes that output floating-point numbers. Clamps the
    * output to between 0.0 and 1.0 inclusive.
    */

   bool saturate;

   unsigned write_mask : 4; /* ignored if dest.is_ssa is true */
} nir_alu_dest;

typedef enum {
   nir_type_invalid = 0, /* Not a valid type */
   nir_type_float,
   nir_type_int,
   nir_type_uint,
   nir_type_bool,
   nir_type_bool32 =    32 | nir_type_bool,
   nir_type_int8 =      8  | nir_type_int,
   nir_type_int16 =     16 | nir_type_int,
   nir_type_int32 =     32 | nir_type_int,
   nir_type_int64 =     64 | nir_type_int,
   nir_type_uint8 =     8  | nir_type_uint,
   nir_type_uint16 =    16 | nir_type_uint,
   nir_type_uint32 =    32 | nir_type_uint,
   nir_type_uint64 =    64 | nir_type_uint,
   nir_type_float16 =   16 | nir_type_float,
   nir_type_float32 =   32 | nir_type_float,
   nir_type_float64 =   64 | nir_type_float,
} nir_alu_type;

#define NIR_ALU_TYPE_SIZE_MASK      0xfffffff8
#define NIR_ALU_TYPE_BASE_TYPE_MASK 0x00000007

static inline unsigned
nir_alu_type_get_type_size(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_SIZE_MASK;
}

static inline unsigned
nir_alu_type_get_base_type(nir_alu_type type)
{
   return type & NIR_ALU_TYPE_BASE_TYPE_MASK;
}
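/*
 * Example (illustrative): a sized type is just the base type with the bit
 * size OR'd into the high bits, so with nir_type_float32 == (32 | nir_type_float):
 *
 *    nir_alu_type_get_base_type(nir_type_float32) == nir_type_float
 *    nir_alu_type_get_type_size(nir_type_float32) == 32
 *
 * nir_type_invalid through nir_type_bool are the unsized base types.
 */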
static inline nir_alu_type
nir_get_nir_type_for_glsl_base_type(enum glsl_base_type base_type)
{
   switch (base_type) {
   case GLSL_TYPE_BOOL:
      return nir_type_bool32;
   case GLSL_TYPE_UINT:
      return nir_type_uint32;
   case GLSL_TYPE_INT:
      return nir_type_int32;
   case GLSL_TYPE_UINT16:
      return nir_type_uint16;
   case GLSL_TYPE_INT16:
      return nir_type_int16;
   case GLSL_TYPE_UINT64:
      return nir_type_uint64;
   case GLSL_TYPE_INT64:
      return nir_type_int64;
   case GLSL_TYPE_FLOAT:
      return nir_type_float32;
   case GLSL_TYPE_FLOAT16:
      return nir_type_float16;
   case GLSL_TYPE_DOUBLE:
      return nir_type_float64;
   default:
      unreachable("unknown type");
   }
}

static inline nir_alu_type
nir_get_nir_type_for_glsl_type(const struct glsl_type *type)
{
   return nir_get_nir_type_for_glsl_base_type(glsl_get_base_type(type));
}

nir_op nir_type_conversion_op(nir_alu_type src, nir_alu_type dst,
                              nir_rounding_mode rnd);

typedef enum {
   NIR_OP_IS_COMMUTATIVE = (1 << 0),
   NIR_OP_IS_ASSOCIATIVE = (1 << 1),
} nir_op_algebraic_property;

typedef struct {
   const char *name;

   unsigned num_inputs;

   /**
    * The number of components in the output
    *
    * If non-zero, this is the size of the output and input sizes are
    * explicitly given; swizzle and writemask are still in effect, but if
    * the output component is masked out, then the input component may
    * still be in use.
    *
    * If zero, the opcode acts in the standard, per-component manner; the
    * operation is performed on each component (except the ones that are
    * masked out) with the input being taken from the input swizzle for
    * that component.
    *
    * The size of some of the inputs may be given (i.e. non-zero) even
    * though output_size is zero; in that case, the inputs with a zero
    * size act per-component, while the inputs with non-zero size don't.
    */
   unsigned output_size;

   /**
    * The type of vector that the instruction outputs. Note that the
    * saturate modifier is only allowed on outputs with the float type.
    */

   nir_alu_type output_type;

   /**
    * The number of components in each input
    */
   unsigned input_sizes[4];

   /**
    * The type of vector that each input takes. Note that negate and
    * absolute value are only allowed on inputs with int or float type and
    * behave differently on the two.
    */
   nir_alu_type input_types[4];

   nir_op_algebraic_property algebraic_properties;
} nir_op_info;

extern const nir_op_info nir_op_infos[nir_num_opcodes];

typedef struct nir_alu_instr {
   nir_instr instr;
   nir_op op;

   /** Indicates that this ALU instruction generates an exact value
    *
    * This is kind of a mixture of GLSL "precise" and "invariant" and not
    * really equivalent to either. This indicates that the value generated by
    * this operation is high-precision and any code transformations that touch
    * it must ensure that the resulting value is bit-for-bit identical to the
    * original.
    */
   bool exact;

   nir_alu_dest dest;
   nir_alu_src src[];
} nir_alu_instr;

void nir_alu_src_copy(nir_alu_src *dest, const nir_alu_src *src,
                      nir_alu_instr *instr);
void nir_alu_dest_copy(nir_alu_dest *dest, const nir_alu_dest *src,
                       nir_alu_instr *instr);

/* is this source channel used? */
static inline bool
nir_alu_instr_channel_used(const nir_alu_instr *instr, unsigned src,
                           unsigned channel)
{
   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return channel < nir_op_infos[instr->op].input_sizes[src];

   return (instr->dest.write_mask >> channel) & 1;
}

/*
 * For instructions whose destinations are SSA, get the number of channels
 * used for a source
 */
static inline unsigned
nir_ssa_alu_instr_src_components(const nir_alu_instr *instr, unsigned src)
{
   assert(instr->dest.dest.is_ssa);

   if (nir_op_infos[instr->op].input_sizes[src] > 0)
      return nir_op_infos[instr->op].input_sizes[src];

   return instr->dest.dest.ssa.num_components;
}

bool nir_alu_srcs_equal(const nir_alu_instr *alu1, const nir_alu_instr *alu2,
                        unsigned src1, unsigned src2);
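/*
 * Example (illustrative sketch): how the info table and helpers above are
 * typically combined.  For an SSA-destination ALU instruction "alu" (a
 * hypothetical nir_alu_instr pointer), a pass can visit the component of
 * each source that actually feeds output channel i:
 *
 *    for (unsigned i = 0; i < alu->dest.dest.ssa.num_components; i++) {
 *       for (unsigned s = 0; s < nir_op_infos[alu->op].num_inputs; s++) {
 *          if (!nir_alu_instr_channel_used(alu, s, i))
 *             continue;
 *          unsigned chan = alu->src[s].swizzle[i];
 *          ...source s contributes its component "chan"...
 *       }
 *    }
 */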
typedef enum {
   nir_deref_type_var,
   nir_deref_type_array,
   nir_deref_type_struct
} nir_deref_type;

typedef struct nir_deref {
   nir_deref_type deref_type;
   struct nir_deref *child;
   const struct glsl_type *type;
} nir_deref;

typedef struct {
   nir_deref deref;

   nir_variable *var;
} nir_deref_var;

/* This enum describes how the array is referenced.  If the deref is direct
 * then the base_offset is used.  If the deref is indirect then the offset is
 * given by base_offset + indirect.  If the deref is a wildcard then the
 * deref refers to all of the elements of the array at the same time.
 * Wildcard dereferences are only ever allowed in copy_var intrinsics and
 * the source and destination derefs must have matching wildcards.
 */
typedef enum {
   nir_deref_array_type_direct,
   nir_deref_array_type_indirect,
   nir_deref_array_type_wildcard,
} nir_deref_array_type;

typedef struct {
   nir_deref deref;

   nir_deref_array_type deref_array_type;
   unsigned base_offset;
   nir_src indirect;
} nir_deref_array;

typedef struct {
   nir_deref deref;

   unsigned index;
} nir_deref_struct;

NIR_DEFINE_CAST(nir_deref_as_var, nir_deref, nir_deref_var, deref,
                deref_type, nir_deref_type_var)
NIR_DEFINE_CAST(nir_deref_as_array, nir_deref, nir_deref_array, deref,
                deref_type, nir_deref_type_array)
NIR_DEFINE_CAST(nir_deref_as_struct, nir_deref, nir_deref_struct, deref,
                deref_type, nir_deref_type_struct)

/* Returns the last deref in the chain. */
static inline nir_deref *
nir_deref_tail(nir_deref *deref)
{
   while (deref->child)
      deref = deref->child;
   return deref;
}
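/*
 * Example (illustrative): an access like "foo[1].bar" is represented as a
 * chain of three links:
 *
 *    nir_deref_var      (var == foo)
 *     -> nir_deref_array  (deref_array_type_direct, base_offset == 1)
 *         -> nir_deref_struct (index == field number of "bar")
 *
 * nir_deref_tail() returns the nir_deref_struct link here, and its ->type
 * is the type of the value actually being loaded or stored.
 */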
typedef struct {
   nir_instr instr;

   unsigned num_params;
   nir_deref_var **params;
   nir_deref_var *return_deref;

   struct nir_function *callee;
} nir_call_instr;

#define INTRINSIC(name, num_srcs, src_components, has_dest, dest_components, \
                  num_variables, num_indices, idx0, idx1, idx2, flags) \
   nir_intrinsic_##name,

#define LAST_INTRINSIC(name) nir_last_intrinsic = nir_intrinsic_##name,

typedef enum {
#include "nir_intrinsics.h"
   nir_num_intrinsics = nir_last_intrinsic + 1
} nir_intrinsic_op;

#define NIR_INTRINSIC_MAX_CONST_INDEX 3

/** Represents an intrinsic
 *
 * An intrinsic is an instruction type for handling things that are
 * more-or-less regular operations but don't just consume and produce SSA
 * values like ALU operations do.  Intrinsics are not for things that have
 * special semantic meaning such as phi nodes and parallel copies.
 * Examples of intrinsics include variable load/store operations, system
 * value loads, and the like.  Even though texturing more-or-less falls
 * under this category, texturing is its own instruction type because
 * trying to represent texturing with intrinsics would lead to a
 * combinatorial explosion of intrinsic opcodes.
 *
 * By having a single instruction type for handling a lot of different
 * cases, optimization passes can look for intrinsics and, for the most
 * part, completely ignore them.  Each intrinsic type also has a few
 * possible flags that govern whether or not they can be reordered or
 * eliminated.  That way passes like dead code elimination can still work
 * on intrinsics without understanding the meaning of each.
 *
 * Each intrinsic has some number of constant indices, some number of
 * variables, and some number of sources.  What these sources, variables,
 * and indices mean depends on the intrinsic and is documented with the
 * intrinsic declaration in nir_intrinsics.h.  Intrinsics and texture
 * instructions are the only types of instruction that can operate on
 * variables.
 */
typedef struct {
   nir_instr instr;

   nir_intrinsic_op intrinsic;

   nir_dest dest;

   /** number of components if this is a vectorized intrinsic
    *
    * Similarly to ALU operations, some intrinsics are vectorized.
    * An intrinsic is vectorized if nir_intrinsic_infos.dest_components == 0.
    * For vectorized intrinsics, the num_components field specifies the
    * number of destination components and the number of source components
    * for all sources with nir_intrinsic_infos.src_components[i] == 0.
    */
   uint8_t num_components;

   int const_index[NIR_INTRINSIC_MAX_CONST_INDEX];

   nir_deref_var *variables[2];

   nir_src src[];
} nir_intrinsic_instr;

/**
 * \name NIR intrinsics semantic flags
 *
 * Information about what the compiler can do with the intrinsics.
 *
 * \sa nir_intrinsic_info::flags
 */
typedef enum {
   /**
    * Whether the intrinsic can be safely eliminated if none of its output
    * values are used.
    */
   NIR_INTRINSIC_CAN_ELIMINATE = (1 << 0),

   /**
    * Whether the intrinsic can be reordered with respect to any other
    * intrinsic, i.e. whether the only reordering dependencies of the
    * intrinsic are due to the register reads/writes.
    */
   NIR_INTRINSIC_CAN_REORDER = (1 << 1),
} nir_intrinsic_semantic_flag;

/**
 * \name NIR intrinsics const-index flag
 *
 * Indicates the usage of a const_index slot.
 *
 * \sa nir_intrinsic_info::index_map
 */
typedef enum {
   /**
    * Generally, instructions that take an offset src argument can encode a
    * constant 'base' value which is added to the offset.
    */
   NIR_INTRINSIC_BASE = 1,

   /**
    * For store instructions, a writemask for the store.
    */
   NIR_INTRINSIC_WRMASK = 2,

   /**
    * The stream-id for GS emit_vertex/end_primitive intrinsics.
    */
   NIR_INTRINSIC_STREAM_ID = 3,

   /**
    * The clip-plane id for load_user_clip_plane intrinsic.
    */
   NIR_INTRINSIC_UCP_ID = 4,

   /**
    * The amount of data, starting from BASE, that this instruction may
    * access.  This is used to provide bounds if the offset is not constant.
    */
   NIR_INTRINSIC_RANGE = 5,

   /**
    * The Vulkan descriptor set for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_DESC_SET = 6,

   /**
    * The Vulkan descriptor set binding for vulkan_resource_index intrinsic.
    */
   NIR_INTRINSIC_BINDING = 7,

   /**
    * Component offset.
    */
   NIR_INTRINSIC_COMPONENT = 8,

   /**
    * Interpolation mode (only meaningful for FS inputs).
    */
   NIR_INTRINSIC_INTERP_MODE = 9,

   NIR_INTRINSIC_NUM_INDEX_FLAGS,

} nir_intrinsic_index_flag;

#define NIR_INTRINSIC_MAX_INPUTS 4
typedef struct {
   const char *name;

   unsigned num_srcs; /**< number of register/SSA inputs */

   /** number of components of each input register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned src_components[NIR_INTRINSIC_MAX_INPUTS];

   bool has_dest;

   /** number of components of the output register
    *
    * If this value is 0, the number of components is given by the
    * num_components field of nir_intrinsic_instr.
    */
   unsigned dest_components;

   /** the number of inputs/outputs that are variables */
   unsigned num_variables;

   /** the number of constant indices used by the intrinsic */
   unsigned num_indices;

   /** indicates the usage of intr->const_index[n] */
   unsigned index_map[NIR_INTRINSIC_NUM_INDEX_FLAGS];

   /** semantic flags for calls to this intrinsic */
   nir_intrinsic_semantic_flag flags;
} nir_intrinsic_info;

extern const nir_intrinsic_info nir_intrinsic_infos[nir_num_intrinsics];


#define INTRINSIC_IDX_ACCESSORS(name, flag, type)                             \
static inline type                                                            \
nir_intrinsic_##name(const nir_intrinsic_instr *instr)                        \
{                                                                             \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];   \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                         \
   return instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1];      \
}                                                                             \
static inline void                                                            \
nir_intrinsic_set_##name(nir_intrinsic_instr *instr, type val)                \
{                                                                             \
   const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];   \
   assert(info->index_map[NIR_INTRINSIC_##flag] > 0);                         \
   instr->const_index[info->index_map[NIR_INTRINSIC_##flag] - 1] = val;       \
}

INTRINSIC_IDX_ACCESSORS(write_mask, WRMASK, unsigned)
INTRINSIC_IDX_ACCESSORS(base, BASE, int)
INTRINSIC_IDX_ACCESSORS(stream_id, STREAM_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(ucp_id, UCP_ID, unsigned)
INTRINSIC_IDX_ACCESSORS(range, RANGE, unsigned)
INTRINSIC_IDX_ACCESSORS(desc_set, DESC_SET, unsigned)
INTRINSIC_IDX_ACCESSORS(binding, BINDING, unsigned)
INTRINSIC_IDX_ACCESSORS(component, COMPONENT, unsigned)
INTRINSIC_IDX_ACCESSORS(interp_mode, INTERP_MODE, unsigned)
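/*
 * Example (illustrative sketch): each accessor pair above maps a semantic
 * index flag to whichever const_index slot the intrinsic's index_map puts
 * it in.  For a hypothetical store intrinsic "store" whose info lists BASE
 * and WRMASK:
 *
 *    unsigned mask = nir_intrinsic_write_mask(store);
 *    nir_intrinsic_set_base(store, nir_intrinsic_base(store) + 16);
 *
 * The assert() in the generated accessors fires if the intrinsic does not
 * actually use the requested index.
 */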
/**
 * \group texture information
 *
 * This gives semantic information about textures which is useful to the
 * frontend, the backend, and lowering passes, but not the optimizer.
 */

typedef enum {
   nir_tex_src_coord,
   nir_tex_src_projector,
   nir_tex_src_comparator, /* shadow comparator */
   nir_tex_src_offset,
   nir_tex_src_bias,
   nir_tex_src_lod,
   nir_tex_src_ms_index, /* MSAA sample index */
   nir_tex_src_ms_mcs, /* MSAA compression value */
   nir_tex_src_ddx,
   nir_tex_src_ddy,
   nir_tex_src_texture_offset, /**< dynamically uniform indirect offset */
   nir_tex_src_sampler_offset, /**< dynamically uniform indirect offset */
   nir_tex_src_plane,          /**< selects plane for planar textures */
   nir_num_tex_src_types
} nir_tex_src_type;

typedef struct {
   nir_src src;
   nir_tex_src_type src_type;
} nir_tex_src;

typedef enum {
   nir_texop_tex,                /**< Regular texture look-up */
   nir_texop_txb,                /**< Texture look-up with LOD bias */
   nir_texop_txl,                /**< Texture look-up with explicit LOD */
   nir_texop_txd,                /**< Texture look-up with partial derivatives */
   nir_texop_txf,                /**< Texel fetch with explicit LOD */
   nir_texop_txf_ms,             /**< Multisample texture fetch */
   nir_texop_txf_ms_mcs,         /**< Multisample compression value fetch */
   nir_texop_txs,                /**< Texture size */
   nir_texop_lod,                /**< Texture lod query */
   nir_texop_tg4,                /**< Texture gather */
   nir_texop_query_levels,       /**< Texture levels query */
   nir_texop_texture_samples,    /**< Texture samples query */
   nir_texop_samples_identical,  /**< Query whether all samples are definitely
                                  * identical.
                                  */
} nir_texop;

typedef struct {
   nir_instr instr;

   enum glsl_sampler_dim sampler_dim;
   nir_alu_type dest_type;

   nir_texop op;
   nir_dest dest;
   nir_tex_src *src;
   unsigned num_srcs, coord_components;
   bool is_array, is_shadow;

   /**
    * If is_shadow is true, whether this is the old-style shadow that outputs
    * 4 components or the new-style shadow that outputs 1 component.
    */
   bool is_new_style_shadow;

   /* gather component selector */
   unsigned component : 2;

   /** The texture index
    *
    * If this texture instruction has a nir_tex_src_texture_offset source,
    * then the texture index is given by texture_index + texture_offset.
    */
   unsigned texture_index;

   /** The size of the texture array or 0 if it's not an array */
   unsigned texture_array_size;

   /** The texture deref
    *
    * If this is null, use texture_index instead.
    */
   nir_deref_var *texture;

   /** The sampler index
    *
    * The following operations do not require a sampler and, as such, this
    * field should be ignored:
    *    - nir_texop_txf
    *    - nir_texop_txf_ms
    *    - nir_texop_txs
    *    - nir_texop_lod
    *    - nir_texop_query_levels
    *    - nir_texop_texture_samples
    *    - nir_texop_samples_identical
    *
    * If this texture instruction has a nir_tex_src_sampler_offset source,
    * then the sampler index is given by sampler_index + sampler_offset.
    */
   unsigned sampler_index;

   /** The sampler deref
    *
    * If this is null, use sampler_index instead.
    */
   nir_deref_var *sampler;
} nir_tex_instr;

static inline unsigned
nir_tex_instr_dest_size(const nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs: {
      unsigned ret;
      switch (instr->sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         ret = 1;
         break;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_CUBE:
      case GLSL_SAMPLER_DIM_MS:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_EXTERNAL:
      case GLSL_SAMPLER_DIM_SUBPASS:
         ret = 2;
         break;
      case GLSL_SAMPLER_DIM_3D:
         ret = 3;
         break;
      default:
         unreachable("not reached");
      }
      if (instr->is_array)
         ret++;
      return ret;
   }

   case nir_texop_lod:
      return 2;

   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_samples_identical:
      return 1;

   default:
      if (instr->is_shadow && instr->is_new_style_shadow)
         return 1;

      return 4;
   }
}
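/*
 * Example (illustrative): for a txs query on a 2D array texture,
 * sampler_dim is GLSL_SAMPLER_DIM_2D and is_array is true, so
 * nir_tex_instr_dest_size() returns 3: width, height, and the number of
 * array layers.
 */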
/* Returns true if this texture operation queries something about the texture
 * rather than actually sampling it.
 */
static inline bool
nir_tex_instr_is_query(const nir_tex_instr *instr)
{
   switch (instr->op) {
   case nir_texop_txs:
   case nir_texop_lod:
   case nir_texop_texture_samples:
   case nir_texop_query_levels:
   case nir_texop_txf_ms_mcs:
      return true;
   case nir_texop_tex:
   case nir_texop_txb:
   case nir_texop_txl:
   case nir_texop_txd:
   case nir_texop_txf:
   case nir_texop_txf_ms:
   case nir_texop_tg4:
      return false;
   default:
      unreachable("Invalid texture opcode");
   }
}

static inline nir_alu_type
nir_tex_instr_src_type(const nir_tex_instr *instr, unsigned src)
{
   switch (instr->src[src].src_type) {
   case nir_tex_src_coord:
      switch (instr->op) {
      case nir_texop_txf:
      case nir_texop_txf_ms:
      case nir_texop_txf_ms_mcs:
      case nir_texop_samples_identical:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_lod:
      switch (instr->op) {
      case nir_texop_txs:
      case nir_texop_txf:
         return nir_type_int;

      default:
         return nir_type_float;
      }

   case nir_tex_src_projector:
   case nir_tex_src_comparator:
   case nir_tex_src_bias:
   case nir_tex_src_ddx:
   case nir_tex_src_ddy:
      return nir_type_float;

   case nir_tex_src_offset:
   case nir_tex_src_ms_index:
   case nir_tex_src_texture_offset:
   case nir_tex_src_sampler_offset:
      return nir_type_int;

   default:
      unreachable("Invalid texture source type");
   }
}
static inline unsigned
nir_tex_instr_src_size(const nir_tex_instr *instr, unsigned src)
{
   if (instr->src[src].src_type == nir_tex_src_coord)
      return instr->coord_components;

   /* The MCS value is expected to be a vec4 returned by a txf_ms_mcs */
   if (instr->src[src].src_type == nir_tex_src_ms_mcs)
      return 4;

   if (instr->src[src].src_type == nir_tex_src_ddx ||
       instr->src[src].src_type == nir_tex_src_ddy) {
      if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   /* Usual APIs don't allow cube + offset, but we allow it, with 2 coords for
    * the offset, since a cube maps to a single face.
    */
   if (instr->src[src].src_type == nir_tex_src_offset) {
      if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
         return 2;
      else if (instr->is_array)
         return instr->coord_components - 1;
      else
         return instr->coord_components;
   }

   return 1;
}

static inline int
nir_tex_instr_src_index(const nir_tex_instr *instr, nir_tex_src_type type)
{
   for (unsigned i = 0; i < instr->num_srcs; i++)
      if (instr->src[i].src_type == type)
         return (int) i;

   return -1;
}

void nir_tex_instr_add_src(nir_tex_instr *tex,
                           nir_tex_src_type src_type,
                           nir_src src);

void nir_tex_instr_remove_src(nir_tex_instr *tex, unsigned src_idx);
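/*
 * Example (illustrative sketch): texture sources are unordered, so passes
 * locate them by type.  Finding and classifying the LOD source of a
 * hypothetical nir_tex_instr "tex":
 *
 *    int idx = nir_tex_instr_src_index(tex, nir_tex_src_lod);
 *    if (idx >= 0) {
 *       nir_alu_type t = nir_tex_instr_src_type(tex, idx);
 *       ...t is nir_type_int for txs/txf, nir_type_float otherwise...
 *    }
 */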
typedef struct {
   nir_instr instr;

   nir_const_value value;

   nir_ssa_def def;
} nir_load_const_instr;

typedef enum {
   nir_jump_return,
   nir_jump_break,
   nir_jump_continue,
} nir_jump_type;

typedef struct {
   nir_instr instr;
   nir_jump_type type;
} nir_jump_instr;

/* creates a new SSA variable in an undefined state */

typedef struct {
   nir_instr instr;
   nir_ssa_def def;
} nir_ssa_undef_instr;

typedef struct {
   struct exec_node node;

   /* The predecessor block corresponding to this source */
   struct nir_block *pred;

   nir_src src;
} nir_phi_src;

#define nir_foreach_phi_src(phi_src, phi) \
   foreach_list_typed(nir_phi_src, phi_src, node, &(phi)->srcs)
#define nir_foreach_phi_src_safe(phi_src, phi) \
   foreach_list_typed_safe(nir_phi_src, phi_src, node, &(phi)->srcs)

typedef struct {
   nir_instr instr;

   struct exec_list srcs; /**< list of nir_phi_src */

   nir_dest dest;
} nir_phi_instr;

typedef struct {
   struct exec_node node;
   nir_src src;
   nir_dest dest;
} nir_parallel_copy_entry;

#define nir_foreach_parallel_copy_entry(entry, pcopy) \
   foreach_list_typed(nir_parallel_copy_entry, entry, node, &(pcopy)->entries)

typedef struct {
   nir_instr instr;

   /* A list of nir_parallel_copy_entry structures.  The sources of all of
    * the entries are copied to the corresponding destinations "in parallel".
    * In other words, if we have two entries: a -> b and b -> a, the values
    * get swapped.
    */
   struct exec_list entries;
} nir_parallel_copy_instr;

NIR_DEFINE_CAST(nir_instr_as_alu, nir_instr, nir_alu_instr, instr,
                type, nir_instr_type_alu)
NIR_DEFINE_CAST(nir_instr_as_call, nir_instr, nir_call_instr, instr,
                type, nir_instr_type_call)
NIR_DEFINE_CAST(nir_instr_as_jump, nir_instr, nir_jump_instr, instr,
                type, nir_instr_type_jump)
NIR_DEFINE_CAST(nir_instr_as_tex, nir_instr, nir_tex_instr, instr,
                type, nir_instr_type_tex)
NIR_DEFINE_CAST(nir_instr_as_intrinsic, nir_instr, nir_intrinsic_instr, instr,
                type, nir_instr_type_intrinsic)
NIR_DEFINE_CAST(nir_instr_as_load_const, nir_instr, nir_load_const_instr, instr,
                type, nir_instr_type_load_const)
NIR_DEFINE_CAST(nir_instr_as_ssa_undef, nir_instr, nir_ssa_undef_instr, instr,
                type, nir_instr_type_ssa_undef)
NIR_DEFINE_CAST(nir_instr_as_phi, nir_instr, nir_phi_instr, instr,
                type, nir_instr_type_phi)
NIR_DEFINE_CAST(nir_instr_as_parallel_copy, nir_instr,
                nir_parallel_copy_instr, instr,
                type, nir_instr_type_parallel_copy)
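/*
 * Example (illustrative sketch): the standard pattern for visiting
 * instructions and down-casting by type, using nir_foreach_instr (defined
 * with the block helpers below):
 *
 *    nir_foreach_instr(instr, block) {
 *       if (instr->type != nir_instr_type_alu)
 *          continue;
 *       nir_alu_instr *alu = nir_instr_as_alu(instr);
 *       ...the cast asserts that instr->type matches...
 *    }
 */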
/*
 * Control flow
 *
 * Control flow consists of a tree of control flow nodes, which include
 * if-statements and loops. The leaves of the tree are basic blocks, lists of
 * instructions that always run start-to-finish. Each basic block also keeps
 * track of its successors (blocks which may run immediately after the current
 * block) and predecessors (blocks which could have run immediately before the
 * current block). Each function also has a start block and an end block
 * (which is always empty); all return statements point to the end block.
 * Together, all the blocks with their predecessors and successors make up the
 * control flow graph (CFG) of the function. There are helpers that modify
 * the tree of control flow nodes while modifying the CFG appropriately; these
 * should be used instead of modifying the tree directly.
 */

typedef enum {
   nir_cf_node_block,
   nir_cf_node_if,
   nir_cf_node_loop,
   nir_cf_node_function
} nir_cf_node_type;

typedef struct nir_cf_node {
   struct exec_node node;
   nir_cf_node_type type;
   struct nir_cf_node *parent;
} nir_cf_node;

typedef struct nir_block {
   nir_cf_node cf_node;

   struct exec_list instr_list; /**< list of nir_instr */

   /** generic block index; generated by nir_index_blocks */
   unsigned index;

   /*
    * Each block can only have up to 2 successors, so we put them in a simple
    * array - no need for anything more complicated.
    */
   struct nir_block *successors[2];

   /* Set of nir_block predecessors in the CFG */
   struct set *predecessors;

   /*
    * this node's immediate dominator in the dominance tree - set to NULL for
    * the start block.
    */
   struct nir_block *imm_dom;

   /* This node's children in the dominance tree */
   unsigned num_dom_children;
   struct nir_block **dom_children;

   /* Set of nir_blocks on the dominance frontier of this block */
   struct set *dom_frontier;

   /*
    * These two indices have the property that dom_{pre,post}_index for each
    * child of this block in the dominance tree will always be between
    * dom_pre_index and dom_post_index for this block, which makes testing if
    * a given block is dominated by another block an O(1) operation.
    */
   unsigned dom_pre_index, dom_post_index;

   /* live in and out for this block; used for liveness analysis */
   BITSET_WORD *live_in;
   BITSET_WORD *live_out;
} nir_block;

static inline nir_instr *
nir_block_first_instr(nir_block *block)
{
   struct exec_node *head = exec_list_get_head(&block->instr_list);
   return exec_node_data(nir_instr, head, node);
}

static inline nir_instr *
nir_block_last_instr(nir_block *block)
{
   struct exec_node *tail = exec_list_get_tail(&block->instr_list);
   return exec_node_data(nir_instr, tail, node);
}

#define nir_foreach_instr(instr, block) \
   foreach_list_typed(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse(instr, block) \
   foreach_list_typed_reverse(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_safe(instr, block) \
   foreach_list_typed_safe(nir_instr, instr, node, &(block)->instr_list)
#define nir_foreach_instr_reverse_safe(instr, block) \
   foreach_list_typed_reverse_safe(nir_instr, instr, node, &(block)->instr_list)

typedef struct nir_if {
   nir_cf_node cf_node;
   nir_src condition;

   struct exec_list then_list; /**< list of nir_cf_node */
   struct exec_list else_list; /**< list of nir_cf_node */
} nir_if;
typedef struct {
   nir_if *nif;

   nir_instr *conditional_instr;

   nir_block *break_block;
   nir_block *continue_from_block;

   bool continue_from_then;

   struct list_head loop_terminator_link;
} nir_loop_terminator;

typedef struct {
   /* Number of instructions in the loop */
   unsigned num_instructions;

   /* How many times the loop is run (if known) */
   unsigned trip_count;
   bool is_trip_count_known;

   /* Unroll the loop regardless of its size */
   bool force_unroll;

   nir_loop_terminator *limiting_terminator;

   /* A list of loop_terminators terminating this loop. */
   struct list_head loop_terminator_list;
} nir_loop_info;

typedef struct {
   nir_cf_node cf_node;

   struct exec_list body; /**< list of nir_cf_node */

   nir_loop_info *info;
} nir_loop;

/**
 * Various bits of metadata that may be created or required by
 * optimization and analysis passes
 */
typedef enum {
   nir_metadata_none = 0x0,
   nir_metadata_block_index = 0x1,
   nir_metadata_dominance = 0x2,
   nir_metadata_live_ssa_defs = 0x4,
   nir_metadata_not_properly_reset = 0x8,
   nir_metadata_loop_analysis = 0x10,
} nir_metadata;

typedef struct {
   nir_cf_node cf_node;

   /** pointer to the function of which this is an implementation */
   struct nir_function *function;

   struct exec_list body; /**< list of nir_cf_node */

   nir_block *end_block;

   /** list for all local variables in the function */
   struct exec_list locals;

   /** array of variables used as parameters */
   unsigned num_params;
   nir_variable **params;

   /** variable used to hold the result of the function */
   nir_variable *return_var;

   /** list of local registers in the function */
   struct exec_list registers;

   /** next available local register index */
   unsigned reg_alloc;

   /** next available SSA value index */
   unsigned ssa_alloc;

   /* total number of basic blocks, only valid when block_index_dirty = false */
   unsigned num_blocks;

   nir_metadata valid_metadata;
} nir_function_impl;

ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_start_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.head_sentinel.next;
}

ATTRIBUTE_RETURNS_NONNULL static inline nir_block *
nir_impl_last_block(nir_function_impl *impl)
{
   return (nir_block *) impl->body.tail_sentinel.prev;
}

static inline nir_cf_node *
nir_cf_node_next(nir_cf_node *node)
{
   struct exec_node *next = exec_node_get_next(&node->node);
   if (exec_node_is_tail_sentinel(next))
      return NULL;
   else
      return exec_node_data(nir_cf_node, next, node);
}

static inline nir_cf_node *
nir_cf_node_prev(nir_cf_node *node)
{
   struct exec_node *prev = exec_node_get_prev(&node->node);
   if (exec_node_is_head_sentinel(prev))
      return NULL;
   else
      return exec_node_data(nir_cf_node, prev, node);
}

static inline bool
nir_cf_node_is_first(const nir_cf_node *node)
{
   return exec_node_is_head_sentinel(node->node.prev);
}

static inline bool
nir_cf_node_is_last(const nir_cf_node *node)
{
   return exec_node_is_tail_sentinel(node->node.next);
}

NIR_DEFINE_CAST(nir_cf_node_as_block, nir_cf_node, nir_block, cf_node,
                type, nir_cf_node_block)
NIR_DEFINE_CAST(nir_cf_node_as_if, nir_cf_node, nir_if, cf_node,
                type, nir_cf_node_if)
NIR_DEFINE_CAST(nir_cf_node_as_loop, nir_cf_node, nir_loop, cf_node,
                type, nir_cf_node_loop)
NIR_DEFINE_CAST(nir_cf_node_as_function, nir_cf_node,
                nir_function_impl, cf_node, type, nir_cf_node_function)
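/*
 * Example (illustrative sketch): walking the top level of a function body
 * and dispatching on control-flow node type.  The first node of a body is
 * always a block, so nir_start_block() gives a valid starting point:
 *
 *    for (nir_cf_node *node = &nir_start_block(impl)->cf_node;
 *         node != NULL; node = nir_cf_node_next(node)) {
 *       switch (node->type) {
 *       case nir_cf_node_block:
 *          ...nir_cf_node_as_block(node)...
 *          break;
 *       case nir_cf_node_if:
 *          ...recurse into then_list and else_list...
 *          break;
 *       case nir_cf_node_loop:
 *          ...recurse into the loop body...
 *          break;
 *       default:
 *          break;
 *       }
 *    }
 */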
static inline nir_block *
nir_if_first_then_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_then_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->then_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_if_first_else_block(nir_if *if_stmt)
{
   struct exec_node *head = exec_list_get_head(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_if_last_else_block(nir_if *if_stmt)
{
   struct exec_node *tail = exec_list_get_tail(&if_stmt->else_list);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}

static inline nir_block *
nir_loop_first_block(nir_loop *loop)
{
   struct exec_node *head = exec_list_get_head(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, head, node));
}

static inline nir_block *
nir_loop_last_block(nir_loop *loop)
{
   struct exec_node *tail = exec_list_get_tail(&loop->body);
   return nir_cf_node_as_block(exec_node_data(nir_cf_node, tail, node));
}
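/*
 * Example (illustrative sketch): the helpers above give direct access to
 * the blocks at the boundaries of an if-statement, e.g. checking whether a
 * hypothetical nir_if "nif" has an empty else branch (a single block with
 * no instructions):
 *
 *    nir_block *first = nir_if_first_else_block(nif);
 *    bool empty_else = exec_list_is_empty(&first->instr_list) &&
 *                      nir_cf_node_is_last(&first->cf_node);
 */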
1879 */ 1880 bool native_integers; 1881 1882 /* Indicates that the driver only has zero-based vertex id */ 1883 bool vertex_id_zero_based; 1884 1885 bool lower_cs_local_index_from_id; 1886 1887 /** 1888 * Should nir_lower_io() create load_interpolated_input intrinsics? 1889 * 1890 * If not, it generates regular load_input intrinsics and interpolation 1891 * information must be inferred from the list of input nir_variables. 1892 */ 1893 bool use_interpolated_input_intrinsics; 1894 1895 unsigned max_unroll_iterations; 1896 } nir_shader_compiler_options; 1897 1898 typedef struct nir_shader { 1899 /** list of uniforms (nir_variable) */ 1900 struct exec_list uniforms; 1901 1902 /** list of inputs (nir_variable) */ 1903 struct exec_list inputs; 1904 1905 /** list of outputs (nir_variable) */ 1906 struct exec_list outputs; 1907 1908 /** list of shared compute variables (nir_variable) */ 1909 struct exec_list shared; 1910 1911 /** Set of driver-specific options for the shader. 1912 * 1913 * The memory for the options is expected to be kept in a single static 1914 * copy by the driver. 1915 */ 1916 const struct nir_shader_compiler_options *options; 1917 1918 /** Various bits of compile-time information about a given shader */ 1919 struct shader_info info; 1920 1921 /** list of global variables in the shader (nir_variable) */ 1922 struct exec_list globals; 1923 1924 /** list of system value variables in the shader (nir_variable) */ 1925 struct exec_list system_values; 1926 1927 struct exec_list functions; /** < list of nir_function */ 1928 1929 /** list of global register in the shader */ 1930 struct exec_list registers; 1931 1932 /** next available global register index */ 1933 unsigned reg_alloc; 1934 1935 /** 1936 * the highest index a load_input_*, load_uniform_*, etc. 
typedef struct nir_shader {
   /** list of uniforms (nir_variable) */
   struct exec_list uniforms;

   /** list of inputs (nir_variable) */
   struct exec_list inputs;

   /** list of outputs (nir_variable) */
   struct exec_list outputs;

   /** list of shared compute variables (nir_variable) */
   struct exec_list shared;

   /** Set of driver-specific options for the shader.
    *
    * The memory for the options is expected to be kept in a single static
    * copy by the driver.
    */
   const struct nir_shader_compiler_options *options;

   /** Various bits of compile-time information about a given shader */
   struct shader_info info;

   /** list of global variables in the shader (nir_variable) */
   struct exec_list globals;

   /** list of system value variables in the shader (nir_variable) */
   struct exec_list system_values;

   struct exec_list functions; /**< list of nir_function */

   /** list of global registers in the shader */
   struct exec_list registers;

   /** next available global register index */
   unsigned reg_alloc;

   /**
    * The highest index a load_input_*, load_uniform_*, etc. intrinsic can
    * access, plus one.
    */
   unsigned num_inputs, num_uniforms, num_outputs, num_shared;
} nir_shader;

static inline nir_function_impl *
nir_shader_get_entrypoint(nir_shader *shader)
{
   assert(exec_list_length(&shader->functions) == 1);
   struct exec_node *func_node = exec_list_get_head(&shader->functions);
   nir_function *func = exec_node_data(nir_function, func_node, node);
   assert(func->return_type == glsl_void_type());
   assert(func->num_params == 0);
   assert(func->impl);
   return func->impl;
}

#define nir_foreach_function(func, shader) \
   foreach_list_typed(nir_function, func, node, &(shader)->functions)

nir_shader *nir_shader_create(void *mem_ctx,
                              gl_shader_stage stage,
                              const nir_shader_compiler_options *options,
                              shader_info *si);

/** creates a register, including assigning it an index and adding it to the list */
nir_register *nir_global_reg_create(nir_shader *shader);

nir_register *nir_local_reg_create(nir_function_impl *impl);

void nir_reg_remove(nir_register *reg);

/** Adds a variable to the appropriate list in nir_shader */
void nir_shader_add_variable(nir_shader *shader, nir_variable *var);

static inline void
nir_function_impl_add_variable(nir_function_impl *impl, nir_variable *var)
{
   assert(var->data.mode == nir_var_local);
   exec_list_push_tail(&impl->locals, &var->node);
}

/** creates a variable, sets a few defaults, and adds it to the list */
nir_variable *nir_variable_create(nir_shader *shader,
                                  nir_variable_mode mode,
                                  const struct glsl_type *type,
                                  const char *name);

/** creates a local variable and adds it to the list */
nir_variable *nir_local_variable_create(nir_function_impl *impl,
                                        const struct glsl_type *type,
                                        const char *name);

/** creates a function and adds it to the shader's list of functions */
nir_function *nir_function_create(nir_shader *shader, const char *name);

nir_function_impl *nir_function_impl_create(nir_function *func);
/** creates a function_impl that isn't tied to any particular function */
nir_function_impl *nir_function_impl_create_bare(nir_shader *shader);

nir_block *nir_block_create(nir_shader *shader);
nir_if *nir_if_create(nir_shader *shader);
nir_loop *nir_loop_create(nir_shader *shader);

nir_function_impl *nir_cf_node_get_function(nir_cf_node *node);

/** requests that the given pieces of metadata be generated */
void nir_metadata_require(nir_function_impl *impl, nir_metadata required, ...);
/** dirties all but the preserved metadata */
void nir_metadata_preserve(nir_function_impl *impl, nir_metadata preserved);
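/* Usage sketch (illustrative, not part of the API): building a fresh
 * shader with a single void entry point, after which
 * nir_shader_get_entrypoint() is valid.  "mem_ctx" and "options" stand in
 * for the caller's ralloc context and the driver's static options:
 *
 *    nir_shader *s = nir_shader_create(mem_ctx, MESA_SHADER_FRAGMENT,
 *                                      options, NULL);
 *    nir_function *func = nir_function_create(s, "main");
 *    nir_function_impl *impl = nir_function_impl_create(func);
 *    assert(nir_shader_get_entrypoint(s) == impl);
 */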
/** creates an instruction with default swizzle/writemask/etc. and NULL registers */
nir_alu_instr *nir_alu_instr_create(nir_shader *shader, nir_op op);

nir_jump_instr *nir_jump_instr_create(nir_shader *shader, nir_jump_type type);

nir_load_const_instr *nir_load_const_instr_create(nir_shader *shader,
                                                  unsigned num_components,
                                                  unsigned bit_size);

nir_intrinsic_instr *nir_intrinsic_instr_create(nir_shader *shader,
                                                nir_intrinsic_op op);

nir_call_instr *nir_call_instr_create(nir_shader *shader,
                                      nir_function *callee);

nir_tex_instr *nir_tex_instr_create(nir_shader *shader, unsigned num_srcs);

nir_phi_instr *nir_phi_instr_create(nir_shader *shader);

nir_parallel_copy_instr *nir_parallel_copy_instr_create(nir_shader *shader);

nir_ssa_undef_instr *nir_ssa_undef_instr_create(nir_shader *shader,
                                                unsigned num_components,
                                                unsigned bit_size);

nir_deref_var *nir_deref_var_create(void *mem_ctx, nir_variable *var);
nir_deref_array *nir_deref_array_create(void *mem_ctx);
nir_deref_struct *nir_deref_struct_create(void *mem_ctx, unsigned field_index);

typedef bool (*nir_deref_foreach_leaf_cb)(nir_deref_var *deref, void *state);
bool nir_deref_foreach_leaf(nir_deref_var *deref,
                            nir_deref_foreach_leaf_cb cb, void *state);

nir_load_const_instr *
nir_deref_get_const_initializer_load(nir_shader *shader, nir_deref_var *deref);

/**
 * NIR Cursors and Instruction Insertion API
 * @{
 *
 * A tiny struct representing a point to insert/extract instructions or
 * control flow nodes.  Helps reduce the combinatorial explosion of possible
 * points to insert/extract.
 *
 * \sa nir_control_flow.h
 */
typedef enum {
   nir_cursor_before_block,
   nir_cursor_after_block,
   nir_cursor_before_instr,
   nir_cursor_after_instr,
} nir_cursor_option;

typedef struct {
   nir_cursor_option option;
   union {
      nir_block *block;
      nir_instr *instr;
   };
} nir_cursor;

static inline nir_block *
nir_cursor_current_block(nir_cursor cursor)
{
   if (cursor.option == nir_cursor_before_instr ||
       cursor.option == nir_cursor_after_instr) {
      return cursor.instr->block;
   } else {
      return cursor.block;
   }
}

bool nir_cursors_equal(nir_cursor a, nir_cursor b);

static inline nir_cursor
nir_before_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_after_block(nir_block *block)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_block;
   cursor.block = block;
   return cursor;
}

static inline nir_cursor
nir_before_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_before_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_instr(nir_instr *instr)
{
   nir_cursor cursor;
   cursor.option = nir_cursor_after_instr;
   cursor.instr = instr;
   return cursor;
}

static inline nir_cursor
nir_after_block_before_jump(nir_block *block)
{
   nir_instr *last_instr = nir_block_last_instr(block);
   if (last_instr && last_instr->type == nir_instr_type_jump) {
      return nir_before_instr(last_instr);
   } else {
      return nir_after_block(block);
   }
}
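/* Usage sketch (illustrative, not part of the API): cursors are plain
 * values, so they can be built and passed around freely.  For example, to
 * get a point at the end of a block that stays before any trailing jump:
 *
 *    nir_cursor c = nir_after_block_before_jump(block);
 */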
static inline nir_cursor
nir_before_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_before_block(nir_cf_node_as_block(node));

   return nir_after_block(nir_cf_node_as_block(nir_cf_node_prev(node)));
}

static inline nir_cursor
nir_after_cf_node(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   return nir_before_block(nir_cf_node_as_block(nir_cf_node_next(node)));
}

static inline nir_cursor
nir_after_phis(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_phi)
         return nir_before_instr(instr);
   }
   return nir_after_block(block);
}

static inline nir_cursor
nir_after_cf_node_and_phis(nir_cf_node *node)
{
   if (node->type == nir_cf_node_block)
      return nir_after_block(nir_cf_node_as_block(node));

   nir_block *block = nir_cf_node_as_block(nir_cf_node_next(node));

   return nir_after_phis(block);
}

static inline nir_cursor
nir_before_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *first_node = exec_node_data(nir_cf_node,
                                            exec_list_get_head(cf_list), node);
   return nir_before_cf_node(first_node);
}

static inline nir_cursor
nir_after_cf_list(struct exec_list *cf_list)
{
   nir_cf_node *last_node = exec_node_data(nir_cf_node,
                                           exec_list_get_tail(cf_list), node);
   return nir_after_cf_node(last_node);
}

/**
 * Insert a NIR instruction at the given cursor.
 *
 * Note: This does not update the cursor.
 */
void nir_instr_insert(nir_cursor cursor, nir_instr *instr);

static inline void
nir_instr_insert_before(nir_instr *instr, nir_instr *before)
{
   nir_instr_insert(nir_before_instr(instr), before);
}

static inline void
nir_instr_insert_after(nir_instr *instr, nir_instr *after)
{
   nir_instr_insert(nir_after_instr(instr), after);
}

static inline void
nir_instr_insert_before_block(nir_block *block, nir_instr *before)
{
   nir_instr_insert(nir_before_block(block), before);
}

static inline void
nir_instr_insert_after_block(nir_block *block, nir_instr *after)
{
   nir_instr_insert(nir_after_block(block), after);
}

static inline void
nir_instr_insert_before_cf(nir_cf_node *node, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_node(node), before);
}

static inline void
nir_instr_insert_after_cf(nir_cf_node *node, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_node(node), after);
}

static inline void
nir_instr_insert_before_cf_list(struct exec_list *list, nir_instr *before)
{
   nir_instr_insert(nir_before_cf_list(list), before);
}

static inline void
nir_instr_insert_after_cf_list(struct exec_list *list, nir_instr *after)
{
   nir_instr_insert(nir_after_cf_list(list), after);
}

void nir_instr_remove(nir_instr *instr);

/** @} */
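/* Usage sketch (illustrative, not part of the API): creating a scalar
 * 32-bit load_const and inserting it before an existing instruction.
 * "shader" and "use_instr" stand in for caller state; the instr/def/value
 * fields of nir_load_const_instr are declared earlier in this header, and
 * the constant chosen is arbitrary:
 *
 *    nir_load_const_instr *zero =
 *       nir_load_const_instr_create(shader, 1, 32);
 *    zero->value.u32[0] = 0;
 *    nir_instr_insert(nir_before_instr(use_instr), &zero->instr);
 */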
typedef bool (*nir_foreach_ssa_def_cb)(nir_ssa_def *def, void *state);
typedef bool (*nir_foreach_dest_cb)(nir_dest *dest, void *state);
typedef bool (*nir_foreach_src_cb)(nir_src *src, void *state);
bool nir_foreach_ssa_def(nir_instr *instr, nir_foreach_ssa_def_cb cb,
                         void *state);
bool nir_foreach_dest(nir_instr *instr, nir_foreach_dest_cb cb, void *state);
bool nir_foreach_src(nir_instr *instr, nir_foreach_src_cb cb, void *state);

nir_const_value *nir_src_as_const_value(nir_src src);
bool nir_src_is_dynamically_uniform(nir_src src);
bool nir_srcs_equal(nir_src src1, nir_src src2);
void nir_instr_rewrite_src(nir_instr *instr, nir_src *src, nir_src new_src);
void nir_instr_move_src(nir_instr *dest_instr, nir_src *dest, nir_src *src);
void nir_if_rewrite_condition(nir_if *if_stmt, nir_src new_src);
void nir_instr_rewrite_dest(nir_instr *instr, nir_dest *dest,
                            nir_dest new_dest);
void nir_instr_rewrite_deref(nir_instr *instr, nir_deref_var **deref,
                             nir_deref_var *new_deref);

void nir_ssa_dest_init(nir_instr *instr, nir_dest *dest,
                       unsigned num_components, unsigned bit_size,
                       const char *name);
void nir_ssa_def_init(nir_instr *instr, nir_ssa_def *def,
                      unsigned num_components, unsigned bit_size,
                      const char *name);
static inline void
nir_ssa_dest_init_for_type(nir_instr *instr, nir_dest *dest,
                           const struct glsl_type *type,
                           const char *name)
{
   assert(glsl_type_is_vector_or_scalar(type));
   nir_ssa_dest_init(instr, dest, glsl_get_components(type),
                     glsl_get_bit_size(type), name);
}
void nir_ssa_def_rewrite_uses(nir_ssa_def *def, nir_src new_src);
void nir_ssa_def_rewrite_uses_after(nir_ssa_def *def, nir_src new_src,
                                    nir_instr *after_me);

uint8_t nir_ssa_def_components_read(const nir_ssa_def *def);

/*
 * Finds the next basic block in source-code order; returns NULL if there is
 * none.
 */
nir_block *nir_block_cf_tree_next(nir_block *block);

/* Performs the opposite of nir_block_cf_tree_next() */
nir_block *nir_block_cf_tree_prev(nir_block *block);

/* Gets the first block in a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_first(nir_cf_node *node);

/* Gets the last block in a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_last(nir_cf_node *node);

/* Gets the next block after a CF node in source-code order */
nir_block *nir_cf_node_cf_tree_next(nir_cf_node *node);

/* Macros for loops that visit blocks in source-code order */

#define nir_foreach_block(block, impl) \
   for (nir_block *block = nir_start_block(impl); block != NULL; \
        block = nir_block_cf_tree_next(block))

#define nir_foreach_block_safe(block, impl) \
   for (nir_block *block = nir_start_block(impl), \
        *next = nir_block_cf_tree_next(block); \
        block != NULL; \
        block = next, next = nir_block_cf_tree_next(block))

#define nir_foreach_block_reverse(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl); block != NULL; \
        block = nir_block_cf_tree_prev(block))

#define nir_foreach_block_reverse_safe(block, impl) \
   for (nir_block *block = nir_impl_last_block(impl), \
        *prev = nir_block_cf_tree_prev(block); \
        block != NULL; \
        block = prev, prev = nir_block_cf_tree_prev(block))

#define nir_foreach_block_in_cf_node(block, node) \
   for (nir_block *block = nir_cf_node_cf_tree_first(node); \
        block != nir_cf_node_cf_tree_next(node); \
        block = nir_block_cf_tree_next(block))
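/* Usage sketch (illustrative, not part of the API): the _safe variants
 * above allow removing the current block during the walk; likewise,
 * nir_foreach_instr_safe() (declared earlier in this header) allows
 * removing instructions while iterating.  should_remove() is a
 * hypothetical predicate:
 *
 *    nir_foreach_block(block, impl) {
 *       nir_foreach_instr_safe(instr, block) {
 *          if (should_remove(instr))
 *             nir_instr_remove(instr);
 *       }
 *    }
 */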
/* If the following CF node is an if, this function returns that if.
 * Otherwise, it returns NULL.
 */
nir_if *nir_block_get_following_if(nir_block *block);

nir_loop *nir_block_get_following_loop(nir_block *block);

void nir_index_local_regs(nir_function_impl *impl);
void nir_index_global_regs(nir_shader *shader);
void nir_index_ssa_defs(nir_function_impl *impl);
unsigned nir_index_instrs(nir_function_impl *impl);

void nir_index_blocks(nir_function_impl *impl);

void nir_print_shader(nir_shader *shader, FILE *fp);
void nir_print_shader_annotated(nir_shader *shader, FILE *fp,
                                struct hash_table *errors);
void nir_print_instr(const nir_instr *instr, FILE *fp);

nir_shader *nir_shader_clone(void *mem_ctx, const nir_shader *s);
nir_function_impl *nir_function_impl_clone(const nir_function_impl *fi);
nir_constant *nir_constant_clone(const nir_constant *c, nir_variable *var);
nir_variable *nir_variable_clone(const nir_variable *c, nir_shader *shader);
nir_deref *nir_deref_clone(const nir_deref *deref, void *mem_ctx);
nir_deref_var *nir_deref_var_clone(const nir_deref_var *deref, void *mem_ctx);

nir_shader *nir_shader_serialize_deserialize(void *mem_ctx, nir_shader *s);

#ifndef NDEBUG
void nir_validate_shader(nir_shader *shader);
void nir_metadata_set_validation_flag(nir_shader *shader);
void nir_metadata_check_validation_flag(nir_shader *shader);

static inline bool
should_clone_nir(void)
{
   static int should_clone = -1;
   if (should_clone < 0)
      should_clone = env_var_as_boolean("NIR_TEST_CLONE", false);

   return should_clone;
}

static inline bool
should_serialize_deserialize_nir(void)
{
   static int test_serialize = -1;
   if (test_serialize < 0)
      test_serialize = env_var_as_boolean("NIR_TEST_SERIALIZE", false);

   return test_serialize;
}

static inline bool
should_print_nir(void)
{
   static int should_print = -1;
   if (should_print < 0)
      should_print = env_var_as_boolean("NIR_PRINT", false);

   return should_print;
}
#else
static inline void nir_validate_shader(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_set_validation_flag(nir_shader *shader) { (void) shader; }
static inline void nir_metadata_check_validation_flag(nir_shader *shader) { (void) shader; }
static inline bool should_clone_nir(void) { return false; }
static inline bool should_serialize_deserialize_nir(void) { return false; }
static inline bool should_print_nir(void) { return false; }
#endif /* NDEBUG */

#define _PASS(nir, do_pass) do {                                      \
   do_pass                                                            \
   nir_validate_shader(nir);                                          \
   if (should_clone_nir()) {                                          \
      nir_shader *clone = nir_shader_clone(ralloc_parent(nir), nir);  \
      ralloc_free(nir);                                               \
      nir = clone;                                                    \
   }                                                                  \
   if (should_serialize_deserialize_nir()) {                          \
      void *mem_ctx = ralloc_parent(nir);                             \
      nir = nir_shader_serialize_deserialize(mem_ctx, nir);           \
   }                                                                  \
} while (0)

#define NIR_PASS(progress, nir, pass, ...) _PASS(nir,                 \
   nir_metadata_set_validation_flag(nir);                             \
   if (should_print_nir())                                            \
      printf("%s\n", #pass);                                          \
   if (pass(nir, ##__VA_ARGS__)) {                                    \
      progress = true;                                                \
      if (should_print_nir())                                         \
         nir_print_shader(nir, stdout);                               \
      nir_metadata_check_validation_flag(nir);                        \
   }                                                                  \
)

#define NIR_PASS_V(nir, pass, ...) _PASS(nir,                         \
   if (should_print_nir())                                            \
      printf("%s\n", #pass);                                          \
   pass(nir, ##__VA_ARGS__);                                          \
   if (should_print_nir())                                            \
      nir_print_shader(nir, stdout);                                  \
)
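/* Usage sketch (illustrative, not part of the API): the usual way to run
 * an optimization loop on top of NIR_PASS, repeating until no pass makes
 * progress; the pass selection here is arbitrary:
 *
 *    bool progress;
 *    do {
 *       progress = false;
 *       NIR_PASS(progress, nir, nir_copy_prop);
 *       NIR_PASS(progress, nir, nir_opt_dce);
 *       NIR_PASS(progress, nir, nir_opt_cse);
 *    } while (progress);
 */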
void nir_calc_dominance_impl(nir_function_impl *impl);
void nir_calc_dominance(nir_shader *shader);

nir_block *nir_dominance_lca(nir_block *b1, nir_block *b2);
bool nir_block_dominates(nir_block *parent, nir_block *child);

void nir_dump_dom_tree_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_tree(nir_shader *shader, FILE *fp);

void nir_dump_dom_frontier_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_dom_frontier(nir_shader *shader, FILE *fp);

void nir_dump_cfg_impl(nir_function_impl *impl, FILE *fp);
void nir_dump_cfg(nir_shader *shader, FILE *fp);

int nir_gs_count_vertices(const nir_shader *shader);

bool nir_split_var_copies(nir_shader *shader);

bool nir_lower_returns_impl(nir_function_impl *impl);
bool nir_lower_returns(nir_shader *shader);

bool nir_inline_functions(nir_shader *shader);

bool nir_propagate_invariant(nir_shader *shader);

void nir_lower_var_copy_instr(nir_intrinsic_instr *copy, nir_shader *shader);
bool nir_lower_var_copies(nir_shader *shader);

bool nir_lower_global_vars_to_local(nir_shader *shader);

bool nir_lower_indirect_derefs(nir_shader *shader, nir_variable_mode modes);

bool nir_lower_locals_to_regs(nir_shader *shader);

void nir_lower_io_to_temporaries(nir_shader *shader,
                                 nir_function_impl *entrypoint,
                                 bool outputs, bool inputs);

void nir_shader_gather_info(nir_shader *shader, nir_function_impl *entrypoint);

void nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                              int (*type_size)(const struct glsl_type *));

/* Some helpers to do very simple linking */
bool nir_remove_unused_varyings(nir_shader *producer, nir_shader *consumer);
void nir_compact_varyings(nir_shader *producer, nir_shader *consumer,
                          bool default_to_smooth_interp);
typedef enum {
   /* If set, this forces all non-flat fragment shader inputs to be
    * interpolated as if with the "sample" qualifier.  This requires
    * nir_shader_compiler_options::use_interpolated_input_intrinsics.
    */
   nir_lower_io_force_sample_interpolation = (1 << 1),
} nir_lower_io_options;
bool nir_lower_io(nir_shader *shader,
                  nir_variable_mode modes,
                  int (*type_size)(const struct glsl_type *),
                  nir_lower_io_options);
nir_src *nir_get_io_offset_src(nir_intrinsic_instr *instr);
nir_src *nir_get_io_vertex_index_src(nir_intrinsic_instr *instr);

bool nir_is_per_vertex_io(const nir_variable *var, gl_shader_stage stage);

void nir_lower_io_types(nir_shader *shader);
bool nir_lower_regs_to_ssa_impl(nir_function_impl *impl);
bool nir_lower_regs_to_ssa(nir_shader *shader);
bool nir_lower_vars_to_ssa(nir_shader *shader);

bool nir_remove_dead_variables(nir_shader *shader, nir_variable_mode modes);
bool nir_lower_constant_initializers(nir_shader *shader,
                                     nir_variable_mode modes);

bool nir_move_vec_src_uses_to_dest(nir_shader *shader);
bool nir_lower_vec_to_movs(nir_shader *shader);
void nir_lower_alpha_test(nir_shader *shader, enum compare_func func,
                          bool alpha_to_one);
bool nir_lower_alu_to_scalar(nir_shader *shader);
bool nir_lower_load_const_to_scalar(nir_shader *shader);
bool nir_lower_read_invocation_to_scalar(nir_shader *shader);
bool nir_lower_phis_to_scalar(nir_shader *shader);
void nir_lower_io_arrays_to_elements(nir_shader *producer,
                                     nir_shader *consumer);
void nir_lower_io_arrays_to_elements_no_indirects(nir_shader *shader);
void nir_lower_io_to_scalar(nir_shader *shader, nir_variable_mode mask);
void nir_lower_io_to_scalar_early(nir_shader *shader, nir_variable_mode mask);

bool nir_lower_samplers(nir_shader *shader,
                        const struct gl_shader_program *shader_program);
bool nir_lower_samplers_as_deref(nir_shader *shader,
                                 const struct gl_shader_program *shader_program);

typedef struct nir_lower_subgroups_options {
   uint8_t subgroup_size;
   uint8_t ballot_bit_size;
   bool lower_to_scalar:1;
   bool lower_vote_trivial:1;
   bool lower_subgroup_masks:1;
} nir_lower_subgroups_options;

bool nir_lower_subgroups(nir_shader *shader,
                         const nir_lower_subgroups_options *options);

bool nir_lower_system_values(nir_shader *shader);

typedef struct nir_lower_tex_options {
   /**
    * bitmask of (1 << GLSL_SAMPLER_DIM_x) to control for which
    * sampler types a texture projector is lowered.
    */
   unsigned lower_txp;

   /**
    * If true, lower away nir_tex_src_offset for all texelfetch instructions.
    */
   bool lower_txf_offset;

   /**
    * If true, lower away nir_tex_src_offset for all rect textures.
    */
   bool lower_rect_offset;

   /**
    * If true, lower rect textures to 2D, using txs to fetch the
    * texture dimensions and dividing the texture coords by the
    * texture dims to normalize.
    */
   bool lower_rect;

   /**
    * Bitmasks of texture units whose YUV samples should be converted
    * to RGB, one field per external-format layout.
    */
   unsigned lower_y_uv_external;
   unsigned lower_y_u_v_external;
   unsigned lower_yx_xuxv_external;
   unsigned lower_xy_uxvx_external;

   /**
    * To emulate certain texture wrap modes, this can be used
    * to saturate the specified tex coord to [0.0, 1.0].  The
    * bits are according to sampler #, ie. if, for example:
    *
    *    (conf->saturate_s & (1 << n))
    *
    * is true, then the s coord for sampler n is saturated.
    *
    * Note that clamping must happen *after* projector lowering
    * so any projected texture sample instruction with a clamped
    * coordinate gets automatically lowered, regardless of the
    * 'lower_txp' setting.
    */
   unsigned saturate_s;
   unsigned saturate_t;
   unsigned saturate_r;

   /* Bitmask of textures that need swizzling.
    *
    * If (swizzle_result & (1 << texture_index)), then the swizzle in
    * swizzles[texture_index] is applied to the result of the texturing
    * operation.
    */
   unsigned swizzle_result;

   /* A swizzle for each texture.  Values 0-3 represent x, y, z, or w swizzles
    * while 4 and 5 represent 0 and 1 respectively.
    */
   uint8_t swizzles[32][4];

   /**
    * Bitmap of textures that need srgb to linear conversion.  If
    * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components
    * of the texture are lowered to linear.
    */
   unsigned lower_srgb;

   /**
    * If true, lower nir_texop_txd on cube maps with nir_texop_txl.
    */
   bool lower_txd_cube_map;

   /**
    * If true, lower nir_texop_txd on shadow samplers (except cube maps)
    * with nir_texop_txl.  Notice that cube map shadow samplers are lowered
    * with lower_txd_cube_map.
    */
   bool lower_txd_shadow;

   /**
    * If true, lower nir_texop_txd on all samplers to a nir_texop_txl.
    * Implies lower_txd_cube_map and lower_txd_shadow.
    */
   bool lower_txd;
} nir_lower_tex_options;

bool nir_lower_tex(nir_shader *shader,
                   const nir_lower_tex_options *options);
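/* Usage sketch (illustrative, not part of the API): lowering every kind
 * of texture projector and saturating the s coordinate of sampler unit 2
 * (both choices arbitrary):
 *
 *    nir_lower_tex_options tex_opts = {
 *       .lower_txp = ~0u,
 *       .saturate_s = (1 << 2),
 *    };
 *    nir_lower_tex(shader, &tex_opts);
 */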
2581 * 2582 * Note that clamping must happen *after* projector lowering 2583 * so any projected texture sample instruction with a clamped 2584 * coordinate gets automatically lowered, regardless of the 2585 * 'lower_txp' setting. 2586 */ 2587 unsigned saturate_s; 2588 unsigned saturate_t; 2589 unsigned saturate_r; 2590 2591 /* Bitmask of textures that need swizzling. 2592 * 2593 * If (swizzle_result & (1 << texture_index)), then the swizzle in 2594 * swizzles[texture_index] is applied to the result of the texturing 2595 * operation. 2596 */ 2597 unsigned swizzle_result; 2598 2599 /* A swizzle for each texture. Values 0-3 represent x, y, z, or w swizzles 2600 * while 4 and 5 represent 0 and 1 respectively. 2601 */ 2602 uint8_t swizzles[32][4]; 2603 2604 /** 2605 * Bitmap of textures that need srgb to linear conversion. If 2606 * (lower_srgb & (1 << texture_index)) then the rgb (xyz) components 2607 * of the texture are lowered to linear. 2608 */ 2609 unsigned lower_srgb; 2610 2611 /** 2612 * If true, lower nir_texop_txd on cube maps with nir_texop_txl. 2613 */ 2614 bool lower_txd_cube_map; 2615 2616 /** 2617 * If true, lower nir_texop_txd on shadow samplers (except cube maps) 2618 * with nir_texop_txl. Notice that cube map shadow samplers are lowered 2619 * with lower_txd_cube_map. 2620 */ 2621 bool lower_txd_shadow; 2622 2623 /** 2624 * If true, lower nir_texop_txd on all samplers to a nir_texop_txl. 2625 * Implies lower_txd_cube_map and lower_txd_shadow. 2626 */ 2627 bool lower_txd; 2628 } nir_lower_tex_options; 2629 2630 bool nir_lower_tex(nir_shader *shader, 2631 const nir_lower_tex_options *options); 2632 2633 bool nir_lower_idiv(nir_shader *shader); 2634 2635 bool nir_lower_clip_vs(nir_shader *shader, unsigned ucp_enables); 2636 bool nir_lower_clip_fs(nir_shader *shader, unsigned ucp_enables); 2637 bool nir_lower_clip_cull_distance_arrays(nir_shader *nir); 2638 2639 void nir_lower_two_sided_color(nir_shader *shader); 2640 2641 bool nir_lower_clamp_color_outputs(nir_shader *shader); 2642 2643 void nir_lower_passthrough_edgeflags(nir_shader *shader); 2644 void nir_lower_tes_patch_vertices(nir_shader *tes, unsigned patch_vertices); 2645 2646 typedef struct nir_lower_wpos_ytransform_options { 2647 int state_tokens[5]; 2648 bool fs_coord_origin_upper_left :1; 2649 bool fs_coord_origin_lower_left :1; 2650 bool fs_coord_pixel_center_integer :1; 2651 bool fs_coord_pixel_center_half_integer :1; 2652 } nir_lower_wpos_ytransform_options; 2653 2654 bool nir_lower_wpos_ytransform(nir_shader *shader, 2655 const nir_lower_wpos_ytransform_options *options); 2656 bool nir_lower_wpos_center(nir_shader *shader, const bool for_sample_shading); 2657 2658 typedef struct nir_lower_drawpixels_options { 2659 int texcoord_state_tokens[5]; 2660 int scale_state_tokens[5]; 2661 int bias_state_tokens[5]; 2662 unsigned drawpix_sampler; 2663 unsigned pixelmap_sampler; 2664 bool pixel_maps :1; 2665 bool scale_and_bias :1; 2666 } nir_lower_drawpixels_options; 2667 2668 void nir_lower_drawpixels(nir_shader *shader, 2669 const nir_lower_drawpixels_options *options); 2670 2671 typedef struct nir_lower_bitmap_options { 2672 unsigned sampler; 2673 bool swizzle_xxxx; 2674 } nir_lower_bitmap_options; 2675 2676 void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options); 2677 2678 bool nir_lower_atomics(nir_shader *shader, 2679 const struct gl_shader_program *shader_program); 2680 bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset); 2681 bool nir_lower_uniforms_to_ubo(nir_shader 
typedef struct nir_lower_drawpixels_options {
   int texcoord_state_tokens[5];
   int scale_state_tokens[5];
   int bias_state_tokens[5];
   unsigned drawpix_sampler;
   unsigned pixelmap_sampler;
   bool pixel_maps :1;
   bool scale_and_bias :1;
} nir_lower_drawpixels_options;

void nir_lower_drawpixels(nir_shader *shader,
                          const nir_lower_drawpixels_options *options);

typedef struct nir_lower_bitmap_options {
   unsigned sampler;
   bool swizzle_xxxx;
} nir_lower_bitmap_options;

void nir_lower_bitmap(nir_shader *shader, const nir_lower_bitmap_options *options);

bool nir_lower_atomics(nir_shader *shader,
                       const struct gl_shader_program *shader_program);
bool nir_lower_atomics_to_ssbo(nir_shader *shader, unsigned ssbo_offset);
bool nir_lower_uniforms_to_ubo(nir_shader *shader);
bool nir_lower_to_source_mods(nir_shader *shader);

bool nir_lower_gs_intrinsics(nir_shader *shader);

typedef enum {
   nir_lower_imul64 = (1 << 0),
   nir_lower_isign64 = (1 << 1),
   /** Lower all int64 modulus and division opcodes */
   nir_lower_divmod64 = (1 << 2),
} nir_lower_int64_options;

bool nir_lower_int64(nir_shader *shader, nir_lower_int64_options options);

typedef enum {
   nir_lower_drcp = (1 << 0),
   nir_lower_dsqrt = (1 << 1),
   nir_lower_drsq = (1 << 2),
   nir_lower_dtrunc = (1 << 3),
   nir_lower_dfloor = (1 << 4),
   nir_lower_dceil = (1 << 5),
   nir_lower_dfract = (1 << 6),
   nir_lower_dround_even = (1 << 7),
   nir_lower_dmod = (1 << 8)
} nir_lower_doubles_options;

bool nir_lower_doubles(nir_shader *shader, nir_lower_doubles_options options);
bool nir_lower_64bit_pack(nir_shader *shader);

bool nir_normalize_cubemap_coords(nir_shader *shader);

void nir_live_ssa_defs_impl(nir_function_impl *impl);

void nir_loop_analyze_impl(nir_function_impl *impl,
                           nir_variable_mode indirect_mask);

bool nir_ssa_defs_interfere(nir_ssa_def *a, nir_ssa_def *b);

bool nir_repair_ssa_impl(nir_function_impl *impl);
bool nir_repair_ssa(nir_shader *shader);

void nir_convert_loop_to_lcssa(nir_loop *loop);

/* If phi_webs_only is true, only convert SSA values involved in phi nodes to
 * registers.  If false, convert all values (even those not involved in a phi
 * node) to registers.
 */
bool nir_convert_from_ssa(nir_shader *shader, bool phi_webs_only);

bool nir_lower_phis_to_regs_block(nir_block *block);
bool nir_lower_ssa_defs_to_regs_block(nir_block *block);

bool nir_opt_algebraic(nir_shader *shader);
bool nir_opt_algebraic_before_ffma(nir_shader *shader);
bool nir_opt_algebraic_late(nir_shader *shader);
bool nir_opt_constant_folding(nir_shader *shader);

bool nir_opt_global_to_local(nir_shader *shader);

bool nir_copy_prop(nir_shader *shader);

bool nir_opt_copy_prop_vars(nir_shader *shader);

bool nir_opt_cse(nir_shader *shader);

bool nir_opt_dce(nir_shader *shader);

bool nir_opt_dead_cf(nir_shader *shader);

bool nir_opt_gcm(nir_shader *shader, bool value_number);

bool nir_opt_if(nir_shader *shader);

bool nir_opt_intrinsics(nir_shader *shader);

bool nir_opt_loop_unroll(nir_shader *shader, nir_variable_mode indirect_mask);

bool nir_opt_move_comparisons(nir_shader *shader);

bool nir_opt_peephole_select(nir_shader *shader, unsigned limit);

bool nir_opt_remove_phis(nir_shader *shader);

bool nir_opt_trivial_continues(nir_shader *shader);

bool nir_opt_undef(nir_shader *shader);

bool nir_opt_conditional_discard(nir_shader *shader);

void nir_sweep(nir_shader *shader);

nir_intrinsic_op nir_intrinsic_from_system_value(gl_system_value val);
gl_system_value nir_system_value_from_intrinsic(nir_intrinsic_op intrin);

#ifdef __cplusplus
} /* extern "C" */
#endif

#endif /* NIR_H */