/*
 * Copyright 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir/nir_builder.h"

/**
 * This file implements the lowering required for VK_KHR_multiview.  We
 * implement multiview using instanced rendering.  The number of instances
 * in each draw call is multiplied by the number of views in the subpass.
 * Then, in the shader, we divide gl_InstanceId by the number of views to
 * recover the application's instance id, and we use
 * gl_InstanceId % view_count to compute the actual ViewIndex.
 */
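/*
 * For example, with a view mask of 0x3 (two views), a draw with an
 * instanceCount of 4 is emitted to the hardware with 8 instances.
 * Hardware instance 5 then executes application instance 5 / 2 = 2
 * for compacted view 5 % 2 = 1.
 */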
struct lower_multiview_state {
   nir_builder builder;

   uint32_t view_mask;

   nir_ssa_def *instance_id;
   nir_ssa_def *view_index;
};

static nir_ssa_def *
build_instance_id(struct lower_multiview_state *state)
{
   assert(state->builder.shader->info.stage == MESA_SHADER_VERTEX);

   if (state->instance_id == NULL) {
      nir_builder *b = &state->builder;

      b->cursor = nir_before_block(nir_start_block(b->impl));

      /* We use instancing for implementing multiview.  The actual instance
       * id the application asked for is given by dividing the hardware
       * instance_id by the number of views in this subpass.
       */
      state->instance_id =
         nir_idiv(b, nir_load_instance_id(b),
                     nir_imm_int(b, _mesa_bitcount(state->view_mask)));
   }

   return state->instance_id;
}

static nir_ssa_def *
build_view_index(struct lower_multiview_state *state)
{
   if (state->view_index == NULL) {
      nir_builder *b = &state->builder;

      b->cursor = nir_before_block(nir_start_block(b->impl));

      assert(state->view_mask != 0);
      if (_mesa_bitcount(state->view_mask) == 1) {
         /* There is only one view, so the view index is a constant. */
         state->view_index = nir_imm_int(b, ffs(state->view_mask) - 1);
      } else if (state->builder.shader->info.stage == MESA_SHADER_VERTEX) {
         /* We only support 16 views */
         assert((state->view_mask & 0xffff0000) == 0);

         /* We use instancing for implementing multiview.  The compacted view
          * id is given by instance_id % view_count.  We then have to convert
          * that to an actual view id.
          */
         nir_ssa_def *compacted =
            nir_umod(b, nir_load_instance_id(b),
                        nir_imm_int(b, _mesa_bitcount(state->view_mask)));

         if (util_is_power_of_two(state->view_mask + 1)) {
            /* If we have a full view mask, then compacted is what we want */
            state->view_index = compacted;
         } else {
            /* Now we define a map from compacted view index to the actual
             * view index that's based on the view_mask.  The map is given by
             * 16 nibbles, each of which is a value from 0 to 15.
             */
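            /* For example, view_mask = 0b1101 (views 0, 2, and 3) yields
             * remap = 0x320: compacted index 0 selects view 0, index 1
             * selects view 2, and index 2 selects view 3.
             */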
            uint64_t remap = 0;
            uint32_t bit, i = 0;
            for_each_bit(bit, state->view_mask) {
               assert(bit < 16);
               remap |= (uint64_t)bit << (i++ * 4);
            }

            nir_ssa_def *shift = nir_imul(b, compacted, nir_imm_int(b, 4));

            /* One of these days, when we have int64 everywhere, this will be
             * easier.  Until then, emulate a 64-bit right shift: pick the
             * low or high dword of remap based on whether the nibble we
             * want lives in the first 32 bits.
             */
            nir_ssa_def *shifted;
            if (remap <= UINT32_MAX) {
               shifted = nir_ushr(b, nir_imm_int(b, remap), shift);
            } else {
               nir_ssa_def *shifted_low =
                  nir_ushr(b, nir_imm_int(b, remap), shift);
               nir_ssa_def *shifted_high =
                  nir_ushr(b, nir_imm_int(b, remap >> 32),
                              nir_isub(b, shift, nir_imm_int(b, 32)));
               shifted = nir_bcsel(b, nir_ilt(b, shift, nir_imm_int(b, 32)),
                                      shifted_low, shifted_high);
            }
            state->view_index = nir_iand(b, shifted, nir_imm_int(b, 0xf));
         }
      } else {
         const struct glsl_type *type = glsl_int_type();
         if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
             b->shader->info.stage == MESA_SHADER_GEOMETRY)
            type = glsl_array_type(type, 1);

         nir_variable *idx_var =
            nir_variable_create(b->shader, nir_var_shader_in,
                                type, "view index");
         idx_var->data.location = VARYING_SLOT_VIEW_INDEX;
         if (b->shader->info.stage == MESA_SHADER_FRAGMENT)
            idx_var->data.interpolation = INTERP_MODE_FLAT;

         if (glsl_type_is_array(type)) {
            /* In tessellation control and geometry shaders, inputs are
             * arrays over vertices.  The view index is the same for every
             * vertex, so loading element 0 suffices.
             */
            nir_deref_var *deref = nir_deref_var_create(b->shader, idx_var);
            nir_deref_array *arr = nir_deref_array_create(b->shader);
            arr->deref.type = glsl_int_type();
            arr->deref_array_type = nir_deref_array_type_direct;
            arr->base_offset = 0;
            deref->deref.child = &arr->deref;

            state->view_index = nir_load_deref_var(b, deref);
         } else {
            state->view_index = nir_load_var(b, idx_var);
         }
      }
   }

   return state->view_index;
}

bool
anv_nir_lower_multiview(nir_shader *shader, uint32_t view_mask)
{
   assert(shader->info.stage != MESA_SHADER_COMPUTE);

   /* If multiview isn't enabled, we have nothing to do. */
   if (view_mask == 0)
      return false;

   struct lower_multiview_state state = {
      .view_mask = view_mask,
   };

   /* This pass assumes a single entrypoint */
   nir_function_impl *entrypoint = nir_shader_get_entrypoint(shader);

   nir_builder_init(&state.builder, entrypoint);

   bool progress = false;
   nir_foreach_block(block, entrypoint) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_intrinsic)
            continue;

         nir_intrinsic_instr *load = nir_instr_as_intrinsic(instr);

         if (load->intrinsic != nir_intrinsic_load_instance_id &&
             load->intrinsic != nir_intrinsic_load_view_index)
            continue;

         assert(load->dest.is_ssa);

         nir_ssa_def *value;
         if (load->intrinsic == nir_intrinsic_load_instance_id) {
            value = build_instance_id(&state);
         } else {
            assert(load->intrinsic == nir_intrinsic_load_view_index);
            value = build_view_index(&state);
         }

         nir_ssa_def_rewrite_uses(&load->dest.ssa, nir_src_for_ssa(value));

         nir_instr_remove(&load->instr);
         progress = true;
      }
   }

   /* The view index is available in all stages but the instance id is only
    * available in the VS.  If this isn't a fragment shader, we need to pass
    * the view index on to the next stage.
    */
   if (shader->info.stage != MESA_SHADER_FRAGMENT) {
      nir_ssa_def *view_index = build_view_index(&state);

      nir_builder *b = &state.builder;

      assert(view_index->parent_instr->block == nir_start_block(entrypoint));
      b->cursor = nir_after_instr(view_index->parent_instr);

      nir_variable *view_index_out =
         nir_variable_create(shader, nir_var_shader_out,
                             glsl_int_type(), "view index");
      view_index_out->data.location = VARYING_SLOT_VIEW_INDEX;
      nir_store_var(b, view_index_out, view_index, 0x1);

      nir_variable *layer_id_out =
         nir_variable_create(shader, nir_var_shader_out,
                             glsl_int_type(), "layer ID");
      layer_id_out->data.location = VARYING_SLOT_LAYER;
      nir_store_var(b, layer_id_out, view_index, 0x1);

      progress = true;
   }

   if (progress) {
      nir_metadata_preserve(entrypoint, nir_metadata_block_index |
                                        nir_metadata_dominance);
   }

   return progress;
}
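/*
 * A minimal sketch of how a driver might invoke this pass when compiling a
 * pipeline.  The "nir" and "subpass" names below are illustrative, not the
 * actual anv call site:
 *
 *    if (subpass->view_mask != 0)
 *       anv_nir_lower_multiview(nir, subpass->view_mask);
 *
 * The corresponding draw path then has to multiply instanceCount by
 * _mesa_bitcount(view_mask), which is what makes the instance_id math in
 * build_instance_id() and build_view_index() work out.
 */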