/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "si_pipe.h"
#include "radeon/r600_cs.h"
#include "sid.h"

#include "util/u_index_modify.h"
#include "util/u_upload_mgr.h"
#include "util/u_prim.h"

#include "ac_debug.h"

static unsigned si_conv_pipe_prim(unsigned mode)
{
	static const unsigned prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_008958_DI_PT_POINTLIST,
		[PIPE_PRIM_LINES] = V_008958_DI_PT_LINELIST,
		[PIPE_PRIM_LINE_LOOP] = V_008958_DI_PT_LINELOOP,
		[PIPE_PRIM_LINE_STRIP] = V_008958_DI_PT_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_008958_DI_PT_TRILIST,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_008958_DI_PT_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_008958_DI_PT_TRIFAN,
		[PIPE_PRIM_QUADS] = V_008958_DI_PT_QUADLIST,
		[PIPE_PRIM_QUAD_STRIP] = V_008958_DI_PT_QUADSTRIP,
		[PIPE_PRIM_POLYGON] = V_008958_DI_PT_POLYGON,
		[PIPE_PRIM_LINES_ADJACENCY] = V_008958_DI_PT_LINELIST_ADJ,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_008958_DI_PT_LINESTRIP_ADJ,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_008958_DI_PT_TRILIST_ADJ,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_008958_DI_PT_TRISTRIP_ADJ,
		[PIPE_PRIM_PATCHES] = V_008958_DI_PT_PATCH,
		[R600_PRIM_RECTANGLE_LIST] = V_008958_DI_PT_RECTLIST
	};
	assert(mode < ARRAY_SIZE(prim_conv));
	return prim_conv[mode];
}
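
/* Added summary of the table below: it maps the API primitive type to the
 * primitive type that leaves the GS/tessellation stages. All line
 * topologies collapse to LINESTRIP, and everything rasterized as filled
 * polygons (quads, polygons, fans, ...) collapses to TRISTRIP.
 */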
static unsigned si_conv_prim_to_gs_out(unsigned mode)
{
	static const int prim_conv[] = {
		[PIPE_PRIM_POINTS] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[PIPE_PRIM_LINES] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_LOOP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_FAN] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUADS] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_QUAD_STRIP] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_POLYGON] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_LINES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_LINE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_LINESTRIP,
		[PIPE_PRIM_TRIANGLES_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = V_028A6C_OUTPRIM_TYPE_TRISTRIP,
		[PIPE_PRIM_PATCHES] = V_028A6C_OUTPRIM_TYPE_POINTLIST,
		[R600_PRIM_RECTANGLE_LIST] = V_028A6C_OUTPRIM_TYPE_TRISTRIP
	};
	assert(mode < ARRAY_SIZE(prim_conv));

	return prim_conv[mode];
}

/**
 * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
 * LS.LDS_SIZE is shared by all 3 shader stages.
 *
 * The information about LDS and other non-compile-time parameters is then
 * written to userdata SGPRs.
 */
static void si_emit_derived_tess_state(struct si_context *sctx,
				       const struct pipe_draw_info *info,
				       unsigned *num_patches)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	struct si_shader_ctx_state *ls = &sctx->vs_shader;
	/* The TES pointer will only be used for sctx->last_tcs.
	 * It would be wrong to think that TCS = TES. */
	struct si_shader_selector *tcs =
		sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
	unsigned tes_sh_base = sctx->shader_userdata.sh_base[PIPE_SHADER_TESS_EVAL];
	unsigned num_tcs_input_cp = info->vertices_per_patch;
	unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
	unsigned num_tcs_patch_outputs;
	unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
	unsigned input_patch_size, output_patch_size, output_patch0_offset;
	unsigned perpatch_output_offset, lds_size, ls_rsrc2;
	unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
	unsigned offchip_layout, hardware_lds_size, ls_hs_config;

	if (sctx->last_ls == ls->current &&
	    sctx->last_tcs == tcs &&
	    sctx->last_tes_sh_base == tes_sh_base &&
	    sctx->last_num_tcs_input_cp == num_tcs_input_cp) {
		*num_patches = sctx->last_num_patches;
		return;
	}

	sctx->last_ls = ls->current;
	sctx->last_tcs = tcs;
	sctx->last_tes_sh_base = tes_sh_base;
	sctx->last_num_tcs_input_cp = num_tcs_input_cp;

	/* This calculates how shader inputs and outputs among VS, TCS, and TES
	 * are laid out in LDS. */
	num_tcs_inputs = util_last_bit64(ls->cso->outputs_written);

	if (sctx->tcs_shader.cso) {
		num_tcs_outputs = util_last_bit64(tcs->outputs_written);
		num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
		num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
	} else {
		/* No TCS. Route varyings from LS to TES. */
		num_tcs_outputs = num_tcs_inputs;
		num_tcs_output_cp = num_tcs_input_cp;
		num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
	}

	input_vertex_size = num_tcs_inputs * 16;
	output_vertex_size = num_tcs_outputs * 16;

	input_patch_size = num_tcs_input_cp * input_vertex_size;

	pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
	output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
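
	/* Worked example with arbitrary illustrative counts: for triangle
	 * patches (3 CPs in and out), 8 LS outputs, 10 TCS outputs and the
	 * 2 tess factor patch outputs, the sizes above come out as
	 *   input_vertex_size  = 8 * 16           = 128 bytes
	 *   input_patch_size   = 3 * 128          = 384 bytes
	 *   output_patch_size  = 3 * 160 + 2 * 16 = 512 bytes
	 * (each vec4 output occupies 16 bytes per vertex in LDS).
	 */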

	/* Ensure that we only need one wave per SIMD so we don't need to check
	 * resource usage. Also ensures that the number of tcs in and out
	 * vertices per threadgroup are at most 256.
	 */
	*num_patches = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp) * 4;

	/* Make sure that the data fits in LDS. This assumes the shaders only
	 * use LDS for the inputs and outputs.
	 */
	hardware_lds_size = sctx->b.chip_class >= CIK ? 65536 : 32768;
	*num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size +
							       output_patch_size));

	/* Make sure the output data fits in the offchip buffer */
	*num_patches = MIN2(*num_patches,
			    (sctx->screen->tess_offchip_block_dw_size * 4) /
			    output_patch_size);

	/* Not necessary for correctness, but improves performance. The
	 * specific value is taken from the proprietary driver.
	 */
	*num_patches = MIN2(*num_patches, 40);

	/* SI bug workaround - limit LS-HS threadgroups to only one wave. */
	if (sctx->b.chip_class == SI) {
		unsigned one_wave = 64 / MAX2(num_tcs_input_cp, num_tcs_output_cp);
		*num_patches = MIN2(*num_patches, one_wave);
	}

	sctx->last_num_patches = *num_patches;

	output_patch0_offset = input_patch_size * *num_patches;
	perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;

	lds_size = output_patch0_offset + output_patch_size * *num_patches;
	ls_rsrc2 = ls->current->config.rsrc2;

	if (sctx->b.chip_class >= CIK) {
		assert(lds_size <= 65536);
		lds_size = align(lds_size, 512) / 512;
	} else {
		assert(lds_size <= 32768);
		lds_size = align(lds_size, 256) / 256;
	}
	si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
	ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);

	/* Due to a hw bug, RSRC2_LS must be written twice with another
	 * LS register written in between. */
	if (sctx->b.chip_class == CIK && sctx->b.family != CHIP_HAWAII)
		radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
	radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
	radeon_emit(cs, ls->current->config.rsrc1);
	radeon_emit(cs, ls_rsrc2);

	/* Compute userdata SGPRs. */
	assert(((input_vertex_size / 4) & ~0xff) == 0);
	assert(((output_vertex_size / 4) & ~0xff) == 0);
	assert(((input_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch_size / 4) & ~0x1fff) == 0);
	assert(((output_patch0_offset / 16) & ~0xffff) == 0);
	assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
	assert(num_tcs_input_cp <= 32);
	assert(num_tcs_output_cp <= 32);

	tcs_in_layout = (input_patch_size / 4) |
			((input_vertex_size / 4) << 13);
	tcs_out_layout = (output_patch_size / 4) |
			 ((output_vertex_size / 4) << 13);
	tcs_out_offsets = (output_patch0_offset / 16) |
			  ((perpatch_output_offset / 16) << 16);
	offchip_layout = (pervertex_output_patch_size * *num_patches << 16) |
			 (num_tcs_output_cp << 9) | *num_patches;
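
	/* For reference (derived from the packing and asserts above), the
	 * words are laid out as:
	 *   tcs_in_layout:   [12:0] input_patch_size/4, [20:13] input_vertex_size/4
	 *   tcs_out_layout:  [12:0] output_patch_size/4, [20:13] output_vertex_size/4
	 *   tcs_out_offsets: [15:0] output_patch0_offset/16,
	 *                    [31:16] perpatch_output_offset/16
	 *   offchip_layout:  [8:0] num_patches, [14:9] num_tcs_output_cp,
	 *                    [31:16] pervertex_output_patch_size * num_patches
	 */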

	/* Set them for LS. */
	radeon_set_sh_reg(cs,
		R_00B530_SPI_SHADER_USER_DATA_LS_0 + SI_SGPR_LS_OUT_LAYOUT * 4,
		tcs_in_layout);

	/* Set them for TCS. */
	radeon_set_sh_reg_seq(cs,
		R_00B430_SPI_SHADER_USER_DATA_HS_0 + SI_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
	radeon_emit(cs, offchip_layout);
	radeon_emit(cs, tcs_out_offsets);
	radeon_emit(cs, tcs_out_layout | (num_tcs_input_cp << 26));
	radeon_emit(cs, tcs_in_layout);

	/* Set them for TES. */
	radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TCS_OFFCHIP_LAYOUT * 4, 1);
	radeon_emit(cs, offchip_layout);

	ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) |
		       S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
		       S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);

	if (sctx->b.chip_class >= CIK)
		radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
					   ls_hs_config);
	else
		radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
				       ls_hs_config);
}

static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
{
	switch (info->mode) {
	case PIPE_PRIM_PATCHES:
		return info->count / info->vertices_per_patch;
	case R600_PRIM_RECTANGLE_LIST:
		return info->count / 3;
	default:
		return u_prims_for_vertices(info->mode, info->count);
	}
}
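
/* Compute IA_MULTI_VGT_PARAM: the primgroup size together with the
 * SWITCH_ON_EOP/EOI and PARTIAL_*_WAVE bits required by the hardware
 * bugs and performance rules handled below.
 */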
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
					  const struct pipe_draw_info *info,
					  unsigned num_patches)
{
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	unsigned prim = info->mode;
	unsigned primgroup_size = 128; /* recommended without a GS */
	unsigned max_primgroup_in_wave = 2;

	/* SWITCH_ON_EOP(0) is always preferable. */
	bool wd_switch_on_eop = false;
	bool ia_switch_on_eop = false;
	bool ia_switch_on_eoi = false;
	bool partial_vs_wave = false;
	bool partial_es_wave = false;

	if (sctx->gs_shader.cso)
		primgroup_size = 64; /* recommended with a GS */

	if (sctx->tes_shader.cso) {
		/* primgroup_size must be set to a multiple of NUM_PATCHES */
		primgroup_size = num_patches;

		/* SWITCH_ON_EOI must be set if PrimID is used. */
		if ((sctx->tcs_shader.cso && sctx->tcs_shader.cso->info.uses_primid) ||
		    sctx->tes_shader.cso->info.uses_primid)
			ia_switch_on_eoi = true;

		/* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
		if ((sctx->b.family == CHIP_TAHITI ||
		     sctx->b.family == CHIP_PITCAIRN ||
		     sctx->b.family == CHIP_BONAIRE) &&
		    sctx->gs_shader.cso)
			partial_vs_wave = true;

		/* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
		if (sctx->screen->has_distributed_tess) {
			if (sctx->gs_shader.cso) {
				partial_es_wave = true;

				/* GPU hang workaround. */
				if (sctx->b.family == CHIP_TONGA ||
				    sctx->b.family == CHIP_FIJI ||
				    sctx->b.family == CHIP_POLARIS10 ||
				    sctx->b.family == CHIP_POLARIS11)
					partial_vs_wave = true;
			} else {
				partial_vs_wave = true;
			}
		}
	}

	/* This is a hardware requirement. */
	if ((rs && rs->line_stipple_enable) ||
	    (sctx->b.screen->debug_flags & DBG_SWITCH_ON_EOP)) {
		ia_switch_on_eop = true;
		wd_switch_on_eop = true;
	}

	if (sctx->b.chip_class >= CIK) {
		/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
		 * 4 shader engines. Set 1 to pass the assertion below.
		 * The other cases are hardware requirements.
		 *
		 * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
		 * for points, line strips, and tri strips.
		 */
		if (sctx->b.screen->info.max_se < 4 ||
		    prim == PIPE_PRIM_POLYGON ||
		    prim == PIPE_PRIM_LINE_LOOP ||
		    prim == PIPE_PRIM_TRIANGLE_FAN ||
		    prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
		    (info->primitive_restart &&
		     (sctx->b.family < CHIP_POLARIS10 ||
		      (prim != PIPE_PRIM_POINTS &&
		       prim != PIPE_PRIM_LINE_STRIP &&
		       prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
		    info->count_from_stream_output)
			wd_switch_on_eop = true;

		/* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
		 * We don't know that for indirect drawing, so treat it as
		 * always problematic. */
		if (sctx->b.family == CHIP_HAWAII &&
		    (info->indirect || info->instance_count > 1))
			wd_switch_on_eop = true;

		/* Performance recommendation for 4 SE Gfx7-8 parts if
		 * instances are smaller than a primgroup.
		 * Assume indirect draws always use small instances.
		 * This is needed for good VS wave utilization.
		 */
		if (sctx->b.chip_class <= VI &&
		    sctx->b.screen->info.max_se >= 4 &&
		    (info->indirect ||
		     (info->instance_count > 1 &&
		      si_num_prims_for_vertices(info) < primgroup_size)))
			wd_switch_on_eop = true;

		/* Required on CIK and later. */
		if (sctx->b.screen->info.max_se > 2 && !wd_switch_on_eop)
			ia_switch_on_eoi = true;

		/* Required by Hawaii and, for some special cases, by VI. */
		if (ia_switch_on_eoi &&
		    (sctx->b.family == CHIP_HAWAII ||
		     (sctx->b.chip_class == VI &&
		      (sctx->gs_shader.cso || max_primgroup_in_wave != 2))))
			partial_vs_wave = true;

		/* Instancing bug on Bonaire. */
		if (sctx->b.family == CHIP_BONAIRE && ia_switch_on_eoi &&
		    (info->indirect || info->instance_count > 1))
			partial_vs_wave = true;

		/* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
		 * The hw doc says all multi-SE chips are affected, but Vulkan
		 * only applies it to Hawaii. Do what Vulkan does.
		 */
		if (sctx->b.family == CHIP_HAWAII &&
		    sctx->gs_shader.cso &&
		    ia_switch_on_eoi &&
		    (info->indirect ||
		     (info->instance_count > 1 &&
		      si_num_prims_for_vertices(info) <= 1)))
			sctx->b.flags |= SI_CONTEXT_VGT_FLUSH;

		/* If the WD switch is false, the IA switch must be false too. */
		assert(wd_switch_on_eop || !ia_switch_on_eop);
	}

	/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
	if (ia_switch_on_eoi)
		partial_es_wave = true;

	/* GS requirement. */
	if (SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
		partial_es_wave = true;

	return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
	       S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
	       S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
	       S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
	       S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1) |
	       S_028AA8_WD_SWITCH_ON_EOP(sctx->b.chip_class >= CIK ? wd_switch_on_eop : 0) |
	       S_028AA8_MAX_PRIMGRP_IN_WAVE(sctx->b.chip_class >= VI ?
					    max_primgroup_in_wave : 0);
}

static void si_emit_scratch_reloc(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	if (!sctx->emit_scratch_reloc)
		return;

	radeon_set_context_reg(cs, R_0286E8_SPI_TMPRING_SIZE,
			       sctx->spi_tmpring_size);

	if (sctx->scratch_buffer) {
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  sctx->scratch_buffer, RADEON_USAGE_READWRITE,
					  RADEON_PRIO_SCRATCH_BUFFER);
	}
	sctx->emit_scratch_reloc = false;
}

/* rast_prim is the primitive type after GS. */
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned rast_prim = sctx->current_rast_prim;
	struct si_state_rasterizer *rs = sctx->emitted.named.rasterizer;

	/* Skip this if not rendering lines. */
	if (rast_prim != PIPE_PRIM_LINES &&
	    rast_prim != PIPE_PRIM_LINE_LOOP &&
	    rast_prim != PIPE_PRIM_LINE_STRIP &&
	    rast_prim != PIPE_PRIM_LINES_ADJACENCY &&
	    rast_prim != PIPE_PRIM_LINE_STRIP_ADJACENCY)
		return;

	if (rast_prim == sctx->last_rast_prim &&
	    rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
		return;

	/* For lines, reset the stipple pattern at each primitive. Otherwise,
	 * reset the stipple pattern at each packet (line strips, line loops).
	 */
	radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
		rs->pa_sc_line_stipple |
		S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));

	sctx->last_rast_prim = rast_prim;
	sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
}
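
/* Emit the per-draw VGT registers. Each value is cached in a sctx->last_*
 * field and only re-emitted when it actually changes, to avoid redundant
 * register writes.
 */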
static void si_emit_draw_registers(struct si_context *sctx,
				   const struct pipe_draw_info *info)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned prim = si_conv_pipe_prim(info->mode);
	unsigned gs_out_prim = si_conv_prim_to_gs_out(sctx->current_rast_prim);
	unsigned ia_multi_vgt_param, num_patches = 0;

	/* Polaris needs different VTX_REUSE_DEPTH settings depending on
	 * whether the "fractional odd" tessellation spacing is used.
	 */
	if (sctx->b.family >= CHIP_POLARIS10) {
		struct si_shader_selector *tes = sctx->tes_shader.cso;
		unsigned vtx_reuse_depth = 30;

		if (tes &&
		    tes->info.properties[TGSI_PROPERTY_TES_SPACING] ==
		    PIPE_TESS_SPACING_FRACTIONAL_ODD)
			vtx_reuse_depth = 14;

		if (vtx_reuse_depth != sctx->last_vtx_reuse_depth) {
			radeon_set_context_reg(cs, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL,
					       vtx_reuse_depth);
			sctx->last_vtx_reuse_depth = vtx_reuse_depth;
		}
	}

	if (sctx->tes_shader.cso)
		si_emit_derived_tess_state(sctx, info, &num_patches);

	ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);

	/* Draw state. */
	if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
		if (sctx->b.chip_class >= CIK)
			radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
		else
			radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);

		sctx->last_multi_vgt_param = ia_multi_vgt_param;
	}
	if (prim != sctx->last_prim) {
		if (sctx->b.chip_class >= CIK)
			radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
		else
			radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);

		sctx->last_prim = prim;
	}

	if (gs_out_prim != sctx->last_gs_out_prim) {
		radeon_set_context_reg(cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, gs_out_prim);
		sctx->last_gs_out_prim = gs_out_prim;
	}

	/* Primitive restart. */
	if (info->primitive_restart != sctx->last_primitive_restart_en) {
		radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, info->primitive_restart);
		sctx->last_primitive_restart_en = info->primitive_restart;
	}
	if (info->primitive_restart &&
	    (info->restart_index != sctx->last_restart_index ||
	     sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN)) {
		radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
				       info->restart_index);
		sctx->last_restart_index = info->restart_index;
	}
}

static void si_emit_draw_packets(struct si_context *sctx,
				 const struct pipe_draw_info *info,
				 const struct pipe_index_buffer *ib)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	unsigned sh_base_reg = sctx->shader_userdata.sh_base[PIPE_SHADER_VERTEX];
	bool render_cond_bit = sctx->b.render_cond && !sctx->b.render_cond_force_off;
	uint32_t index_max_size = 0;
	uint64_t index_va = 0;

	if (info->count_from_stream_output) {
		struct r600_so_target *t =
			(struct r600_so_target*)info->count_from_stream_output;
		uint64_t va = t->buf_filled_size->gpu_address +
			      t->buf_filled_size_offset;

		radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
				       t->stride_in_dw);

		radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
		radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			    COPY_DATA_DST_SEL(COPY_DATA_REG) |
			    COPY_DATA_WR_CONFIRM);
		radeon_emit(cs, va); /* src address lo */
		radeon_emit(cs, va >> 32); /* src address hi */
		radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
		radeon_emit(cs, 0); /* unused */

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  t->buf_filled_size, RADEON_USAGE_READ,
					  RADEON_PRIO_SO_FILLED_SIZE);
	}
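
	/* With the VGT_STRMOUT_DRAW_OPAQUE_* registers programmed above, a
	 * draw with USE_OPAQUE set (see the DRAW_INDEX_AUTO case below) lets
	 * the hardware derive the vertex count itself as
	 * BUFFER_FILLED_SIZE / VERTEX_STRIDE, so no CPU readback of the
	 * streamout size is needed.
	 */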

	/* draw packet */
	if (info->indexed) {
		if (ib->index_size != sctx->last_index_size) {
			radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));

			/* index type */
			switch (ib->index_size) {
			case 1:
				radeon_emit(cs, V_028A7C_VGT_INDEX_8);
				break;
			case 2:
				radeon_emit(cs, V_028A7C_VGT_INDEX_16 |
					    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
					     V_028A7C_VGT_DMA_SWAP_16_BIT : 0));
				break;
			case 4:
				radeon_emit(cs, V_028A7C_VGT_INDEX_32 |
					    (SI_BIG_ENDIAN && sctx->b.chip_class <= CIK ?
					     V_028A7C_VGT_DMA_SWAP_32_BIT : 0));
				break;
			default:
				assert(!"unreachable");
				return;
			}

			sctx->last_index_size = ib->index_size;
		}

		index_max_size = (ib->buffer->width0 - ib->offset) /
				 ib->index_size;
		index_va = r600_resource(ib->buffer)->gpu_address + ib->offset;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)ib->buffer,
					  RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
	} else {
		/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
		 * so the state must be re-emitted before the next indexed draw.
		 */
		if (sctx->b.chip_class >= CIK)
			sctx->last_index_size = -1;
	}

	if (!info->indirect) {
		int base_vertex;

		radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
		radeon_emit(cs, info->instance_count);

		/* Base vertex and start instance. */
		base_vertex = info->indexed ? info->index_bias : info->start;

		if (base_vertex != sctx->last_base_vertex ||
		    sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
		    info->start_instance != sctx->last_start_instance ||
		    info->drawid != sctx->last_drawid ||
		    sh_base_reg != sctx->last_sh_base_reg) {
			radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
			radeon_emit(cs, base_vertex);
			radeon_emit(cs, info->start_instance);
			radeon_emit(cs, info->drawid);

			sctx->last_base_vertex = base_vertex;
			sctx->last_start_instance = info->start_instance;
			sctx->last_drawid = info->drawid;
			sctx->last_sh_base_reg = sh_base_reg;
		}
	} else {
		uint64_t indirect_va = r600_resource(info->indirect)->gpu_address;

		assert(indirect_va % 8 == 0);

		si_invalidate_draw_sh_constants(sctx);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, 1);
		radeon_emit(cs, indirect_va);
		radeon_emit(cs, indirect_va >> 32);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource *)info->indirect,
					  RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
	}

	if (info->indirect) {
		unsigned di_src_sel = info->indexed ? V_0287F0_DI_SRC_SEL_DMA
						    : V_0287F0_DI_SRC_SEL_AUTO_INDEX;

		assert(info->indirect_offset % 4 == 0);

		if (info->indexed) {
			radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
			radeon_emit(cs, index_va);
			radeon_emit(cs, index_va >> 32);

			radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
			radeon_emit(cs, index_max_size);
		}
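
		/* Two indirect paths: GPUs without DRAW_INDIRECT_MULTI get a
		 * plain DRAW_(INDEX_)INDIRECT packet, while newer CPs take a
		 * single *_MULTI packet that loops over indirect_count records
		 * and can optionally fetch the draw count from memory
		 * (COUNT_INDIRECT_ENABLE with count_va below).
		 */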

		if (!sctx->screen->has_draw_indirect_multi) {
			radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT :
							     PKT3_DRAW_INDIRECT,
					     3, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, di_src_sel);
		} else {
			uint64_t count_va = 0;

			if (info->indirect_params) {
				struct r600_resource *params_buf =
					(struct r600_resource *)info->indirect_params;

				radeon_add_to_buffer_list(
					&sctx->b, &sctx->b.gfx, params_buf,
					RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);

				count_va = params_buf->gpu_address + info->indirect_params_offset;
			}

			radeon_emit(cs, PKT3(info->indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
							     PKT3_DRAW_INDIRECT_MULTI,
					     8, render_cond_bit));
			radeon_emit(cs, info->indirect_offset);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
			radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
					S_2C3_DRAW_INDEX_ENABLE(1) |
					S_2C3_COUNT_INDIRECT_ENABLE(!!info->indirect_params));
			radeon_emit(cs, info->indirect_count);
			radeon_emit(cs, count_va);
			radeon_emit(cs, count_va >> 32);
			radeon_emit(cs, info->indirect_stride);
			radeon_emit(cs, di_src_sel);
		}
	} else {
		if (info->indexed) {
			index_va += info->start * ib->index_size;

			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
			radeon_emit(cs, index_max_size);
			radeon_emit(cs, index_va);
			radeon_emit(cs, (index_va >> 32UL) & 0xFF);
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
		} else {
			radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
			radeon_emit(cs, info->count);
			radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
				    S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
		}
	}
}

static void si_emit_surface_sync(struct r600_common_context *rctx,
				 unsigned cp_coher_cntl)
{
	struct radeon_winsys_cs *cs = rctx->gfx.cs;

	/* ACQUIRE_MEM is only required on a compute ring. */
	radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
	radeon_emit(cs, cp_coher_cntl);	/* CP_COHER_CNTL */
	radeon_emit(cs, 0xffffffff);	/* CP_COHER_SIZE */
	radeon_emit(cs, 0);		/* CP_COHER_BASE */
	radeon_emit(cs, 0x0000000A);	/* POLL_INTERVAL */
}
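
/* Emit all cache flushes and synchronization requested via sctx->b.flags.
 * Rough ordering below: CB/DB flush events and shader idle waits first,
 * then PFP_SYNC_ME so the prefetch parser cannot run ahead of the ME, and
 * finally the SURFACE_SYNC packets doing the TC/L1/L2 invalidations.
 */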
void si_emit_cache_flush(struct si_context *sctx)
{
	struct r600_common_context *rctx = &sctx->b;
	struct radeon_winsys_cs *cs = rctx->gfx.cs;
	uint32_t cp_coher_cntl = 0;

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_FRAMEBUFFER)
		sctx->b.num_fb_cache_flushes++;

	/* SI has a bug that it always flushes ICACHE and KCACHE if either
	 * bit is set. An alternative way is to write SQC_CACHES, but that
	 * doesn't seem to work reliably. Since the bug doesn't affect
	 * correctness (it only does more work than necessary) and
	 * the performance impact is likely negligible, there is no plan
	 * to add a workaround for it.
	 */

	if (rctx->flags & SI_CONTEXT_INV_ICACHE)
		cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	if (rctx->flags & SI_CONTEXT_INV_SMEM_L1)
		cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
		cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
				 S_0085F0_CB0_DEST_BASE_ENA(1) |
				 S_0085F0_CB1_DEST_BASE_ENA(1) |
				 S_0085F0_CB2_DEST_BASE_ENA(1) |
				 S_0085F0_CB3_DEST_BASE_ENA(1) |
				 S_0085F0_CB4_DEST_BASE_ENA(1) |
				 S_0085F0_CB5_DEST_BASE_ENA(1) |
				 S_0085F0_CB6_DEST_BASE_ENA(1) |
				 S_0085F0_CB7_DEST_BASE_ENA(1);

		/* Necessary for DCC */
		if (rctx->chip_class == VI)
			r600_gfx_write_event_eop(rctx, V_028A90_FLUSH_AND_INV_CB_DATA_TS,
						 0, 0, NULL, 0, 0, 0);
	}
	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
		cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
				 S_0085F0_DB_DEST_BASE_ENA(1);
	}

	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
		/* needed for wait for idle in SURFACE_SYNC */
		assert(rctx->flags & SI_CONTEXT_FLUSH_AND_INV_CB);
	}
	if (rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB_META) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
		/* needed for wait for idle in SURFACE_SYNC */
		assert(rctx->flags & SI_CONTEXT_FLUSH_AND_INV_DB);
	}

	/* Wait for shader engines to go idle.
	 * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
	 * for everything including CB/DB cache flushes.
	 */
	if (!(rctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
			     SI_CONTEXT_FLUSH_AND_INV_DB))) {
		if (rctx->flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			/* Only count explicit shader flushes, not implicit ones
			 * done by SURFACE_SYNC.
			 */
			rctx->num_vs_flushes++;
			rctx->num_ps_flushes++;
		} else if (rctx->flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
			radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
			radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
			rctx->num_vs_flushes++;
		}
	}

	if (rctx->flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
	    sctx->compute_is_busy) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
		rctx->num_cs_flushes++;
		sctx->compute_is_busy = false;
	}

	/* VGT state synchronization. */
	if (rctx->flags & SI_CONTEXT_VGT_FLUSH) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
	}
	if (rctx->flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
	}

	/* Make sure ME is idle (it executes most packets) before continuing.
	 * This prevents read-after-write hazards between PFP and ME.
	 */
	if (cp_coher_cntl ||
	    (rctx->flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
			    SI_CONTEXT_INV_VMEM_L1 |
			    SI_CONTEXT_INV_GLOBAL_L2 |
			    SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}

	/* When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
	 * waits for idle. Therefore, it should be last. SURFACE_SYNC is done
	 * in PFP.
	 *
	 * cp_coher_cntl should contain all necessary flags except TC flags
	 * at this point.
	 *
	 * SI-CIK don't support L2 write-back.
	 */
	if (rctx->flags & SI_CONTEXT_INV_GLOBAL_L2 ||
	    (rctx->chip_class <= CIK &&
	     (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
		/* Invalidate L1 & L2. (L1 is always invalidated on SI)
		 * WB must be set on VI+ when TC_ACTION is set.
		 */
		si_emit_surface_sync(rctx, cp_coher_cntl |
				     S_0085F0_TC_ACTION_ENA(1) |
				     S_0085F0_TCL1_ACTION_ENA(1) |
				     S_0301F0_TC_WB_ACTION_ENA(rctx->chip_class >= VI));
		cp_coher_cntl = 0;
		sctx->b.num_L2_invalidates++;
	} else {
		/* L1 invalidation and L2 writeback must be done separately,
		 * because both operations can't be done together.
		 */
		if (rctx->flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
			/* WB = write-back
			 * NC = apply to non-coherent MTYPEs
			 *      (i.e. MTYPE <= 1, which is what we use everywhere)
			 *
			 * WB doesn't work without NC.
			 */
			si_emit_surface_sync(rctx, cp_coher_cntl |
					     S_0301F0_TC_WB_ACTION_ENA(1) |
					     S_0301F0_TC_NC_ACTION_ENA(1));
			cp_coher_cntl = 0;
			sctx->b.num_L2_writebacks++;
		}
		if (rctx->flags & SI_CONTEXT_INV_VMEM_L1) {
			/* Invalidate per-CU VMEM L1. */
			si_emit_surface_sync(rctx, cp_coher_cntl |
					     S_0085F0_TCL1_ACTION_ENA(1));
			cp_coher_cntl = 0;
		}
	}

	/* If TC flushes haven't cleared this... */
	if (cp_coher_cntl)
		si_emit_surface_sync(rctx, cp_coher_cntl);

	if (rctx->flags & R600_CONTEXT_START_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
			    EVENT_INDEX(0));
	} else if (rctx->flags & R600_CONTEXT_STOP_PIPELINE_STATS) {
		radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
			    EVENT_INDEX(0));
	}

	rctx->flags = 0;
}

static void si_get_draw_start_count(struct si_context *sctx,
				    const struct pipe_draw_info *info,
				    unsigned *start, unsigned *count)
{
	if (info->indirect) {
		unsigned indirect_count;
		struct pipe_transfer *transfer;
		unsigned begin, end;
		unsigned map_size;
		unsigned *data;

		if (info->indirect_params) {
			data = pipe_buffer_map_range(&sctx->b.b,
						     info->indirect_params,
						     info->indirect_params_offset,
						     sizeof(unsigned),
						     PIPE_TRANSFER_READ, &transfer);

			indirect_count = *data;

			pipe_buffer_unmap(&sctx->b.b, transfer);
		} else {
			indirect_count = info->indirect_count;
		}

		if (!indirect_count) {
			*start = *count = 0;
			return;
		}

		map_size = (indirect_count - 1) * info->indirect_stride + 3 * sizeof(unsigned);
		data = pipe_buffer_map_range(&sctx->b.b, info->indirect,
					     info->indirect_offset, map_size,
					     PIPE_TRANSFER_READ, &transfer);

		begin = UINT_MAX;
		end = 0;

		for (unsigned i = 0; i < indirect_count; ++i) {
			unsigned count = data[0];
			unsigned start = data[2];

			if (count > 0) {
				begin = MIN2(begin, start);
				end = MAX2(end, start + count);
			}

			data += info->indirect_stride / sizeof(unsigned);
		}

		pipe_buffer_unmap(&sctx->b.b, transfer);

		if (begin < end) {
			*start = begin;
			*count = end - begin;
		} else {
			*start = *count = 0;
		}
	} else {
		*start = info->start;
		*count = info->count;
	}
}
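
/* CE/DE synchronization: the constant engine (CE) runs ahead of the draw
 * engine (DE) filling descriptor memory. Before a draw that uses newly
 * written descriptors, the DE waits for the CE counter incremented here;
 * after the draw, the DE counter is bumped so the CE knows when it may
 * reuse that memory.
 */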
void si_ce_pre_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->ce_ib, PKT3(PKT3_INCREMENT_CE_COUNTER, 0, 0));
		radeon_emit(sctx->ce_ib, 1);

		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_WAIT_ON_CE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 1);
	}
}

void si_ce_post_draw_synchronization(struct si_context *sctx)
{
	if (sctx->ce_need_synchronization) {
		radeon_emit(sctx->b.gfx.cs, PKT3(PKT3_INCREMENT_DE_COUNTER, 0, 0));
		radeon_emit(sctx->b.gfx.cs, 0);

		sctx->ce_need_synchronization = false;
	}
}

static void cik_prefetch_shader_async(struct si_context *sctx,
				      struct si_pm4_state *state)
{
	if (state) {
		struct pipe_resource *bo = &state->bo[0]->b.b;
		assert(state->nbo == 1);

		cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
	}
}

void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
	struct pipe_index_buffer ib = {};
	unsigned mask, dirty_fb_counter, dirty_tex_counter, rast_prim;

	if (likely(!info->indirect)) {
		/* SI-CI treat instance_count==0 as instance_count==1. There is
		 * no workaround for indirect draws, but we can at least skip
		 * direct draws.
		 */
		if (unlikely(!info->instance_count))
			return;

		/* Handle count == 0. */
		if (unlikely(!info->count &&
			     (info->indexed || !info->count_from_stream_output)))
			return;
	}

	if (unlikely(!sctx->vs_shader.cso)) {
		assert(0);
		return;
	}
	if (unlikely(!sctx->ps_shader.cso && (!rs || !rs->rasterizer_discard))) {
		assert(0);
		return;
	}
	if (unlikely(!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES))) {
		assert(0);
		return;
	}

	/* Re-emit the framebuffer state if needed. */
	dirty_fb_counter = p_atomic_read(&sctx->b.screen->dirty_fb_counter);
	if (unlikely(dirty_fb_counter != sctx->b.last_dirty_fb_counter)) {
		sctx->b.last_dirty_fb_counter = dirty_fb_counter;
		sctx->framebuffer.dirty_cbufs |=
			((1 << sctx->framebuffer.state.nr_cbufs) - 1);
		sctx->framebuffer.dirty_zsbuf = true;
		si_mark_atom_dirty(sctx, &sctx->framebuffer.atom);
	}

	/* Invalidate & recompute texture descriptors if needed. */
	dirty_tex_counter = p_atomic_read(&sctx->b.screen->dirty_tex_descriptor_counter);
	if (unlikely(dirty_tex_counter != sctx->b.last_dirty_tex_descriptor_counter)) {
		sctx->b.last_dirty_tex_descriptor_counter = dirty_tex_counter;
		si_update_all_texture_descriptors(sctx);
	}

	si_decompress_graphics_textures(sctx);

	/* Set the rasterization primitive type.
	 *
	 * This must be done after si_decompress_textures, which can call
	 * draw_vbo recursively, and before si_update_shaders, which uses
	 * current_rast_prim for this draw_vbo call. */
	if (sctx->gs_shader.cso)
		rast_prim = sctx->gs_shader.cso->gs_output_prim;
	else if (sctx->tes_shader.cso)
		rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
	else
		rast_prim = info->mode;

	if (rast_prim != sctx->current_rast_prim) {
		sctx->current_rast_prim = rast_prim;
		sctx->do_update_shaders = true;
	}

	if (sctx->gs_shader.cso) {
		/* Determine whether the GS triangle strip adjacency fix should
		 * be applied. Rotate every other triangle if
		 * - triangle strips with adjacency are fed to the GS and
		 * - primitive restart is disabled (the rotation doesn't help
		 *   when the restart occurs after an odd number of triangles).
		 */
		bool gs_tri_strip_adj_fix =
			!sctx->tes_shader.cso &&
			info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
			!info->primitive_restart;

		if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
			sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
			sctx->do_update_shaders = true;
		}
	}

	if (sctx->do_update_shaders && !si_update_shaders(sctx))
		return;

	if (!si_upload_graphics_shader_descriptors(sctx))
		return;

	if (info->indexed) {
		/* Initialize the index buffer struct. */
		pipe_resource_reference(&ib.buffer, sctx->index_buffer.buffer);
		ib.user_buffer = sctx->index_buffer.user_buffer;
		ib.index_size = sctx->index_buffer.index_size;
		ib.offset = sctx->index_buffer.offset;

		/* Translate or upload, if needed. */
		/* 8-bit indices are supported on VI. */
		if (sctx->b.chip_class <= CIK && ib.index_size == 1) {
			struct pipe_resource *out_buffer = NULL;
			unsigned out_offset, start, count, start_offset;
			void *ptr;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * 2;

			u_upload_alloc(sctx->b.uploader, start_offset, count * 2, 256,
				       &out_offset, &out_buffer, &ptr);
			if (!out_buffer) {
				pipe_resource_reference(&ib.buffer, NULL);
				return;
			}

			util_shorten_ubyte_elts_to_userptr(&sctx->b.b, &ib, 0, 0,
							   ib.offset + start,
							   count, ptr);

			pipe_resource_reference(&ib.buffer, NULL);
			ib.user_buffer = NULL;
			ib.buffer = out_buffer;
			/* info->start will be added by the drawing code */
			ib.offset = out_offset - start_offset;
			ib.index_size = 2;
		} else if (ib.user_buffer && !ib.buffer) {
			unsigned start, count, start_offset;

			si_get_draw_start_count(sctx, info, &start, &count);
			start_offset = start * ib.index_size;

			u_upload_data(sctx->b.uploader, start_offset, count * ib.index_size,
				      256, (char*)ib.user_buffer + start_offset,
				      &ib.offset, &ib.buffer);
			if (!ib.buffer)
				return;
			/* info->start will be added by the drawing code */
			ib.offset -= start_offset;
		}
	}

	/* VI reads index buffers through TC L2. */
	if (info->indexed && sctx->b.chip_class <= CIK &&
	    r600_resource(ib.buffer)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
		r600_resource(ib.buffer)->TC_L2_dirty = false;
	}

	if (info->indirect && r600_resource(info->indirect)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
		r600_resource(info->indirect)->TC_L2_dirty = false;
	}

	if (info->indirect_params &&
	    r600_resource(info->indirect_params)->TC_L2_dirty) {
		sctx->b.flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
		r600_resource(info->indirect_params)->TC_L2_dirty = false;
	}

	/* Add buffer sizes for memory checking in need_cs_space. */
	if (sctx->emit_scratch_reloc && sctx->scratch_buffer)
		r600_context_add_resource_size(ctx, &sctx->scratch_buffer->b.b);
	if (info->indirect)
		r600_context_add_resource_size(ctx, info->indirect);

	si_need_cs_space(sctx);

	/* Since we've called r600_context_add_resource_size for vertex buffers,
	 * this must be called after si_need_cs_space, because we must let
	 * need_cs_space flush before we add buffers to the buffer list.
	 */
	if (!si_upload_vertex_buffer_descriptors(sctx))
		return;

	/* Flush caches before prefetching shaders. */
	if (sctx->b.flags)
		si_emit_cache_flush(sctx);

	/* Prefetch shaders and VBO descriptors to TC L2. */
	if (sctx->b.chip_class >= CIK) {
		if (si_pm4_state_changed(sctx, ls))
			cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
		if (si_pm4_state_changed(sctx, hs))
			cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
		if (si_pm4_state_changed(sctx, es))
			cik_prefetch_shader_async(sctx, sctx->queued.named.es);
		if (si_pm4_state_changed(sctx, gs))
			cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
		if (si_pm4_state_changed(sctx, vs))
			cik_prefetch_shader_async(sctx, sctx->queued.named.vs);

		/* Vertex buffer descriptors are uploaded uncached, so prefetch
		 * them right after the VS binary. */
		if (sctx->vertex_buffer_pointer_dirty) {
			cik_prefetch_TC_L2_async(sctx, &sctx->vertex_buffers.buffer->b.b,
						 sctx->vertex_buffers.buffer_offset,
						 sctx->vertex_elements->count * 16);
		}
		if (si_pm4_state_changed(sctx, ps))
			cik_prefetch_shader_async(sctx, sctx->queued.named.ps);
	}

	/* Emit states. */
	mask = sctx->dirty_atoms;
	while (mask) {
		struct r600_atom *atom = sctx->atoms.array[u_bit_scan(&mask)];

		atom->emit(&sctx->b, atom);
	}
	sctx->dirty_atoms = 0;

	si_pm4_emit_dirty(sctx);
	si_emit_scratch_reloc(sctx);
	si_emit_rasterizer_prim_state(sctx);
	si_emit_draw_registers(sctx, info);

	si_ce_pre_draw_synchronization(sctx);

	si_emit_draw_packets(sctx, info, &ib);

	si_ce_post_draw_synchronization(sctx);

	if (sctx->trace_buf)
		si_trace_emit(sctx);

	/* Workaround for a VGT hang when streamout is enabled.
	 * It must be done after drawing. */
	if ((sctx->b.family == CHIP_HAWAII ||
	     sctx->b.family == CHIP_TONGA ||
	     sctx->b.family == CHIP_FIJI) &&
	    r600_get_strmout_en(&sctx->b)) {
		sctx->b.flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
	}
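
	/* Note: the dirty_level_mask updates below are what later calls to
	 * si_decompress_graphics_textures use to decide which levels need a
	 * decompress pass before they can be sampled.
	 */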
	/* Set the depth buffer as dirty. */
	if (sctx->framebuffer.state.zsbuf) {
		struct pipe_surface *surf = sctx->framebuffer.state.zsbuf;
		struct r600_texture *rtex = (struct r600_texture *)surf->texture;

		if (!rtex->tc_compatible_htile)
			rtex->dirty_level_mask |= 1 << surf->u.tex.level;

		if (rtex->surface.flags & RADEON_SURF_SBUFFER)
			rtex->stencil_dirty_level_mask |= 1 << surf->u.tex.level;
	}
	if (sctx->framebuffer.compressed_cb_mask) {
		struct pipe_surface *surf;
		struct r600_texture *rtex;
		unsigned mask = sctx->framebuffer.compressed_cb_mask;

		do {
			unsigned i = u_bit_scan(&mask);
			surf = sctx->framebuffer.state.cbufs[i];
			rtex = (struct r600_texture*)surf->texture;

			if (rtex->fmask.size)
				rtex->dirty_level_mask |= 1 << surf->u.tex.level;
			if (rtex->dcc_gather_statistics)
				rtex->separate_dcc_dirty = true;
		} while (mask);
	}

	pipe_resource_reference(&ib.buffer, NULL);
	sctx->b.num_draw_calls++;
	if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
		sctx->b.num_spill_draw_calls++;
}

void si_trace_emit(struct si_context *sctx)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;

	sctx->trace_id++;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, sctx->trace_buf,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_TRACE);
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, sctx->trace_buf->gpu_address);
	radeon_emit(cs, sctx->trace_buf->gpu_address >> 32);
	radeon_emit(cs, sctx->trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(sctx->trace_id));
}