1 /* 2 * Copyright 2016 Red Hat. 3 * Copyright 2016 Bas Nieuwenhuizen 4 * 5 * based in part on anv driver which is: 6 * Copyright 2015 Intel Corporation 7 * 8 * Permission is hereby granted, free of charge, to any person obtaining a 9 * copy of this software and associated documentation files (the "Software"), 10 * to deal in the Software without restriction, including without limitation 11 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 12 * and/or sell copies of the Software, and to permit persons to whom the 13 * Software is furnished to do so, subject to the following conditions: 14 * 15 * The above copyright notice and this permission notice (including the next 16 * paragraph) shall be included in all copies or substantial portions of the 17 * Software. 18 * 19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 25 * IN THE SOFTWARE. 26 */ 27 28 #include "radv_private.h" 29 #include "radv_radeon_winsys.h" 30 #include "radv_cs.h" 31 #include "sid.h" 32 #include "vk_format.h" 33 #include "radv_meta.h" 34 35 #include "ac_debug.h" 36 37 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, 38 struct radv_image *image, 39 VkImageLayout src_layout, 40 VkImageLayout dst_layout, 41 int src_family, 42 int dst_family, 43 VkImageSubresourceRange range, 44 VkImageAspectFlags pending_clears); 45 46 const struct radv_dynamic_state default_dynamic_state = { 47 .viewport = { 48 .count = 0, 49 }, 50 .scissor = { 51 .count = 0, 52 }, 53 .line_width = 1.0f, 54 .depth_bias = { 55 .bias = 0.0f, 56 .clamp = 0.0f, 57 .slope = 0.0f, 58 }, 59 .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f }, 60 .depth_bounds = { 61 .min = 0.0f, 62 .max = 1.0f, 63 }, 64 .stencil_compare_mask = { 65 .front = ~0u, 66 .back = ~0u, 67 }, 68 .stencil_write_mask = { 69 .front = ~0u, 70 .back = ~0u, 71 }, 72 .stencil_reference = { 73 .front = 0u, 74 .back = 0u, 75 }, 76 }; 77 78 void 79 radv_dynamic_state_copy(struct radv_dynamic_state *dest, 80 const struct radv_dynamic_state *src, 81 uint32_t copy_mask) 82 { 83 if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) { 84 dest->viewport.count = src->viewport.count; 85 typed_memcpy(dest->viewport.viewports, src->viewport.viewports, 86 src->viewport.count); 87 } 88 89 if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) { 90 dest->scissor.count = src->scissor.count; 91 typed_memcpy(dest->scissor.scissors, src->scissor.scissors, 92 src->scissor.count); 93 } 94 95 if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) 96 dest->line_width = src->line_width; 97 98 if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) 99 dest->depth_bias = src->depth_bias; 100 101 if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) 102 typed_memcpy(dest->blend_constants, src->blend_constants, 4); 103 104 if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) 105 dest->depth_bounds = src->depth_bounds; 106 107 if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) 108 dest->stencil_compare_mask = src->stencil_compare_mask; 109 110 if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) 111 dest->stencil_write_mask = src->stencil_write_mask; 112 113 
	if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
		dest->stencil_reference = src->stencil_reference;
}

bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
	return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
	       cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
}

enum ring_type radv_queue_family_to_ring(int f) {
	switch (f) {
	case RADV_QUEUE_GENERAL:
		return RING_GFX;
	case RADV_QUEUE_COMPUTE:
		return RING_COMPUTE;
	case RADV_QUEUE_TRANSFER:
		return RING_DMA;
	default:
		unreachable("Unknown queue family");
	}
}

static VkResult radv_create_cmd_buffer(
	struct radv_device *         device,
	struct radv_cmd_pool *       pool,
	VkCommandBufferLevel         level,
	VkCommandBuffer*             pCommandBuffer)
{
	struct radv_cmd_buffer *cmd_buffer;
	VkResult result;
	unsigned ring;
	cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
			      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cmd_buffer == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	memset(cmd_buffer, 0, sizeof(*cmd_buffer));
	cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
	cmd_buffer->device = device;
	cmd_buffer->pool = pool;
	cmd_buffer->level = level;

	if (pool) {
		list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
		cmd_buffer->queue_family_index = pool->queue_family_index;

	} else {
		/* Init the pool_link so we can safely call list_del when we destroy
		 * the command buffer
		 */
		list_inithead(&cmd_buffer->pool_link);
		cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
	}

	ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);

	cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
	if (!cmd_buffer->cs) {
		result = VK_ERROR_OUT_OF_HOST_MEMORY;
		goto fail;
	}

	*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.size = 0;
	list_inithead(&cmd_buffer->upload.list);

	return VK_SUCCESS;

fail:
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

	return result;
}

static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
				  uint64_t min_needed)
{
	uint64_t new_size;
	struct radeon_winsys_bo *bo;
	struct radv_cmd_buffer_upload *upload;
	struct radv_device *device = cmd_buffer->device;

	new_size = MAX2(min_needed, 16 * 1024);
	new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

	bo = device->ws->buffer_create(device->ws,
				       new_size, 4096,
				       RADEON_DOMAIN_GTT,
				       RADEON_FLAG_CPU_ACCESS);

	if (!bo) {
		cmd_buffer->record_fail = true;
		return false;
	}

	device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8);
	if (cmd_buffer->upload.upload_bo) {
		upload = malloc(sizeof(*upload));

		if (!upload) {
			cmd_buffer->record_fail = true;
			device->ws->buffer_destroy(bo);
			return false;
		}

		memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
		list_add(&upload->list, &cmd_buffer->upload.list);
	}

	cmd_buffer->upload.upload_bo = bo;
	cmd_buffer->upload.size = new_size;
	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

	if (!cmd_buffer->upload.map) {
		cmd_buffer->record_fail = true;
		return false;
	}

	return true;
}

bool
radv_cmd_buffer_upload_alloc(struct
radv_cmd_buffer *cmd_buffer, 241 unsigned size, 242 unsigned alignment, 243 unsigned *out_offset, 244 void **ptr) 245 { 246 uint64_t offset = align(cmd_buffer->upload.offset, alignment); 247 if (offset + size > cmd_buffer->upload.size) { 248 if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size)) 249 return false; 250 offset = 0; 251 } 252 253 *out_offset = offset; 254 *ptr = cmd_buffer->upload.map + offset; 255 256 cmd_buffer->upload.offset = offset + size; 257 return true; 258 } 259 260 bool 261 radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer, 262 unsigned size, unsigned alignment, 263 const void *data, unsigned *out_offset) 264 { 265 uint8_t *ptr; 266 267 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment, 268 out_offset, (void **)&ptr)) 269 return false; 270 271 if (ptr) 272 memcpy(ptr, data, size); 273 274 return true; 275 } 276 277 void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer) 278 { 279 struct radv_device *device = cmd_buffer->device; 280 struct radeon_winsys_cs *cs = cmd_buffer->cs; 281 uint64_t va; 282 283 if (!device->trace_bo) 284 return; 285 286 va = device->ws->buffer_get_va(device->trace_bo); 287 288 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7); 289 290 ++cmd_buffer->state.trace_id; 291 device->ws->cs_add_buffer(cs, device->trace_bo, 8); 292 radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0)); 293 radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) | 294 S_370_WR_CONFIRM(1) | 295 S_370_ENGINE_SEL(V_370_ME)); 296 radeon_emit(cs, va); 297 radeon_emit(cs, va >> 32); 298 radeon_emit(cs, cmd_buffer->state.trace_id); 299 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 300 radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id)); 301 } 302 303 static void 304 radv_emit_graphics_blend_state(struct radv_cmd_buffer *cmd_buffer, 305 struct radv_pipeline *pipeline) 306 { 307 radeon_set_context_reg_seq(cmd_buffer->cs, R_028780_CB_BLEND0_CONTROL, 8); 308 radeon_emit_array(cmd_buffer->cs, pipeline->graphics.blend.cb_blend_control, 309 8); 310 radeon_set_context_reg(cmd_buffer->cs, R_028808_CB_COLOR_CONTROL, pipeline->graphics.blend.cb_color_control); 311 radeon_set_context_reg(cmd_buffer->cs, R_028B70_DB_ALPHA_TO_MASK, pipeline->graphics.blend.db_alpha_to_mask); 312 } 313 314 static void 315 radv_emit_graphics_depth_stencil_state(struct radv_cmd_buffer *cmd_buffer, 316 struct radv_pipeline *pipeline) 317 { 318 struct radv_depth_stencil_state *ds = &pipeline->graphics.ds; 319 radeon_set_context_reg(cmd_buffer->cs, R_028800_DB_DEPTH_CONTROL, ds->db_depth_control); 320 radeon_set_context_reg(cmd_buffer->cs, R_02842C_DB_STENCIL_CONTROL, ds->db_stencil_control); 321 322 radeon_set_context_reg(cmd_buffer->cs, R_028000_DB_RENDER_CONTROL, ds->db_render_control); 323 radeon_set_context_reg(cmd_buffer->cs, R_028010_DB_RENDER_OVERRIDE2, ds->db_render_override2); 324 } 325 326 /* 12.4 fixed-point */ 327 static unsigned radv_pack_float_12p4(float x) 328 { 329 return x <= 0 ? 0 : 330 x >= 4096 ? 
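	       /* 12.4 fixed point: 4 fractional bits, so the value is scaled by 16;
	        * anything >= 4096 saturates to the 16-bit maximum of 0xffff. */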
0xffff : x * 16; 331 } 332 333 static uint32_t 334 shader_stage_to_user_data_0(gl_shader_stage stage) 335 { 336 switch (stage) { 337 case MESA_SHADER_FRAGMENT: 338 return R_00B030_SPI_SHADER_USER_DATA_PS_0; 339 case MESA_SHADER_VERTEX: 340 return R_00B130_SPI_SHADER_USER_DATA_VS_0; 341 case MESA_SHADER_COMPUTE: 342 return R_00B900_COMPUTE_USER_DATA_0; 343 default: 344 unreachable("unknown shader"); 345 } 346 } 347 348 static struct ac_userdata_info * 349 radv_lookup_user_sgpr(struct radv_pipeline *pipeline, 350 gl_shader_stage stage, 351 int idx) 352 { 353 return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx]; 354 } 355 356 static void 357 radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer, 358 struct radv_pipeline *pipeline, 359 gl_shader_stage stage, 360 int idx, uint64_t va) 361 { 362 struct ac_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx); 363 uint32_t base_reg = shader_stage_to_user_data_0(stage); 364 if (loc->sgpr_idx == -1) 365 return; 366 assert(loc->num_sgprs == 2); 367 assert(!loc->indirect); 368 radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2); 369 radeon_emit(cmd_buffer->cs, va); 370 radeon_emit(cmd_buffer->cs, va >> 32); 371 } 372 373 static void 374 radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer, 375 struct radv_pipeline *pipeline) 376 { 377 int num_samples = pipeline->graphics.ms.num_samples; 378 struct radv_multisample_state *ms = &pipeline->graphics.ms; 379 struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline; 380 381 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C38_PA_SC_AA_MASK_X0Y0_X1Y0, 2); 382 radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[0]); 383 radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_mask[1]); 384 385 radeon_set_context_reg(cmd_buffer->cs, CM_R_028804_DB_EQAA, ms->db_eqaa); 386 radeon_set_context_reg(cmd_buffer->cs, EG_R_028A4C_PA_SC_MODE_CNTL_1, ms->pa_sc_mode_cntl_1); 387 388 if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples) 389 return; 390 391 radeon_set_context_reg_seq(cmd_buffer->cs, CM_R_028BDC_PA_SC_LINE_CNTL, 2); 392 radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl); 393 radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config); 394 395 radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples); 396 397 uint32_t samples_offset; 398 void *samples_ptr; 399 void *src; 400 radv_cmd_buffer_upload_alloc(cmd_buffer, num_samples * 4 * 2, 256, &samples_offset, 401 &samples_ptr); 402 switch (num_samples) { 403 case 1: 404 src = cmd_buffer->device->sample_locations_1x; 405 break; 406 case 2: 407 src = cmd_buffer->device->sample_locations_2x; 408 break; 409 case 4: 410 src = cmd_buffer->device->sample_locations_4x; 411 break; 412 case 8: 413 src = cmd_buffer->device->sample_locations_8x; 414 break; 415 case 16: 416 src = cmd_buffer->device->sample_locations_16x; 417 break; 418 } 419 memcpy(samples_ptr, src, num_samples * 4 * 2); 420 421 uint64_t va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo); 422 va += samples_offset; 423 424 radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT, 425 AC_UD_PS_SAMPLE_POS, va); 426 } 427 428 static void 429 radv_emit_graphics_raster_state(struct radv_cmd_buffer *cmd_buffer, 430 struct radv_pipeline *pipeline) 431 { 432 struct radv_raster_state *raster = &pipeline->graphics.raster; 433 434 radeon_set_context_reg(cmd_buffer->cs, R_028810_PA_CL_CLIP_CNTL, 435 raster->pa_cl_clip_cntl); 436 437 radeon_set_context_reg(cmd_buffer->cs, R_0286D4_SPI_INTERP_CONTROL_0, 438 
raster->spi_interp_control); 439 440 radeon_set_context_reg_seq(cmd_buffer->cs, R_028A00_PA_SU_POINT_SIZE, 2); 441 unsigned tmp = (unsigned)(1.0 * 8.0); 442 radeon_emit(cmd_buffer->cs, S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp)); 443 radeon_emit(cmd_buffer->cs, S_028A04_MIN_SIZE(radv_pack_float_12p4(0)) | 444 S_028A04_MAX_SIZE(radv_pack_float_12p4(8192/2))); /* R_028A04_PA_SU_POINT_MINMAX */ 445 446 radeon_set_context_reg(cmd_buffer->cs, R_028BE4_PA_SU_VTX_CNTL, 447 raster->pa_su_vtx_cntl); 448 449 radeon_set_context_reg(cmd_buffer->cs, R_028814_PA_SU_SC_MODE_CNTL, 450 raster->pa_su_sc_mode_cntl); 451 } 452 453 static void 454 radv_emit_vertex_shader(struct radv_cmd_buffer *cmd_buffer, 455 struct radv_pipeline *pipeline) 456 { 457 struct radeon_winsys *ws = cmd_buffer->device->ws; 458 struct radv_shader_variant *vs; 459 uint64_t va; 460 unsigned export_count; 461 unsigned clip_dist_mask, cull_dist_mask, total_mask; 462 463 assert (pipeline->shaders[MESA_SHADER_VERTEX]); 464 465 vs = pipeline->shaders[MESA_SHADER_VERTEX]; 466 va = ws->buffer_get_va(vs->bo); 467 ws->cs_add_buffer(cmd_buffer->cs, vs->bo, 8); 468 469 clip_dist_mask = vs->info.vs.clip_dist_mask; 470 cull_dist_mask = vs->info.vs.cull_dist_mask; 471 total_mask = clip_dist_mask | cull_dist_mask; 472 radeon_set_context_reg(cmd_buffer->cs, R_028A40_VGT_GS_MODE, 0); 473 radeon_set_context_reg(cmd_buffer->cs, R_028A84_VGT_PRIMITIVEID_EN, 0); 474 475 export_count = MAX2(1, vs->info.vs.param_exports); 476 radeon_set_context_reg(cmd_buffer->cs, R_0286C4_SPI_VS_OUT_CONFIG, 477 S_0286C4_VS_EXPORT_COUNT(export_count - 1)); 478 radeon_set_context_reg(cmd_buffer->cs, R_02870C_SPI_SHADER_POS_FORMAT, 479 S_02870C_POS0_EXPORT_FORMAT(V_02870C_SPI_SHADER_4COMP) | 480 S_02870C_POS1_EXPORT_FORMAT(vs->info.vs.pos_exports > 1 ? 481 V_02870C_SPI_SHADER_4COMP : 482 V_02870C_SPI_SHADER_NONE) | 483 S_02870C_POS2_EXPORT_FORMAT(vs->info.vs.pos_exports > 2 ? 484 V_02870C_SPI_SHADER_4COMP : 485 V_02870C_SPI_SHADER_NONE) | 486 S_02870C_POS3_EXPORT_FORMAT(vs->info.vs.pos_exports > 3 ? 
487 V_02870C_SPI_SHADER_4COMP : 488 V_02870C_SPI_SHADER_NONE)); 489 490 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B120_SPI_SHADER_PGM_LO_VS, 4); 491 radeon_emit(cmd_buffer->cs, va >> 8); 492 radeon_emit(cmd_buffer->cs, va >> 40); 493 radeon_emit(cmd_buffer->cs, vs->rsrc1); 494 radeon_emit(cmd_buffer->cs, vs->rsrc2); 495 496 radeon_set_context_reg(cmd_buffer->cs, R_028818_PA_CL_VTE_CNTL, 497 S_028818_VTX_W0_FMT(1) | 498 S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) | 499 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) | 500 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1)); 501 502 radeon_set_context_reg(cmd_buffer->cs, R_02881C_PA_CL_VS_OUT_CNTL, 503 S_02881C_USE_VTX_POINT_SIZE(vs->info.vs.writes_pointsize) | 504 S_02881C_USE_VTX_RENDER_TARGET_INDX(vs->info.vs.writes_layer) | 505 S_02881C_USE_VTX_VIEWPORT_INDX(vs->info.vs.writes_viewport_index) | 506 S_02881C_VS_OUT_MISC_VEC_ENA(vs->info.vs.writes_pointsize || 507 vs->info.vs.writes_layer || 508 vs->info.vs.writes_viewport_index) | 509 S_02881C_VS_OUT_CCDIST0_VEC_ENA((total_mask & 0x0f) != 0) | 510 S_02881C_VS_OUT_CCDIST1_VEC_ENA((total_mask & 0xf0) != 0) | 511 pipeline->graphics.raster.pa_cl_vs_out_cntl | 512 cull_dist_mask << 8 | 513 clip_dist_mask); 514 515 radeon_set_context_reg(cmd_buffer->cs, R_028AB4_VGT_REUSE_OFF, 516 S_028AB4_REUSE_OFF(vs->info.vs.writes_viewport_index)); 517 } 518 519 520 521 static void 522 radv_emit_fragment_shader(struct radv_cmd_buffer *cmd_buffer, 523 struct radv_pipeline *pipeline) 524 { 525 struct radeon_winsys *ws = cmd_buffer->device->ws; 526 struct radv_shader_variant *ps, *vs; 527 uint64_t va; 528 unsigned spi_baryc_cntl = S_0286E0_FRONT_FACE_ALL_BITS(1); 529 struct radv_blend_state *blend = &pipeline->graphics.blend; 530 unsigned ps_offset = 0; 531 unsigned z_order; 532 assert (pipeline->shaders[MESA_SHADER_FRAGMENT]); 533 534 ps = pipeline->shaders[MESA_SHADER_FRAGMENT]; 535 vs = pipeline->shaders[MESA_SHADER_VERTEX]; 536 va = ws->buffer_get_va(ps->bo); 537 ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8); 538 539 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B020_SPI_SHADER_PGM_LO_PS, 4); 540 radeon_emit(cmd_buffer->cs, va >> 8); 541 radeon_emit(cmd_buffer->cs, va >> 40); 542 radeon_emit(cmd_buffer->cs, ps->rsrc1); 543 radeon_emit(cmd_buffer->cs, ps->rsrc2); 544 545 if (ps->info.fs.early_fragment_test || !ps->info.fs.writes_memory) 546 z_order = V_02880C_EARLY_Z_THEN_LATE_Z; 547 else 548 z_order = V_02880C_LATE_Z; 549 550 551 radeon_set_context_reg(cmd_buffer->cs, R_02880C_DB_SHADER_CONTROL, 552 S_02880C_Z_EXPORT_ENABLE(ps->info.fs.writes_z) | 553 S_02880C_STENCIL_TEST_VAL_EXPORT_ENABLE(ps->info.fs.writes_stencil) | 554 S_02880C_KILL_ENABLE(!!ps->info.fs.can_discard) | 555 S_02880C_Z_ORDER(z_order) | 556 S_02880C_DEPTH_BEFORE_SHADER(ps->info.fs.early_fragment_test) | 557 S_02880C_EXEC_ON_HIER_FAIL(ps->info.fs.writes_memory) | 558 S_02880C_EXEC_ON_NOOP(ps->info.fs.writes_memory)); 559 560 radeon_set_context_reg(cmd_buffer->cs, R_0286CC_SPI_PS_INPUT_ENA, 561 ps->config.spi_ps_input_ena); 562 563 radeon_set_context_reg(cmd_buffer->cs, R_0286D0_SPI_PS_INPUT_ADDR, 564 ps->config.spi_ps_input_addr); 565 566 spi_baryc_cntl |= S_0286E0_POS_FLOAT_LOCATION(0); 567 radeon_set_context_reg(cmd_buffer->cs, R_0286D8_SPI_PS_IN_CONTROL, 568 S_0286D8_NUM_INTERP(ps->info.fs.num_interp)); 569 570 radeon_set_context_reg(cmd_buffer->cs, R_0286E0_SPI_BARYC_CNTL, spi_baryc_cntl); 571 572 radeon_set_context_reg(cmd_buffer->cs, R_028710_SPI_SHADER_Z_FORMAT, 573 ps->info.fs.writes_stencil 
? V_028710_SPI_SHADER_32_GR : 574 ps->info.fs.writes_z ? V_028710_SPI_SHADER_32_R : 575 V_028710_SPI_SHADER_ZERO); 576 577 radeon_set_context_reg(cmd_buffer->cs, R_028714_SPI_SHADER_COL_FORMAT, blend->spi_shader_col_format); 578 579 radeon_set_context_reg(cmd_buffer->cs, R_028238_CB_TARGET_MASK, blend->cb_target_mask); 580 radeon_set_context_reg(cmd_buffer->cs, R_02823C_CB_SHADER_MASK, blend->cb_shader_mask); 581 582 if (ps->info.fs.has_pcoord) { 583 unsigned val; 584 val = S_028644_PT_SPRITE_TEX(1) | S_028644_OFFSET(0x20); 585 radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val); 586 ps_offset = 1; 587 } 588 589 for (unsigned i = 0; i < 32 && (1u << i) <= ps->info.fs.input_mask; ++i) { 590 unsigned vs_offset, flat_shade; 591 unsigned val; 592 593 if (!(ps->info.fs.input_mask & (1u << i))) 594 continue; 595 596 597 if (!(vs->info.vs.export_mask & (1u << i))) { 598 radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, 599 S_028644_OFFSET(0x20)); 600 ++ps_offset; 601 continue; 602 } 603 604 vs_offset = util_bitcount(vs->info.vs.export_mask & ((1u << i) - 1)); 605 flat_shade = !!(ps->info.fs.flat_shaded_mask & (1u << ps_offset)); 606 607 val = S_028644_OFFSET(vs_offset) | S_028644_FLAT_SHADE(flat_shade); 608 radeon_set_context_reg(cmd_buffer->cs, R_028644_SPI_PS_INPUT_CNTL_0 + 4 * ps_offset, val); 609 ++ps_offset; 610 } 611 } 612 613 static void 614 radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer, 615 struct radv_pipeline *pipeline) 616 { 617 if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline) 618 return; 619 620 radv_emit_graphics_depth_stencil_state(cmd_buffer, pipeline); 621 radv_emit_graphics_blend_state(cmd_buffer, pipeline); 622 radv_emit_graphics_raster_state(cmd_buffer, pipeline); 623 radv_update_multisample_state(cmd_buffer, pipeline); 624 radv_emit_vertex_shader(cmd_buffer, pipeline); 625 radv_emit_fragment_shader(cmd_buffer, pipeline); 626 627 radeon_set_context_reg(cmd_buffer->cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN, 628 pipeline->graphics.prim_restart_enable); 629 630 cmd_buffer->state.emitted_pipeline = pipeline; 631 } 632 633 static void 634 radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer) 635 { 636 si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count, 637 cmd_buffer->state.dynamic.viewport.viewports); 638 } 639 640 static void 641 radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer) 642 { 643 uint32_t count = cmd_buffer->state.dynamic.scissor.count; 644 si_write_scissors(cmd_buffer->cs, 0, count, 645 cmd_buffer->state.dynamic.scissor.scissors); 646 radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, 647 cmd_buffer->state.pipeline->graphics.ms.pa_sc_mode_cntl_0 | S_028A48_VPORT_SCISSOR_ENABLE(count ? 
			       1 : 0));
}

static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
			 int index,
			 struct radv_color_buffer_info *cb)
{
	bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
	radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
	radeon_emit(cmd_buffer->cs, cb->cb_color_base);
	radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
	radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
	radeon_emit(cmd_buffer->cs, cb->cb_color_view);
	radeon_emit(cmd_buffer->cs, cb->cb_color_info);
	radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
	radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
	radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
	radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
	radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
	radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

	if (is_vi) { /* DCC BASE */
		radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
	}
}

static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_ds_buffer_info *ds,
		      struct radv_image *image,
		      VkImageLayout layout)
{
	uint32_t db_z_info = ds->db_z_info;

	if (!radv_layout_has_htile(image, layout))
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;

	if (!radv_layout_can_expclear(image, layout))
		db_z_info &= C_028040_ALLOW_EXPCLEAR & C_028044_ALLOW_EXPCLEAR;

	radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
	radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

	radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
	radeon_emit(cmd_buffer->cs, ds->db_depth_info); /* R_02803C_DB_DEPTH_INFO */
	radeon_emit(cmd_buffer->cs, db_z_info); /* R_028040_DB_Z_INFO */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_info); /* R_028044_DB_STENCIL_INFO */
	radeon_emit(cmd_buffer->cs, ds->db_z_read_base); /* R_028048_DB_Z_READ_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base); /* R_02804C_DB_STENCIL_READ_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_z_write_base); /* R_028050_DB_Z_WRITE_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* R_028054_DB_STENCIL_WRITE_BASE */
	radeon_emit(cmd_buffer->cs, ds->db_depth_size); /* R_028058_DB_DEPTH_SIZE */
	radeon_emit(cmd_buffer->cs, ds->db_depth_slice); /* R_02805C_DB_DEPTH_SLICE */

	radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);
	radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       ds->pa_su_poly_offset_db_fmt_cntl);
}

/*
 * For a hardware resolve of multisample images, both the source and the
 * destination need to use the same micro tiling mode, but we don't always
 * know that in advance when creating the images. This function is called
 * when a subpass has a resolve attachment: it checks whether the color
 * attachment image, and the framebuffer state generated for it, already use
 * the resolve destination's micro tiling mode, and updates them if not.
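 * Both the image's own tiling metadata (radv_image_set_optimal_micro_tile_mode())
 * and the attachment's already-generated CB_COLOR*_ATTRIB value are updated.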
714 */ 715 static void radv_set_optimal_micro_tile_mode(struct radv_device *device, 716 struct radv_attachment_info *att, 717 uint32_t micro_tile_mode) 718 { 719 struct radv_image *image = att->attachment->image; 720 uint32_t tile_mode_index; 721 if (image->surface.nsamples <= 1) 722 return; 723 724 if (image->surface.micro_tile_mode != micro_tile_mode) { 725 radv_image_set_optimal_micro_tile_mode(device, image, micro_tile_mode); 726 } 727 728 if (att->cb.micro_tile_mode != micro_tile_mode) { 729 tile_mode_index = image->surface.tiling_index[0]; 730 731 att->cb.cb_color_attrib &= C_028C74_TILE_MODE_INDEX; 732 att->cb.cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index); 733 att->cb.micro_tile_mode = micro_tile_mode; 734 } 735 } 736 737 void 738 radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer, 739 struct radv_image *image, 740 VkClearDepthStencilValue ds_clear_value, 741 VkImageAspectFlags aspects) 742 { 743 uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo); 744 va += image->offset + image->clear_value_offset; 745 unsigned reg_offset = 0, reg_count = 0; 746 747 if (!image->htile.size || !aspects) 748 return; 749 750 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) { 751 ++reg_count; 752 } else { 753 ++reg_offset; 754 va += 4; 755 } 756 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) 757 ++reg_count; 758 759 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8); 760 761 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0)); 762 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) | 763 S_370_WR_CONFIRM(1) | 764 S_370_ENGINE_SEL(V_370_PFP)); 765 radeon_emit(cmd_buffer->cs, va); 766 radeon_emit(cmd_buffer->cs, va >> 32); 767 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) 768 radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); 769 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) 770 radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); 771 772 radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count); 773 if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) 774 radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */ 775 if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT) 776 radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */ 777 } 778 779 static void 780 radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer, 781 struct radv_image *image) 782 { 783 uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo); 784 va += image->offset + image->clear_value_offset; 785 786 if (!image->htile.size) 787 return; 788 789 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8); 790 791 radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0)); 792 radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) | 793 COPY_DATA_DST_SEL(COPY_DATA_REG) | 794 COPY_DATA_COUNT_SEL); 795 radeon_emit(cmd_buffer->cs, va); 796 radeon_emit(cmd_buffer->cs, va >> 32); 797 radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2); 798 radeon_emit(cmd_buffer->cs, 0); 799 800 radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0)); 801 radeon_emit(cmd_buffer->cs, 0); 802 } 803 804 void 805 radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer, 806 struct radv_image *image, 807 int idx, 808 uint32_t color_values[2]) 809 { 810 uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo); 811 va += image->offset + image->clear_value_offset; 812 813 if (!image->cmask.size && !image->surface.dcc_size) 814 return; 815 816 
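	/* The clear value is written twice: into the image's clear-value metadata
	 * in memory (the WRITE_DATA packet below) and into the CB_COLOR*_CLEAR_WORD*
	 * context registers, so both stay in sync. */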
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8); 817 818 radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0)); 819 radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) | 820 S_370_WR_CONFIRM(1) | 821 S_370_ENGINE_SEL(V_370_PFP)); 822 radeon_emit(cmd_buffer->cs, va); 823 radeon_emit(cmd_buffer->cs, va >> 32); 824 radeon_emit(cmd_buffer->cs, color_values[0]); 825 radeon_emit(cmd_buffer->cs, color_values[1]); 826 827 radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2); 828 radeon_emit(cmd_buffer->cs, color_values[0]); 829 radeon_emit(cmd_buffer->cs, color_values[1]); 830 } 831 832 static void 833 radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer, 834 struct radv_image *image, 835 int idx) 836 { 837 uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo); 838 va += image->offset + image->clear_value_offset; 839 840 if (!image->cmask.size && !image->surface.dcc_size) 841 return; 842 843 uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c; 844 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, image->bo, 8); 845 846 radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0)); 847 radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) | 848 COPY_DATA_DST_SEL(COPY_DATA_REG) | 849 COPY_DATA_COUNT_SEL); 850 radeon_emit(cmd_buffer->cs, va); 851 radeon_emit(cmd_buffer->cs, va >> 32); 852 radeon_emit(cmd_buffer->cs, reg >> 2); 853 radeon_emit(cmd_buffer->cs, 0); 854 855 radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0)); 856 radeon_emit(cmd_buffer->cs, 0); 857 } 858 859 void 860 radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer) 861 { 862 int i; 863 struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer; 864 const struct radv_subpass *subpass = cmd_buffer->state.subpass; 865 int dst_resolve_micro_tile_mode = -1; 866 867 if (subpass->has_resolve) { 868 uint32_t a = subpass->resolve_attachments[0].attachment; 869 const struct radv_image *image = framebuffer->attachments[a].attachment->image; 870 dst_resolve_micro_tile_mode = image->surface.micro_tile_mode; 871 } 872 for (i = 0; i < subpass->color_count; ++i) { 873 int idx = subpass->color_attachments[i].attachment; 874 struct radv_attachment_info *att = &framebuffer->attachments[idx]; 875 876 if (dst_resolve_micro_tile_mode != -1) { 877 radv_set_optimal_micro_tile_mode(cmd_buffer->device, 878 att, dst_resolve_micro_tile_mode); 879 } 880 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8); 881 882 assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT); 883 radv_emit_fb_color_state(cmd_buffer, i, &att->cb); 884 885 radv_load_color_clear_regs(cmd_buffer, att->attachment->image, i); 886 } 887 888 for (i = subpass->color_count; i < 8; i++) 889 radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C, 890 S_028C70_FORMAT(V_028C70_COLOR_INVALID)); 891 892 if(subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) { 893 int idx = subpass->depth_stencil_attachment.attachment; 894 VkImageLayout layout = subpass->depth_stencil_attachment.layout; 895 struct radv_attachment_info *att = &framebuffer->attachments[idx]; 896 struct radv_image *image = att->attachment->image; 897 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, att->attachment->bo, 8); 898 899 radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout); 900 901 if (att->ds.offset_scale != cmd_buffer->state.offset_scale) { 902 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS; 903 
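			/* The depth bias constant factor is multiplied by offset_scale
			 * when the dynamic state is flushed, so a new scale means the
			 * polygon offset registers have to be re-emitted. */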
cmd_buffer->state.offset_scale = att->ds.offset_scale; 904 } 905 radv_load_depth_clear_regs(cmd_buffer, image); 906 } else { 907 radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2); 908 radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID)); /* R_028040_DB_Z_INFO */ 909 radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* R_028044_DB_STENCIL_INFO */ 910 } 911 radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR, 912 S_028208_BR_X(framebuffer->width) | 913 S_028208_BR_Y(framebuffer->height)); 914 } 915 916 void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer) 917 { 918 uint32_t db_count_control; 919 920 if(!cmd_buffer->state.active_occlusion_queries) { 921 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { 922 db_count_control = 0; 923 } else { 924 db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1); 925 } 926 } else { 927 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { 928 db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) | 929 S_028004_SAMPLE_RATE(0) | /* TODO: set this to the number of samples of the current framebuffer */ 930 S_028004_ZPASS_ENABLE(1) | 931 S_028004_SLICE_EVEN_ENABLE(1) | 932 S_028004_SLICE_ODD_ENABLE(1); 933 } else { 934 db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) | 935 S_028004_SAMPLE_RATE(0); /* TODO: set this to the number of samples of the current framebuffer */ 936 } 937 } 938 939 radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control); 940 } 941 942 static void 943 radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer) 944 { 945 struct radv_dynamic_state *d = &cmd_buffer->state.dynamic; 946 947 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH) { 948 unsigned width = cmd_buffer->state.dynamic.line_width * 8; 949 radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL, 950 S_028A08_WIDTH(CLAMP(width, 0, 0xFFF))); 951 } 952 953 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS) { 954 radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4); 955 radeon_emit_array(cmd_buffer->cs, (uint32_t*)d->blend_constants, 4); 956 } 957 958 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE | 959 RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK | 960 RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK)) { 961 radeon_set_context_reg_seq(cmd_buffer->cs, R_028430_DB_STENCILREFMASK, 2); 962 radeon_emit(cmd_buffer->cs, S_028430_STENCILTESTVAL(d->stencil_reference.front) | 963 S_028430_STENCILMASK(d->stencil_compare_mask.front) | 964 S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) | 965 S_028430_STENCILOPVAL(1)); 966 radeon_emit(cmd_buffer->cs, S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) | 967 S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) | 968 S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) | 969 S_028434_STENCILOPVAL_BF(1)); 970 } 971 972 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE | 973 RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)) { 974 radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN, fui(d->depth_bounds.min)); 975 radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX, fui(d->depth_bounds.max)); 976 } 977 978 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_PIPELINE | 979 RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) { 980 struct radv_raster_state *raster = &cmd_buffer->state.pipeline->graphics.raster; 981 unsigned slope = fui(d->depth_bias.slope * 16.0f); 982 unsigned bias = 
fui(d->depth_bias.bias * cmd_buffer->state.offset_scale); 983 984 if (G_028814_POLY_OFFSET_FRONT_ENABLE(raster->pa_su_sc_mode_cntl)) { 985 radeon_set_context_reg_seq(cmd_buffer->cs, R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5); 986 radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */ 987 radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */ 988 radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */ 989 radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */ 990 radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */ 991 } 992 } 993 994 cmd_buffer->state.dirty = 0; 995 } 996 997 static void 998 emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer, 999 struct radv_pipeline *pipeline, 1000 int idx, 1001 uint64_t va, 1002 gl_shader_stage stage) 1003 { 1004 struct ac_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx]; 1005 uint32_t base_reg = shader_stage_to_user_data_0(stage); 1006 1007 if (desc_set_loc->sgpr_idx == -1) 1008 return; 1009 1010 assert(!desc_set_loc->indirect); 1011 assert(desc_set_loc->num_sgprs == 2); 1012 radeon_set_sh_reg_seq(cmd_buffer->cs, 1013 base_reg + desc_set_loc->sgpr_idx * 4, 2); 1014 radeon_emit(cmd_buffer->cs, va); 1015 radeon_emit(cmd_buffer->cs, va >> 32); 1016 } 1017 1018 static void 1019 radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer, 1020 struct radv_pipeline *pipeline, 1021 VkShaderStageFlags stages, 1022 struct radv_descriptor_set *set, 1023 unsigned idx) 1024 { 1025 if (stages & VK_SHADER_STAGE_FRAGMENT_BIT) 1026 emit_stage_descriptor_set_userdata(cmd_buffer, pipeline, 1027 idx, set->va, 1028 MESA_SHADER_FRAGMENT); 1029 1030 if (stages & VK_SHADER_STAGE_VERTEX_BIT) 1031 emit_stage_descriptor_set_userdata(cmd_buffer, pipeline, 1032 idx, set->va, 1033 MESA_SHADER_VERTEX); 1034 1035 if (stages & VK_SHADER_STAGE_COMPUTE_BIT) 1036 emit_stage_descriptor_set_userdata(cmd_buffer, pipeline, 1037 idx, set->va, 1038 MESA_SHADER_COMPUTE); 1039 } 1040 1041 static void 1042 radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer, 1043 struct radv_pipeline *pipeline, 1044 VkShaderStageFlags stages) 1045 { 1046 unsigned i; 1047 if (!cmd_buffer->state.descriptors_dirty) 1048 return; 1049 1050 for (i = 0; i < MAX_SETS; i++) { 1051 if (!(cmd_buffer->state.descriptors_dirty & (1 << i))) 1052 continue; 1053 struct radv_descriptor_set *set = cmd_buffer->state.descriptors[i]; 1054 if (!set) 1055 continue; 1056 1057 radv_emit_descriptor_set_userdata(cmd_buffer, pipeline, stages, set, i); 1058 } 1059 cmd_buffer->state.descriptors_dirty = 0; 1060 } 1061 1062 static void 1063 radv_flush_constants(struct radv_cmd_buffer *cmd_buffer, 1064 struct radv_pipeline *pipeline, 1065 VkShaderStageFlags stages) 1066 { 1067 struct radv_pipeline_layout *layout = pipeline->layout; 1068 unsigned offset; 1069 void *ptr; 1070 uint64_t va; 1071 1072 stages &= cmd_buffer->push_constant_stages; 1073 if (!stages || !layout || (!layout->push_constant_size && !layout->dynamic_offset_count)) 1074 return; 1075 1076 radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size + 1077 16 * layout->dynamic_offset_count, 1078 256, &offset, &ptr); 1079 1080 memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size); 1081 memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers, 1082 16 * layout->dynamic_offset_count); 1083 1084 va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo); 1085 va += offset; 1086 1087 if (stages & VK_SHADER_STAGE_VERTEX_BIT) 1088 
radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX, 1089 AC_UD_PUSH_CONSTANTS, va); 1090 1091 if (stages & VK_SHADER_STAGE_FRAGMENT_BIT) 1092 radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_FRAGMENT, 1093 AC_UD_PUSH_CONSTANTS, va); 1094 1095 if (stages & VK_SHADER_STAGE_COMPUTE_BIT) 1096 radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_COMPUTE, 1097 AC_UD_PUSH_CONSTANTS, va); 1098 1099 cmd_buffer->push_constant_stages &= ~stages; 1100 } 1101 1102 static void 1103 radv_cmd_buffer_flush_state(struct radv_cmd_buffer *cmd_buffer) 1104 { 1105 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline; 1106 struct radv_device *device = cmd_buffer->device; 1107 uint32_t ia_multi_vgt_param; 1108 uint32_t ls_hs_config = 0; 1109 1110 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, 1111 cmd_buffer->cs, 4096); 1112 1113 if ((cmd_buffer->state.vertex_descriptors_dirty || cmd_buffer->state.vb_dirty) && 1114 cmd_buffer->state.pipeline->num_vertex_attribs) { 1115 unsigned vb_offset; 1116 void *vb_ptr; 1117 uint32_t i = 0; 1118 uint32_t num_attribs = cmd_buffer->state.pipeline->num_vertex_attribs; 1119 uint64_t va; 1120 1121 /* allocate some descriptor state for vertex buffers */ 1122 radv_cmd_buffer_upload_alloc(cmd_buffer, num_attribs * 16, 256, 1123 &vb_offset, &vb_ptr); 1124 1125 for (i = 0; i < num_attribs; i++) { 1126 uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4]; 1127 uint32_t offset; 1128 int vb = cmd_buffer->state.pipeline->va_binding[i]; 1129 struct radv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer; 1130 uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb]; 1131 1132 device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8); 1133 va = device->ws->buffer_get_va(buffer->bo); 1134 1135 offset = cmd_buffer->state.vertex_bindings[vb].offset + cmd_buffer->state.pipeline->va_offset[i]; 1136 va += offset + buffer->offset; 1137 desc[0] = va; 1138 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride); 1139 if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride) 1140 desc[2] = (buffer->size - offset - cmd_buffer->state.pipeline->va_format_size[i]) / stride + 1; 1141 else 1142 desc[2] = buffer->size - offset; 1143 desc[3] = cmd_buffer->state.pipeline->va_rsrc_word3[i]; 1144 } 1145 1146 va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo); 1147 va += vb_offset; 1148 1149 radv_emit_userdata_address(cmd_buffer, pipeline, MESA_SHADER_VERTEX, 1150 AC_UD_VS_VERTEX_BUFFERS, va); 1151 } 1152 1153 cmd_buffer->state.vertex_descriptors_dirty = false; 1154 cmd_buffer->state.vb_dirty = 0; 1155 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) 1156 radv_emit_graphics_pipeline(cmd_buffer, pipeline); 1157 1158 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_RENDER_TARGETS) 1159 radv_emit_framebuffer_state(cmd_buffer); 1160 1161 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT)) 1162 radv_emit_viewport(cmd_buffer); 1163 1164 if (cmd_buffer->state.dirty & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR)) 1165 radv_emit_scissor(cmd_buffer); 1166 1167 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) { 1168 radeon_set_context_reg(cmd_buffer->cs, R_028B54_VGT_SHADER_STAGES_EN, 0); 1169 ia_multi_vgt_param = si_get_ia_multi_vgt_param(cmd_buffer); 1170 1171 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) { 1172 radeon_set_context_reg_idx(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param); 1173 radeon_set_context_reg_idx(cmd_buffer->cs, 
R_028B58_VGT_LS_HS_CONFIG, 2, ls_hs_config); 1174 radeon_set_uconfig_reg_idx(cmd_buffer->cs, R_030908_VGT_PRIMITIVE_TYPE, 1, cmd_buffer->state.pipeline->graphics.prim); 1175 } else { 1176 radeon_set_config_reg(cmd_buffer->cs, R_008958_VGT_PRIMITIVE_TYPE, cmd_buffer->state.pipeline->graphics.prim); 1177 radeon_set_context_reg(cmd_buffer->cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param); 1178 radeon_set_context_reg(cmd_buffer->cs, R_028B58_VGT_LS_HS_CONFIG, ls_hs_config); 1179 } 1180 radeon_set_context_reg(cmd_buffer->cs, R_028A6C_VGT_GS_OUT_PRIM_TYPE, cmd_buffer->state.pipeline->graphics.gs_out); 1181 } 1182 1183 radv_cmd_buffer_flush_dynamic_state(cmd_buffer); 1184 1185 radv_flush_descriptors(cmd_buffer, cmd_buffer->state.pipeline, 1186 VK_SHADER_STAGE_ALL_GRAPHICS); 1187 radv_flush_constants(cmd_buffer, cmd_buffer->state.pipeline, 1188 VK_SHADER_STAGE_ALL_GRAPHICS); 1189 1190 assert(cmd_buffer->cs->cdw <= cdw_max); 1191 1192 si_emit_cache_flush(cmd_buffer); 1193 } 1194 1195 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer, 1196 VkPipelineStageFlags src_stage_mask) 1197 { 1198 if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | 1199 VK_PIPELINE_STAGE_TRANSFER_BIT | 1200 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT | 1201 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) { 1202 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH; 1203 } 1204 1205 if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | 1206 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | 1207 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | 1208 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | 1209 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | 1210 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | 1211 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | 1212 VK_PIPELINE_STAGE_TRANSFER_BIT | 1213 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT | 1214 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | 1215 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) { 1216 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH; 1217 } else if (src_stage_mask & (VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT | 1218 VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT | 1219 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | 1220 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) { 1221 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH; 1222 } 1223 } 1224 1225 static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier) 1226 { 1227 radv_stage_flush(cmd_buffer, barrier->src_stage_mask); 1228 1229 /* TODO: actual cache flushes */ 1230 } 1231 1232 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer, 1233 VkAttachmentReference att) 1234 { 1235 unsigned idx = att.attachment; 1236 struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment; 1237 VkImageSubresourceRange range; 1238 range.aspectMask = 0; 1239 range.baseMipLevel = view->base_mip; 1240 range.levelCount = 1; 1241 range.baseArrayLayer = view->base_layer; 1242 range.layerCount = cmd_buffer->state.framebuffer->layers; 1243 1244 radv_handle_image_transition(cmd_buffer, 1245 view->image, 1246 cmd_buffer->state.attachments[idx].current_layout, 1247 att.layout, 0, 0, range, 1248 cmd_buffer->state.attachments[idx].pending_clear_aspects); 1249 1250 cmd_buffer->state.attachments[idx].current_layout = att.layout; 1251 1252 1253 } 1254 1255 void 1256 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer, 1257 const struct radv_subpass *subpass, bool transitions) 1258 { 1259 if (transitions) { 1260 radv_subpass_barrier(cmd_buffer, 
&subpass->start_barrier); 1261 1262 for (unsigned i = 0; i < subpass->color_count; ++i) { 1263 radv_handle_subpass_image_transition(cmd_buffer, 1264 subpass->color_attachments[i]); 1265 } 1266 1267 for (unsigned i = 0; i < subpass->input_count; ++i) { 1268 radv_handle_subpass_image_transition(cmd_buffer, 1269 subpass->input_attachments[i]); 1270 } 1271 1272 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) { 1273 radv_handle_subpass_image_transition(cmd_buffer, 1274 subpass->depth_stencil_attachment); 1275 } 1276 } 1277 1278 cmd_buffer->state.subpass = subpass; 1279 1280 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_RENDER_TARGETS; 1281 } 1282 1283 static void 1284 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer, 1285 struct radv_render_pass *pass, 1286 const VkRenderPassBeginInfo *info) 1287 { 1288 struct radv_cmd_state *state = &cmd_buffer->state; 1289 1290 if (pass->attachment_count == 0) { 1291 state->attachments = NULL; 1292 return; 1293 } 1294 1295 state->attachments = vk_alloc(&cmd_buffer->pool->alloc, 1296 pass->attachment_count * 1297 sizeof(state->attachments[0]), 1298 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); 1299 if (state->attachments == NULL) { 1300 /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */ 1301 abort(); 1302 } 1303 1304 for (uint32_t i = 0; i < pass->attachment_count; ++i) { 1305 struct radv_render_pass_attachment *att = &pass->attachments[i]; 1306 VkImageAspectFlags att_aspects = vk_format_aspects(att->format); 1307 VkImageAspectFlags clear_aspects = 0; 1308 1309 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) { 1310 /* color attachment */ 1311 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { 1312 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT; 1313 } 1314 } else { 1315 /* depthstencil attachment */ 1316 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && 1317 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { 1318 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT; 1319 } 1320 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) && 1321 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { 1322 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT; 1323 } 1324 } 1325 1326 state->attachments[i].pending_clear_aspects = clear_aspects; 1327 if (clear_aspects && info) { 1328 assert(info->clearValueCount > i); 1329 state->attachments[i].clear_value = info->pClearValues[i]; 1330 } 1331 1332 state->attachments[i].current_layout = att->initial_layout; 1333 } 1334 } 1335 1336 VkResult radv_AllocateCommandBuffers( 1337 VkDevice _device, 1338 const VkCommandBufferAllocateInfo *pAllocateInfo, 1339 VkCommandBuffer *pCommandBuffers) 1340 { 1341 RADV_FROM_HANDLE(radv_device, device, _device); 1342 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool); 1343 1344 VkResult result = VK_SUCCESS; 1345 uint32_t i; 1346 1347 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) { 1348 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level, 1349 &pCommandBuffers[i]); 1350 if (result != VK_SUCCESS) 1351 break; 1352 } 1353 1354 if (result != VK_SUCCESS) 1355 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool, 1356 i, pCommandBuffers); 1357 1358 return result; 1359 } 1360 1361 static void 1362 radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer) 1363 { 1364 list_del(&cmd_buffer->pool_link); 1365 1366 list_for_each_entry_safe(struct radv_cmd_buffer_upload, up, 1367 &cmd_buffer->upload.list, list) { 1368 cmd_buffer->device->ws->buffer_destroy(up->upload_bo); 1369 list_del(&up->list); 1370 free(up); 1371 } 1372 1373 if 
	    (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
	cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);
	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void radv_FreeCommandBuffers(
	VkDevice device,
	VkCommandPool commandPool,
	uint32_t commandBufferCount,
	const VkCommandBuffer *pCommandBuffers)
{
	for (uint32_t i = 0; i < commandBufferCount; i++) {
		RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

		if (cmd_buffer)
			radv_cmd_buffer_destroy(cmd_buffer);
	}
}

static void radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{

	cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs,
						      cmd_buffer->upload.upload_bo, 8);
	cmd_buffer->upload.offset = 0;

	cmd_buffer->record_fail = false;
}

VkResult radv_ResetCommandBuffer(
	VkCommandBuffer commandBuffer,
	VkCommandBufferResetFlags flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	radv_reset_cmd_buffer(cmd_buffer);
	return VK_SUCCESS;
}

VkResult radv_BeginCommandBuffer(
	VkCommandBuffer commandBuffer,
	const VkCommandBufferBeginInfo *pBeginInfo)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	radv_reset_cmd_buffer(cmd_buffer);

	memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));

	/* setup initial configuration into command buffer */
	if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
		switch (cmd_buffer->queue_family_index) {
		case RADV_QUEUE_GENERAL:
			/* Flush read caches at the beginning of the CS that are not
			 * flushed by the kernel.
			 */
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_INV_ICACHE |
				RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
				RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
				RADV_CMD_FLAG_INV_VMEM_L1 |
				RADV_CMD_FLAG_INV_SMEM_L1 |
				RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
				RADV_CMD_FLAG_INV_GLOBAL_L2;
			si_init_config(cmd_buffer->device->physical_device, cmd_buffer);
			radv_set_db_count_control(cmd_buffer);
			si_emit_cache_flush(cmd_buffer);
			break;
		case RADV_QUEUE_COMPUTE:
			cmd_buffer->state.flush_bits = RADV_CMD_FLAG_INV_ICACHE |
				RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
				RADV_CMD_FLAG_INV_VMEM_L1 |
				RADV_CMD_FLAG_INV_SMEM_L1 |
				RADV_CMD_FLAG_INV_GLOBAL_L2;
			si_init_compute(cmd_buffer->device->physical_device, cmd_buffer);
			si_emit_cache_flush(cmd_buffer);
			break;
		case RADV_QUEUE_TRANSFER:
		default:
			break;
		}
	}

	if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
		cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
		cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

		struct radv_subpass *subpass =
			&cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

		radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
		radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
	}

	return VK_SUCCESS;
}

void radv_CmdBindVertexBuffers(
	VkCommandBuffer commandBuffer,
	uint32_t firstBinding,
	uint32_t bindingCount,
	const VkBuffer* pBuffers,
	const VkDeviceSize* pOffsets)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

	/* We have to defer setting up the vertex buffers, since we need the
	 * buffer stride from the pipeline.
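	 * The vertex buffer descriptors themselves are built later, in
	 * radv_cmd_buffer_flush_state().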
*/ 1488 1489 assert(firstBinding + bindingCount < MAX_VBS); 1490 for (uint32_t i = 0; i < bindingCount; i++) { 1491 vb[firstBinding + i].buffer = radv_buffer_from_handle(pBuffers[i]); 1492 vb[firstBinding + i].offset = pOffsets[i]; 1493 cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i); 1494 } 1495 } 1496 1497 void radv_CmdBindIndexBuffer( 1498 VkCommandBuffer commandBuffer, 1499 VkBuffer buffer, 1500 VkDeviceSize offset, 1501 VkIndexType indexType) 1502 { 1503 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1504 1505 cmd_buffer->state.index_buffer = radv_buffer_from_handle(buffer); 1506 cmd_buffer->state.index_offset = offset; 1507 cmd_buffer->state.index_type = indexType; /* vk matches hw */ 1508 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER; 1509 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, cmd_buffer->state.index_buffer->bo, 8); 1510 } 1511 1512 1513 void radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer, 1514 struct radv_descriptor_set *set, 1515 unsigned idx) 1516 { 1517 struct radeon_winsys *ws = cmd_buffer->device->ws; 1518 1519 cmd_buffer->state.descriptors[idx] = set; 1520 cmd_buffer->state.descriptors_dirty |= (1 << idx); 1521 if (!set) 1522 return; 1523 1524 for (unsigned j = 0; j < set->layout->buffer_count; ++j) 1525 if (set->descriptors[j]) 1526 ws->cs_add_buffer(cmd_buffer->cs, set->descriptors[j], 7); 1527 1528 if(set->bo) 1529 ws->cs_add_buffer(cmd_buffer->cs, set->bo, 8); 1530 } 1531 1532 void radv_CmdBindDescriptorSets( 1533 VkCommandBuffer commandBuffer, 1534 VkPipelineBindPoint pipelineBindPoint, 1535 VkPipelineLayout _layout, 1536 uint32_t firstSet, 1537 uint32_t descriptorSetCount, 1538 const VkDescriptorSet* pDescriptorSets, 1539 uint32_t dynamicOffsetCount, 1540 const uint32_t* pDynamicOffsets) 1541 { 1542 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1543 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout); 1544 unsigned dyn_idx = 0; 1545 1546 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, 1547 cmd_buffer->cs, MAX_SETS * 4 * 6); 1548 1549 for (unsigned i = 0; i < descriptorSetCount; ++i) { 1550 unsigned idx = i + firstSet; 1551 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]); 1552 radv_bind_descriptor_set(cmd_buffer, set, idx); 1553 1554 for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) { 1555 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start; 1556 uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4; 1557 assert(dyn_idx < dynamicOffsetCount); 1558 1559 struct radv_descriptor_range *range = set->dynamic_descriptors + j; 1560 uint64_t va = range->va + pDynamicOffsets[dyn_idx]; 1561 dst[0] = va; 1562 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32); 1563 dst[2] = range->size; 1564 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | 1565 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) | 1566 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | 1567 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) | 1568 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) | 1569 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32); 1570 cmd_buffer->push_constant_stages |= 1571 set->layout->dynamic_shader_stages; 1572 } 1573 } 1574 1575 assert(cmd_buffer->cs->cdw <= cdw_max); 1576 } 1577 1578 void radv_CmdPushConstants(VkCommandBuffer commandBuffer, 1579 VkPipelineLayout layout, 1580 VkShaderStageFlags stageFlags, 1581 uint32_t offset, 1582 uint32_t size, 1583 const void* pValues) 1584 { 1585 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1586 
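	/* Only stage the data into the CPU-side push constant cache here; it is
	 * uploaded to GPU memory and bound via user SGPRs by radv_flush_constants()
	 * when the state for the next draw or dispatch is flushed. */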
memcpy(cmd_buffer->push_constants + offset, pValues, size); 1587 cmd_buffer->push_constant_stages |= stageFlags; 1588 } 1589 1590 VkResult radv_EndCommandBuffer( 1591 VkCommandBuffer commandBuffer) 1592 { 1593 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1594 1595 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) 1596 si_emit_cache_flush(cmd_buffer); 1597 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs) || 1598 cmd_buffer->record_fail) 1599 return VK_ERROR_OUT_OF_DEVICE_MEMORY; 1600 return VK_SUCCESS; 1601 } 1602 1603 static void 1604 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer) 1605 { 1606 struct radeon_winsys *ws = cmd_buffer->device->ws; 1607 struct radv_shader_variant *compute_shader; 1608 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; 1609 uint64_t va; 1610 1611 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline) 1612 return; 1613 1614 cmd_buffer->state.emitted_compute_pipeline = pipeline; 1615 1616 compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE]; 1617 va = ws->buffer_get_va(compute_shader->bo); 1618 1619 ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8); 1620 1621 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, 1622 cmd_buffer->cs, 16); 1623 1624 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B830_COMPUTE_PGM_LO, 2); 1625 radeon_emit(cmd_buffer->cs, va >> 8); 1626 radeon_emit(cmd_buffer->cs, va >> 40); 1627 1628 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B848_COMPUTE_PGM_RSRC1, 2); 1629 radeon_emit(cmd_buffer->cs, compute_shader->rsrc1); 1630 radeon_emit(cmd_buffer->cs, compute_shader->rsrc2); 1631 1632 /* change these once we have scratch support */ 1633 radeon_set_sh_reg(cmd_buffer->cs, R_00B860_COMPUTE_TMPRING_SIZE, 1634 S_00B860_WAVES(32) | S_00B860_WAVESIZE(0)); 1635 1636 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3); 1637 radeon_emit(cmd_buffer->cs, 1638 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0])); 1639 radeon_emit(cmd_buffer->cs, 1640 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1])); 1641 radeon_emit(cmd_buffer->cs, 1642 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2])); 1643 1644 assert(cmd_buffer->cs->cdw <= cdw_max); 1645 } 1646 1647 1648 void radv_CmdBindPipeline( 1649 VkCommandBuffer commandBuffer, 1650 VkPipelineBindPoint pipelineBindPoint, 1651 VkPipeline _pipeline) 1652 { 1653 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1654 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline); 1655 1656 for (unsigned i = 0; i < MAX_SETS; i++) { 1657 if (cmd_buffer->state.descriptors[i]) 1658 cmd_buffer->state.descriptors_dirty |= (1 << i); 1659 } 1660 1661 switch (pipelineBindPoint) { 1662 case VK_PIPELINE_BIND_POINT_COMPUTE: 1663 cmd_buffer->state.compute_pipeline = pipeline; 1664 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT; 1665 break; 1666 case VK_PIPELINE_BIND_POINT_GRAPHICS: 1667 cmd_buffer->state.pipeline = pipeline; 1668 cmd_buffer->state.vertex_descriptors_dirty = true; 1669 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE; 1670 cmd_buffer->push_constant_stages |= pipeline->active_stages; 1671 1672 /* Apply the dynamic state from the pipeline */ 1673 cmd_buffer->state.dirty |= pipeline->dynamic_state_mask; 1674 radv_dynamic_state_copy(&cmd_buffer->state.dynamic, 1675 &pipeline->dynamic_state, 1676 pipeline->dynamic_state_mask); 1677 break; 1678 default: 1679 assert(!"invalid bind point"); 1680 break; 1681 } 1682 } 1683 1684 void 
radv_CmdSetViewport( 1685 VkCommandBuffer commandBuffer, 1686 uint32_t firstViewport, 1687 uint32_t viewportCount, 1688 const VkViewport* pViewports) 1689 { 1690 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1691 1692 const uint32_t total_count = firstViewport + viewportCount; 1693 if (cmd_buffer->state.dynamic.viewport.count < total_count) 1694 cmd_buffer->state.dynamic.viewport.count = total_count; 1695 1696 memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport, 1697 pViewports, viewportCount * sizeof(*pViewports)); 1698 1699 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT; 1700 } 1701 1702 void radv_CmdSetScissor( 1703 VkCommandBuffer commandBuffer, 1704 uint32_t firstScissor, 1705 uint32_t scissorCount, 1706 const VkRect2D* pScissors) 1707 { 1708 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1709 1710 const uint32_t total_count = firstScissor + scissorCount; 1711 if (cmd_buffer->state.dynamic.scissor.count < total_count) 1712 cmd_buffer->state.dynamic.scissor.count = total_count; 1713 1714 memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor, 1715 pScissors, scissorCount * sizeof(*pScissors)); 1716 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR; 1717 } 1718 1719 void radv_CmdSetLineWidth( 1720 VkCommandBuffer commandBuffer, 1721 float lineWidth) 1722 { 1723 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1724 cmd_buffer->state.dynamic.line_width = lineWidth; 1725 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH; 1726 } 1727 1728 void radv_CmdSetDepthBias( 1729 VkCommandBuffer commandBuffer, 1730 float depthBiasConstantFactor, 1731 float depthBiasClamp, 1732 float depthBiasSlopeFactor) 1733 { 1734 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1735 1736 cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor; 1737 cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp; 1738 cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor; 1739 1740 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS; 1741 } 1742 1743 void radv_CmdSetBlendConstants( 1744 VkCommandBuffer commandBuffer, 1745 const float blendConstants[4]) 1746 { 1747 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1748 1749 memcpy(cmd_buffer->state.dynamic.blend_constants, 1750 blendConstants, sizeof(float) * 4); 1751 1752 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS; 1753 } 1754 1755 void radv_CmdSetDepthBounds( 1756 VkCommandBuffer commandBuffer, 1757 float minDepthBounds, 1758 float maxDepthBounds) 1759 { 1760 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1761 1762 cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds; 1763 cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds; 1764 1765 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS; 1766 } 1767 1768 void radv_CmdSetStencilCompareMask( 1769 VkCommandBuffer commandBuffer, 1770 VkStencilFaceFlags faceMask, 1771 uint32_t compareMask) 1772 { 1773 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1774 1775 if (faceMask & VK_STENCIL_FACE_FRONT_BIT) 1776 cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask; 1777 if (faceMask & VK_STENCIL_FACE_BACK_BIT) 1778 cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask; 1779 1780 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK; 1781 } 1782 1783 void radv_CmdSetStencilWriteMask( 1784 VkCommandBuffer commandBuffer, 1785 VkStencilFaceFlags faceMask, 1786 
	uint32_t                                    writeMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void radv_CmdSetStencilReference(
	VkCommandBuffer                             commandBuffer,
	VkStencilFaceFlags                          faceMask,
	uint32_t                                    reference)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_reference.front = reference;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_reference.back = reference;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}


void radv_CmdExecuteCommands(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    commandBufferCount,
	const VkCommandBuffer*                      pCmdBuffers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);

	/* Emit pending flushes on the primary prior to executing the
	 * secondary command buffers. */
	si_emit_cache_flush(primary);

	for (uint32_t i = 0; i < commandBufferCount; i++) {
		RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);

		primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
	}

	/* If we executed secondary command buffers we need to re-emit our
	 * pipelines and dynamic state. */
	if (commandBufferCount) {
		primary->state.emitted_pipeline = NULL;
		primary->state.emitted_compute_pipeline = NULL;
		primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
		primary->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_ALL;
	}
}

VkResult radv_CreateCommandPool(
	VkDevice                                    _device,
	const VkCommandPoolCreateInfo*              pCreateInfo,
	const VkAllocationCallbacks*                pAllocator,
	VkCommandPool*                              pCmdPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_cmd_pool *pool;

	pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pool == NULL)
		return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

	if (pAllocator)
		pool->alloc = *pAllocator;
	else
		pool->alloc = device->alloc;

	list_inithead(&pool->cmd_buffers);

	pool->queue_family_index = pCreateInfo->queueFamilyIndex;

	*pCmdPool = radv_cmd_pool_to_handle(pool);

	return VK_SUCCESS;
}

void radv_DestroyCommandPool(
	VkDevice                                    _device,
	VkCommandPool                               commandPool,
	const VkAllocationCallbacks*                pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);

	if (!pool)
		return;

	list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
				 &pool->cmd_buffers, pool_link) {
		radv_cmd_buffer_destroy(cmd_buffer);
	}

	vk_free2(&device->alloc, pAllocator, pool);
}

VkResult radv_ResetCommandPool(
	VkDevice                                    device,
	VkCommandPool                               commandPool,
	VkCommandPoolResetFlags                     flags)
{
	RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);

	list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
			    &pool->cmd_buffers, pool_link) {
		radv_reset_cmd_buffer(cmd_buffer);
	}

	return VK_SUCCESS;
}

void radv_CmdBeginRenderPass(
VkCommandBuffer commandBuffer, 1904 const VkRenderPassBeginInfo* pRenderPassBegin, 1905 VkSubpassContents contents) 1906 { 1907 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1908 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass); 1909 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer); 1910 1911 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, 1912 cmd_buffer->cs, 2048); 1913 1914 cmd_buffer->state.framebuffer = framebuffer; 1915 cmd_buffer->state.pass = pass; 1916 cmd_buffer->state.render_area = pRenderPassBegin->renderArea; 1917 radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin); 1918 1919 si_emit_cache_flush(cmd_buffer); 1920 1921 radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true); 1922 assert(cmd_buffer->cs->cdw <= cdw_max); 1923 1924 radv_cmd_buffer_clear_subpass(cmd_buffer); 1925 } 1926 1927 void radv_CmdNextSubpass( 1928 VkCommandBuffer commandBuffer, 1929 VkSubpassContents contents) 1930 { 1931 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1932 1933 si_emit_cache_flush(cmd_buffer); 1934 radv_cmd_buffer_resolve_subpass(cmd_buffer); 1935 1936 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 1937 2048); 1938 1939 radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true); 1940 radv_cmd_buffer_clear_subpass(cmd_buffer); 1941 } 1942 1943 void radv_CmdDraw( 1944 VkCommandBuffer commandBuffer, 1945 uint32_t vertexCount, 1946 uint32_t instanceCount, 1947 uint32_t firstVertex, 1948 uint32_t firstInstance) 1949 { 1950 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1951 radv_cmd_buffer_flush_state(cmd_buffer); 1952 1953 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 9); 1954 1955 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX, 1956 AC_UD_VS_BASE_VERTEX_START_INSTANCE); 1957 if (loc->sgpr_idx != -1) { 1958 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4, 2); 1959 radeon_emit(cmd_buffer->cs, firstVertex); 1960 radeon_emit(cmd_buffer->cs, firstInstance); 1961 } 1962 radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0)); 1963 radeon_emit(cmd_buffer->cs, instanceCount); 1964 1965 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, 0)); 1966 radeon_emit(cmd_buffer->cs, vertexCount); 1967 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX | 1968 S_0287F0_USE_OPAQUE(0)); 1969 1970 assert(cmd_buffer->cs->cdw <= cdw_max); 1971 1972 radv_cmd_buffer_trace_emit(cmd_buffer); 1973 } 1974 1975 static void radv_emit_primitive_reset_index(struct radv_cmd_buffer *cmd_buffer) 1976 { 1977 uint32_t primitive_reset_index = cmd_buffer->state.index_type ? 0xffffffffu : 0xffffu; 1978 1979 if (cmd_buffer->state.pipeline->graphics.prim_restart_enable && 1980 primitive_reset_index != cmd_buffer->state.last_primitive_reset_index) { 1981 cmd_buffer->state.last_primitive_reset_index = primitive_reset_index; 1982 radeon_set_context_reg(cmd_buffer->cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX, 1983 primitive_reset_index); 1984 } 1985 } 1986 1987 void radv_CmdDrawIndexed( 1988 VkCommandBuffer commandBuffer, 1989 uint32_t indexCount, 1990 uint32_t instanceCount, 1991 uint32_t firstIndex, 1992 int32_t vertexOffset, 1993 uint32_t firstInstance) 1994 { 1995 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 1996 int index_size = cmd_buffer->state.index_type ? 
4 : 2; 1997 uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size; 1998 uint64_t index_va; 1999 2000 radv_cmd_buffer_flush_state(cmd_buffer); 2001 radv_emit_primitive_reset_index(cmd_buffer); 2002 2003 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 14); 2004 2005 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0)); 2006 radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type); 2007 2008 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX, 2009 AC_UD_VS_BASE_VERTEX_START_INSTANCE); 2010 if (loc->sgpr_idx != -1) { 2011 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4, 2); 2012 radeon_emit(cmd_buffer->cs, vertexOffset); 2013 radeon_emit(cmd_buffer->cs, firstInstance); 2014 } 2015 radeon_emit(cmd_buffer->cs, PKT3(PKT3_NUM_INSTANCES, 0, 0)); 2016 radeon_emit(cmd_buffer->cs, instanceCount); 2017 2018 index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo); 2019 index_va += firstIndex * index_size + cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset; 2020 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false)); 2021 radeon_emit(cmd_buffer->cs, index_max_size); 2022 radeon_emit(cmd_buffer->cs, index_va); 2023 radeon_emit(cmd_buffer->cs, (index_va >> 32UL) & 0xFF); 2024 radeon_emit(cmd_buffer->cs, indexCount); 2025 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA); 2026 2027 assert(cmd_buffer->cs->cdw <= cdw_max); 2028 radv_cmd_buffer_trace_emit(cmd_buffer); 2029 } 2030 2031 static void 2032 radv_emit_indirect_draw(struct radv_cmd_buffer *cmd_buffer, 2033 VkBuffer _buffer, 2034 VkDeviceSize offset, 2035 VkBuffer _count_buffer, 2036 VkDeviceSize count_offset, 2037 uint32_t draw_count, 2038 uint32_t stride, 2039 bool indexed) 2040 { 2041 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer); 2042 RADV_FROM_HANDLE(radv_buffer, count_buffer, _count_buffer); 2043 struct radeon_winsys_cs *cs = cmd_buffer->cs; 2044 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA 2045 : V_0287F0_DI_SRC_SEL_AUTO_INDEX; 2046 uint64_t indirect_va = cmd_buffer->device->ws->buffer_get_va(buffer->bo); 2047 indirect_va += offset + buffer->offset; 2048 uint64_t count_va = 0; 2049 2050 if (count_buffer) { 2051 count_va = cmd_buffer->device->ws->buffer_get_va(count_buffer->bo); 2052 count_va += count_offset + count_buffer->offset; 2053 } 2054 2055 if (!draw_count) 2056 return; 2057 2058 cmd_buffer->device->ws->cs_add_buffer(cs, buffer->bo, 8); 2059 2060 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX, 2061 AC_UD_VS_BASE_VERTEX_START_INSTANCE); 2062 assert(loc->sgpr_idx != -1); 2063 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0)); 2064 radeon_emit(cs, 1); 2065 radeon_emit(cs, indirect_va); 2066 radeon_emit(cs, indirect_va >> 32); 2067 2068 radeon_emit(cs, PKT3(indexed ? 
PKT3_DRAW_INDEX_INDIRECT_MULTI : 2069 PKT3_DRAW_INDIRECT_MULTI, 2070 8, false)); 2071 radeon_emit(cs, 0); 2072 radeon_emit(cs, ((R_00B130_SPI_SHADER_USER_DATA_VS_0 + loc->sgpr_idx * 4) - SI_SH_REG_OFFSET) >> 2); 2073 radeon_emit(cs, ((R_00B130_SPI_SHADER_USER_DATA_VS_0 + (loc->sgpr_idx + 1) * 4) - SI_SH_REG_OFFSET) >> 2); 2074 radeon_emit(cs, S_2C3_COUNT_INDIRECT_ENABLE(!!count_va)); /* draw_index and count_indirect enable */ 2075 radeon_emit(cs, draw_count); /* count */ 2076 radeon_emit(cs, count_va); /* count_addr */ 2077 radeon_emit(cs, count_va >> 32); 2078 radeon_emit(cs, stride); /* stride */ 2079 radeon_emit(cs, di_src_sel); 2080 radv_cmd_buffer_trace_emit(cmd_buffer); 2081 } 2082 2083 static void 2084 radv_cmd_draw_indirect_count(VkCommandBuffer commandBuffer, 2085 VkBuffer buffer, 2086 VkDeviceSize offset, 2087 VkBuffer countBuffer, 2088 VkDeviceSize countBufferOffset, 2089 uint32_t maxDrawCount, 2090 uint32_t stride) 2091 { 2092 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 2093 radv_cmd_buffer_flush_state(cmd_buffer); 2094 2095 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, 2096 cmd_buffer->cs, 14); 2097 2098 radv_emit_indirect_draw(cmd_buffer, buffer, offset, 2099 countBuffer, countBufferOffset, maxDrawCount, stride, false); 2100 2101 assert(cmd_buffer->cs->cdw <= cdw_max); 2102 } 2103 2104 static void 2105 radv_cmd_draw_indexed_indirect_count( 2106 VkCommandBuffer commandBuffer, 2107 VkBuffer buffer, 2108 VkDeviceSize offset, 2109 VkBuffer countBuffer, 2110 VkDeviceSize countBufferOffset, 2111 uint32_t maxDrawCount, 2112 uint32_t stride) 2113 { 2114 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 2115 int index_size = cmd_buffer->state.index_type ? 4 : 2; 2116 uint32_t index_max_size = (cmd_buffer->state.index_buffer->size - cmd_buffer->state.index_offset) / index_size; 2117 uint64_t index_va; 2118 radv_cmd_buffer_flush_state(cmd_buffer); 2119 radv_emit_primitive_reset_index(cmd_buffer); 2120 2121 index_va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->state.index_buffer->bo); 2122 index_va += cmd_buffer->state.index_buffer->offset + cmd_buffer->state.index_offset; 2123 2124 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 21); 2125 2126 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_TYPE, 0, 0)); 2127 radeon_emit(cmd_buffer->cs, cmd_buffer->state.index_type); 2128 2129 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BASE, 1, 0)); 2130 radeon_emit(cmd_buffer->cs, index_va); 2131 radeon_emit(cmd_buffer->cs, index_va >> 32); 2132 2133 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0)); 2134 radeon_emit(cmd_buffer->cs, index_max_size); 2135 2136 radv_emit_indirect_draw(cmd_buffer, buffer, offset, 2137 countBuffer, countBufferOffset, maxDrawCount, stride, true); 2138 2139 assert(cmd_buffer->cs->cdw <= cdw_max); 2140 } 2141 2142 void radv_CmdDrawIndirect( 2143 VkCommandBuffer commandBuffer, 2144 VkBuffer buffer, 2145 VkDeviceSize offset, 2146 uint32_t drawCount, 2147 uint32_t stride) 2148 { 2149 radv_cmd_draw_indirect_count(commandBuffer, buffer, offset, 2150 VK_NULL_HANDLE, 0, drawCount, stride); 2151 } 2152 2153 void radv_CmdDrawIndexedIndirect( 2154 VkCommandBuffer commandBuffer, 2155 VkBuffer buffer, 2156 VkDeviceSize offset, 2157 uint32_t drawCount, 2158 uint32_t stride) 2159 { 2160 radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset, 2161 VK_NULL_HANDLE, 0, drawCount, stride); 2162 } 2163 2164 void radv_CmdDrawIndirectCountAMD( 2165 VkCommandBuffer 
commandBuffer, 2166 VkBuffer buffer, 2167 VkDeviceSize offset, 2168 VkBuffer countBuffer, 2169 VkDeviceSize countBufferOffset, 2170 uint32_t maxDrawCount, 2171 uint32_t stride) 2172 { 2173 radv_cmd_draw_indirect_count(commandBuffer, buffer, offset, 2174 countBuffer, countBufferOffset, 2175 maxDrawCount, stride); 2176 } 2177 2178 void radv_CmdDrawIndexedIndirectCountAMD( 2179 VkCommandBuffer commandBuffer, 2180 VkBuffer buffer, 2181 VkDeviceSize offset, 2182 VkBuffer countBuffer, 2183 VkDeviceSize countBufferOffset, 2184 uint32_t maxDrawCount, 2185 uint32_t stride) 2186 { 2187 radv_cmd_draw_indexed_indirect_count(commandBuffer, buffer, offset, 2188 countBuffer, countBufferOffset, 2189 maxDrawCount, stride); 2190 } 2191 2192 static void 2193 radv_flush_compute_state(struct radv_cmd_buffer *cmd_buffer) 2194 { 2195 radv_emit_compute_pipeline(cmd_buffer); 2196 radv_flush_descriptors(cmd_buffer, cmd_buffer->state.compute_pipeline, 2197 VK_SHADER_STAGE_COMPUTE_BIT); 2198 radv_flush_constants(cmd_buffer, cmd_buffer->state.compute_pipeline, 2199 VK_SHADER_STAGE_COMPUTE_BIT); 2200 si_emit_cache_flush(cmd_buffer); 2201 } 2202 2203 void radv_CmdDispatch( 2204 VkCommandBuffer commandBuffer, 2205 uint32_t x, 2206 uint32_t y, 2207 uint32_t z) 2208 { 2209 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 2210 2211 radv_flush_compute_state(cmd_buffer); 2212 2213 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 10); 2214 2215 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline, 2216 MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE); 2217 if (loc->sgpr_idx != -1) { 2218 assert(!loc->indirect); 2219 assert(loc->num_sgprs == 3); 2220 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, 3); 2221 radeon_emit(cmd_buffer->cs, x); 2222 radeon_emit(cmd_buffer->cs, y); 2223 radeon_emit(cmd_buffer->cs, z); 2224 } 2225 2226 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) | 2227 PKT3_SHADER_TYPE_S(1)); 2228 radeon_emit(cmd_buffer->cs, x); 2229 radeon_emit(cmd_buffer->cs, y); 2230 radeon_emit(cmd_buffer->cs, z); 2231 radeon_emit(cmd_buffer->cs, 1); 2232 2233 assert(cmd_buffer->cs->cdw <= cdw_max); 2234 radv_cmd_buffer_trace_emit(cmd_buffer); 2235 } 2236 2237 void radv_CmdDispatchIndirect( 2238 VkCommandBuffer commandBuffer, 2239 VkBuffer _buffer, 2240 VkDeviceSize offset) 2241 { 2242 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 2243 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer); 2244 uint64_t va = cmd_buffer->device->ws->buffer_get_va(buffer->bo); 2245 va += buffer->offset + offset; 2246 2247 cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8); 2248 2249 radv_flush_compute_state(cmd_buffer); 2250 2251 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 25); 2252 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline, 2253 MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE); 2254 if (loc->sgpr_idx != -1) { 2255 for (unsigned i = 0; i < 3; ++i) { 2256 radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0)); 2257 radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) | 2258 COPY_DATA_DST_SEL(COPY_DATA_REG)); 2259 radeon_emit(cmd_buffer->cs, (va + 4 * i)); 2260 radeon_emit(cmd_buffer->cs, (va + 4 * i) >> 32); 2261 radeon_emit(cmd_buffer->cs, ((R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4) >> 2) + i); 2262 radeon_emit(cmd_buffer->cs, 0); 2263 } 2264 } 2265 2266 if 
(radv_cmd_buffer_uses_mec(cmd_buffer)) { 2267 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) | 2268 PKT3_SHADER_TYPE_S(1)); 2269 radeon_emit(cmd_buffer->cs, va); 2270 radeon_emit(cmd_buffer->cs, va >> 32); 2271 radeon_emit(cmd_buffer->cs, 1); 2272 } else { 2273 radeon_emit(cmd_buffer->cs, PKT3(PKT3_SET_BASE, 2, 0) | 2274 PKT3_SHADER_TYPE_S(1)); 2275 radeon_emit(cmd_buffer->cs, 1); 2276 radeon_emit(cmd_buffer->cs, va); 2277 radeon_emit(cmd_buffer->cs, va >> 32); 2278 2279 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) | 2280 PKT3_SHADER_TYPE_S(1)); 2281 radeon_emit(cmd_buffer->cs, 0); 2282 radeon_emit(cmd_buffer->cs, 1); 2283 } 2284 2285 assert(cmd_buffer->cs->cdw <= cdw_max); 2286 radv_cmd_buffer_trace_emit(cmd_buffer); 2287 } 2288 2289 void radv_unaligned_dispatch( 2290 struct radv_cmd_buffer *cmd_buffer, 2291 uint32_t x, 2292 uint32_t y, 2293 uint32_t z) 2294 { 2295 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline; 2296 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE]; 2297 uint32_t blocks[3], remainder[3]; 2298 2299 blocks[0] = round_up_u32(x, compute_shader->info.cs.block_size[0]); 2300 blocks[1] = round_up_u32(y, compute_shader->info.cs.block_size[1]); 2301 blocks[2] = round_up_u32(z, compute_shader->info.cs.block_size[2]); 2302 2303 /* If aligned, these should be an entire block size, not 0 */ 2304 remainder[0] = x + compute_shader->info.cs.block_size[0] - align_u32_npot(x, compute_shader->info.cs.block_size[0]); 2305 remainder[1] = y + compute_shader->info.cs.block_size[1] - align_u32_npot(y, compute_shader->info.cs.block_size[1]); 2306 remainder[2] = z + compute_shader->info.cs.block_size[2] - align_u32_npot(z, compute_shader->info.cs.block_size[2]); 2307 2308 radv_flush_compute_state(cmd_buffer); 2309 2310 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 15); 2311 2312 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3); 2313 radeon_emit(cmd_buffer->cs, 2314 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[0]) | 2315 S_00B81C_NUM_THREAD_PARTIAL(remainder[0])); 2316 radeon_emit(cmd_buffer->cs, 2317 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[1]) | 2318 S_00B81C_NUM_THREAD_PARTIAL(remainder[1])); 2319 radeon_emit(cmd_buffer->cs, 2320 S_00B81C_NUM_THREAD_FULL(compute_shader->info.cs.block_size[2]) | 2321 S_00B81C_NUM_THREAD_PARTIAL(remainder[2])); 2322 2323 struct ac_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.compute_pipeline, 2324 MESA_SHADER_COMPUTE, AC_UD_CS_GRID_SIZE); 2325 if (loc->sgpr_idx != -1) { 2326 radeon_set_sh_reg_seq(cmd_buffer->cs, R_00B900_COMPUTE_USER_DATA_0 + loc->sgpr_idx * 4, 3); 2327 radeon_emit(cmd_buffer->cs, blocks[0]); 2328 radeon_emit(cmd_buffer->cs, blocks[1]); 2329 radeon_emit(cmd_buffer->cs, blocks[2]); 2330 } 2331 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) | 2332 PKT3_SHADER_TYPE_S(1)); 2333 radeon_emit(cmd_buffer->cs, blocks[0]); 2334 radeon_emit(cmd_buffer->cs, blocks[1]); 2335 radeon_emit(cmd_buffer->cs, blocks[2]); 2336 radeon_emit(cmd_buffer->cs, S_00B800_COMPUTE_SHADER_EN(1) | 2337 S_00B800_PARTIAL_TG_EN(1)); 2338 2339 assert(cmd_buffer->cs->cdw <= cdw_max); 2340 radv_cmd_buffer_trace_emit(cmd_buffer); 2341 } 2342 2343 void radv_CmdEndRenderPass( 2344 VkCommandBuffer commandBuffer) 2345 { 2346 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 2347 2348 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier); 2349 
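	/* Flush the caches for the end-of-pass barrier, resolve any
	 * multisample attachments and transition the attachments to their
	 * final layouts before the render pass state is torn down below. */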
	si_emit_cache_flush(cmd_buffer);
	radv_cmd_buffer_resolve_subpass(cmd_buffer);

	for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
		VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
		radv_handle_subpass_image_transition(cmd_buffer,
						     (VkAttachmentReference){i, layout});
	}

	vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);

	cmd_buffer->state.pass = NULL;
	cmd_buffer->state.subpass = NULL;
	cmd_buffer->state.attachments = NULL;
	cmd_buffer->state.framebuffer = NULL;
}


static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image)
{
	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
					RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

	radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->htile.offset,
			 image->htile.size, 0xffffffff);

	cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
					RADV_CMD_FLAG_CS_PARTIAL_FLUSH |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_GLOBAL_L2;
}

static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
					       struct radv_image *image,
					       VkImageLayout src_layout,
					       VkImageLayout dst_layout,
					       VkImageSubresourceRange range,
					       VkImageAspectFlags pending_clears)
{
	if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
	    (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
	    cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
	    cmd_buffer->state.render_area.extent.width == image->extent.width &&
	    cmd_buffer->state.render_area.extent.height == image->extent.height) {
		/* The clear will initialize htile.
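		 * (every aspect of the image has a pending clear and the
		 * render area covers the whole image), so no explicit HTILE
		 * initialization is needed here.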
*/ 2397 return; 2398 } else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED && 2399 radv_layout_has_htile(image, dst_layout)) { 2400 /* TODO: merge with the clear if applicable */ 2401 radv_initialize_htile(cmd_buffer, image); 2402 } else if (!radv_layout_has_htile(image, src_layout) && 2403 radv_layout_has_htile(image, dst_layout)) { 2404 radv_initialize_htile(cmd_buffer, image); 2405 } else if ((radv_layout_has_htile(image, src_layout) && 2406 !radv_layout_has_htile(image, dst_layout)) || 2407 (radv_layout_is_htile_compressed(image, src_layout) && 2408 !radv_layout_is_htile_compressed(image, dst_layout))) { 2409 2410 range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT; 2411 range.baseMipLevel = 0; 2412 range.levelCount = 1; 2413 2414 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | 2415 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; 2416 2417 radv_decompress_depth_image_inplace(cmd_buffer, image, &range); 2418 2419 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB | 2420 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META; 2421 } 2422 } 2423 2424 void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer, 2425 struct radv_image *image, uint32_t value) 2426 { 2427 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | 2428 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; 2429 2430 radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->cmask.offset, 2431 image->cmask.size, value); 2432 2433 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META | 2434 RADV_CMD_FLAG_CS_PARTIAL_FLUSH | 2435 RADV_CMD_FLAG_INV_VMEM_L1 | 2436 RADV_CMD_FLAG_INV_GLOBAL_L2; 2437 } 2438 2439 static void radv_handle_cmask_image_transition(struct radv_cmd_buffer *cmd_buffer, 2440 struct radv_image *image, 2441 VkImageLayout src_layout, 2442 VkImageLayout dst_layout, 2443 unsigned src_queue_mask, 2444 unsigned dst_queue_mask, 2445 VkImageSubresourceRange range, 2446 VkImageAspectFlags pending_clears) 2447 { 2448 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { 2449 if (image->fmask.size) 2450 radv_initialise_cmask(cmd_buffer, image, 0xccccccccu); 2451 else 2452 radv_initialise_cmask(cmd_buffer, image, 0xffffffffu); 2453 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) && 2454 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) { 2455 radv_fast_clear_flush_image_inplace(cmd_buffer, image); 2456 } 2457 } 2458 2459 void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer, 2460 struct radv_image *image, uint32_t value) 2461 { 2462 2463 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | 2464 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META; 2465 2466 radv_fill_buffer(cmd_buffer, image->bo, image->offset + image->dcc_offset, 2467 image->surface.dcc_size, value); 2468 2469 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | 2470 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META | 2471 RADV_CMD_FLAG_CS_PARTIAL_FLUSH | 2472 RADV_CMD_FLAG_INV_VMEM_L1 | 2473 RADV_CMD_FLAG_INV_GLOBAL_L2; 2474 } 2475 2476 static void radv_handle_dcc_image_transition(struct radv_cmd_buffer *cmd_buffer, 2477 struct radv_image *image, 2478 VkImageLayout src_layout, 2479 VkImageLayout dst_layout, 2480 unsigned src_queue_mask, 2481 unsigned dst_queue_mask, 2482 VkImageSubresourceRange range, 2483 VkImageAspectFlags pending_clears) 2484 { 2485 if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) { 2486 radv_initialize_dcc(cmd_buffer, image, 0x20202020u); 2487 } else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) && 2488 !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) { 2489 
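		/* Leaving a layout/queue combination that supports fast
		 * clears: flush the fast-clear metadata now so the surface
		 * reads correctly in the new layout. */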
radv_fast_clear_flush_image_inplace(cmd_buffer, image); 2490 } 2491 } 2492 2493 static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer, 2494 struct radv_image *image, 2495 VkImageLayout src_layout, 2496 VkImageLayout dst_layout, 2497 int src_family, 2498 int dst_family, 2499 VkImageSubresourceRange range, 2500 VkImageAspectFlags pending_clears) 2501 { 2502 if (image->exclusive && src_family != dst_family) { 2503 /* This is an acquire or a release operation and there will be 2504 * a corresponding release/acquire. Do the transition in the 2505 * most flexible queue. */ 2506 2507 assert(src_family == cmd_buffer->queue_family_index || 2508 dst_family == cmd_buffer->queue_family_index); 2509 2510 if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER) 2511 return; 2512 2513 if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE && 2514 (src_family == RADV_QUEUE_GENERAL || 2515 dst_family == RADV_QUEUE_GENERAL)) 2516 return; 2517 } 2518 2519 unsigned src_queue_mask = radv_image_queue_family_mask(image, src_family); 2520 unsigned dst_queue_mask = radv_image_queue_family_mask(image, dst_family); 2521 2522 if (image->htile.size) 2523 radv_handle_depth_image_transition(cmd_buffer, image, src_layout, 2524 dst_layout, range, pending_clears); 2525 2526 if (image->cmask.size) 2527 radv_handle_cmask_image_transition(cmd_buffer, image, src_layout, 2528 dst_layout, src_queue_mask, 2529 dst_queue_mask, range, 2530 pending_clears); 2531 2532 if (image->surface.dcc_size) 2533 radv_handle_dcc_image_transition(cmd_buffer, image, src_layout, 2534 dst_layout, src_queue_mask, 2535 dst_queue_mask, range, 2536 pending_clears); 2537 } 2538 2539 void radv_CmdPipelineBarrier( 2540 VkCommandBuffer commandBuffer, 2541 VkPipelineStageFlags srcStageMask, 2542 VkPipelineStageFlags destStageMask, 2543 VkBool32 byRegion, 2544 uint32_t memoryBarrierCount, 2545 const VkMemoryBarrier* pMemoryBarriers, 2546 uint32_t bufferMemoryBarrierCount, 2547 const VkBufferMemoryBarrier* pBufferMemoryBarriers, 2548 uint32_t imageMemoryBarrierCount, 2549 const VkImageMemoryBarrier* pImageMemoryBarriers) 2550 { 2551 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 2552 VkAccessFlags src_flags = 0; 2553 VkAccessFlags dst_flags = 0; 2554 uint32_t b; 2555 for (uint32_t i = 0; i < memoryBarrierCount; i++) { 2556 src_flags |= pMemoryBarriers[i].srcAccessMask; 2557 dst_flags |= pMemoryBarriers[i].dstAccessMask; 2558 } 2559 2560 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) { 2561 src_flags |= pBufferMemoryBarriers[i].srcAccessMask; 2562 dst_flags |= pBufferMemoryBarriers[i].dstAccessMask; 2563 } 2564 2565 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) { 2566 src_flags |= pImageMemoryBarriers[i].srcAccessMask; 2567 dst_flags |= pImageMemoryBarriers[i].dstAccessMask; 2568 } 2569 2570 enum radv_cmd_flush_bits flush_bits = 0; 2571 for_each_bit(b, src_flags) { 2572 switch ((VkAccessFlagBits)(1 << b)) { 2573 case VK_ACCESS_SHADER_WRITE_BIT: 2574 flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2; 2575 break; 2576 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT: 2577 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB; 2578 break; 2579 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT: 2580 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB; 2581 break; 2582 case VK_ACCESS_TRANSFER_WRITE_BIT: 2583 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB | 2584 RADV_CMD_FLAG_INV_GLOBAL_L2; 2585 break; 2586 default: 2587 break; 2588 } 2589 } 2590 cmd_buffer->state.flush_bits |= flush_bits; 2591 2592 for (uint32_t i = 0; i < 
imageMemoryBarrierCount; i++) { 2593 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image); 2594 radv_handle_image_transition(cmd_buffer, image, 2595 pImageMemoryBarriers[i].oldLayout, 2596 pImageMemoryBarriers[i].newLayout, 2597 pImageMemoryBarriers[i].srcQueueFamilyIndex, 2598 pImageMemoryBarriers[i].dstQueueFamilyIndex, 2599 pImageMemoryBarriers[i].subresourceRange, 2600 0); 2601 } 2602 2603 flush_bits = 0; 2604 2605 for_each_bit(b, dst_flags) { 2606 switch ((VkAccessFlagBits)(1 << b)) { 2607 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT: 2608 case VK_ACCESS_INDEX_READ_BIT: 2609 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT: 2610 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1; 2611 break; 2612 case VK_ACCESS_UNIFORM_READ_BIT: 2613 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1; 2614 break; 2615 case VK_ACCESS_SHADER_READ_BIT: 2616 flush_bits |= RADV_CMD_FLAG_INV_GLOBAL_L2; 2617 break; 2618 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT: 2619 case VK_ACCESS_TRANSFER_READ_BIT: 2620 case VK_ACCESS_TRANSFER_WRITE_BIT: 2621 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT: 2622 flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER | RADV_CMD_FLAG_INV_GLOBAL_L2; 2623 default: 2624 break; 2625 } 2626 } 2627 2628 flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | 2629 RADV_CMD_FLAG_PS_PARTIAL_FLUSH; 2630 2631 cmd_buffer->state.flush_bits |= flush_bits; 2632 } 2633 2634 2635 static void write_event(struct radv_cmd_buffer *cmd_buffer, 2636 struct radv_event *event, 2637 VkPipelineStageFlags stageMask, 2638 unsigned value) 2639 { 2640 struct radeon_winsys_cs *cs = cmd_buffer->cs; 2641 uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo); 2642 2643 cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8); 2644 2645 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 12); 2646 2647 /* TODO: this is overkill. Probably should figure something out from 2648 * the stage mask. 
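	 * As written we always signal the event from a bottom-of-pipe
	 * timestamp event, regardless of the requested stageMask, which is
	 * correct but more conservative than necessary.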
*/ 2649 2650 if (cmd_buffer->device->physical_device->rad_info.chip_class == CIK) { 2651 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0)); 2652 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | 2653 EVENT_INDEX(5)); 2654 radeon_emit(cs, va); 2655 radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1)); 2656 radeon_emit(cs, 2); 2657 radeon_emit(cs, 0); 2658 } 2659 2660 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE_EOP, 4, 0)); 2661 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_BOTTOM_OF_PIPE_TS) | 2662 EVENT_INDEX(5)); 2663 radeon_emit(cs, va); 2664 radeon_emit(cs, (va >> 32) | EOP_DATA_SEL(1)); 2665 radeon_emit(cs, value); 2666 radeon_emit(cs, 0); 2667 2668 assert(cmd_buffer->cs->cdw <= cdw_max); 2669 } 2670 2671 void radv_CmdSetEvent(VkCommandBuffer commandBuffer, 2672 VkEvent _event, 2673 VkPipelineStageFlags stageMask) 2674 { 2675 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 2676 RADV_FROM_HANDLE(radv_event, event, _event); 2677 2678 write_event(cmd_buffer, event, stageMask, 1); 2679 } 2680 2681 void radv_CmdResetEvent(VkCommandBuffer commandBuffer, 2682 VkEvent _event, 2683 VkPipelineStageFlags stageMask) 2684 { 2685 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 2686 RADV_FROM_HANDLE(radv_event, event, _event); 2687 2688 write_event(cmd_buffer, event, stageMask, 0); 2689 } 2690 2691 void radv_CmdWaitEvents(VkCommandBuffer commandBuffer, 2692 uint32_t eventCount, 2693 const VkEvent* pEvents, 2694 VkPipelineStageFlags srcStageMask, 2695 VkPipelineStageFlags dstStageMask, 2696 uint32_t memoryBarrierCount, 2697 const VkMemoryBarrier* pMemoryBarriers, 2698 uint32_t bufferMemoryBarrierCount, 2699 const VkBufferMemoryBarrier* pBufferMemoryBarriers, 2700 uint32_t imageMemoryBarrierCount, 2701 const VkImageMemoryBarrier* pImageMemoryBarriers) 2702 { 2703 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer); 2704 struct radeon_winsys_cs *cs = cmd_buffer->cs; 2705 2706 for (unsigned i = 0; i < eventCount; ++i) { 2707 RADV_FROM_HANDLE(radv_event, event, pEvents[i]); 2708 uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo); 2709 2710 cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8); 2711 2712 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7); 2713 2714 radeon_emit(cs, PKT3(PKT3_WAIT_REG_MEM, 5, 0)); 2715 radeon_emit(cs, WAIT_REG_MEM_EQUAL | WAIT_REG_MEM_MEM_SPACE(1)); 2716 radeon_emit(cs, va); 2717 radeon_emit(cs, va >> 32); 2718 radeon_emit(cs, 1); /* reference value */ 2719 radeon_emit(cs, 0xffffffff); /* mask */ 2720 radeon_emit(cs, 4); /* poll interval */ 2721 2722 assert(cmd_buffer->cs->cdw <= cdw_max); 2723 } 2724 2725 2726 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) { 2727 RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image); 2728 2729 radv_handle_image_transition(cmd_buffer, image, 2730 pImageMemoryBarriers[i].oldLayout, 2731 pImageMemoryBarriers[i].newLayout, 2732 pImageMemoryBarriers[i].srcQueueFamilyIndex, 2733 pImageMemoryBarriers[i].dstQueueFamilyIndex, 2734 pImageMemoryBarriers[i].subresourceRange, 2735 0); 2736 } 2737 2738 /* TODO: figure out how to do memory barriers without waiting */ 2739 cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER | 2740 RADV_CMD_FLAG_INV_GLOBAL_L2 | 2741 RADV_CMD_FLAG_INV_VMEM_L1 | 2742 RADV_CMD_FLAG_INV_SMEM_L1; 2743 } 2744