1 /* 2 * Copyright 2015 Intel Corporation 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 * and/or sell copies of the Software, and to permit persons to whom the 9 * Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 * IN THE SOFTWARE. 22 */ 23 24 #include <assert.h> 25 #include <stdbool.h> 26 27 #include "anv_private.h" 28 #include "vk_format_info.h" 29 #include "vk_util.h" 30 31 #include "common/gen_l3_config.h" 32 #include "genxml/gen_macros.h" 33 #include "genxml/genX_pack.h" 34 35 static void 36 emit_lrm(struct anv_batch *batch, 37 uint32_t reg, struct anv_bo *bo, uint32_t offset) 38 { 39 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) { 40 lrm.RegisterAddress = reg; 41 lrm.MemoryAddress = (struct anv_address) { bo, offset }; 42 } 43 } 44 45 static void 46 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm) 47 { 48 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) { 49 lri.RegisterOffset = reg; 50 lri.DataDWord = imm; 51 } 52 } 53 54 #if GEN_IS_HASWELL || GEN_GEN >= 8 55 static void 56 emit_lrr(struct anv_batch *batch, uint32_t dst, uint32_t src) 57 { 58 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_REG), lrr) { 59 lrr.SourceRegisterAddress = src; 60 lrr.DestinationRegisterAddress = dst; 61 } 62 } 63 #endif 64 65 void 66 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer) 67 { 68 struct anv_device *device = cmd_buffer->device; 69 70 /* Emit a render target cache flush. 71 * 72 * This isn't documented anywhere in the PRM. However, it seems to be 73 * necessary prior to changing the surface state base adress. Without 74 * this, we get GPU hangs when using multi-level command buffers which 75 * clear depth, reset state base address, and then go render stuff. 76 */ 77 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { 78 pc.DCFlushEnable = true; 79 pc.RenderTargetCacheFlushEnable = true; 80 pc.CommandStreamerStallEnable = true; 81 } 82 83 anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) { 84 sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 }; 85 sba.GeneralStateMemoryObjectControlState = GENX(MOCS); 86 sba.GeneralStateBaseAddressModifyEnable = true; 87 88 sba.SurfaceStateBaseAddress = 89 anv_cmd_buffer_surface_base_address(cmd_buffer); 90 sba.SurfaceStateMemoryObjectControlState = GENX(MOCS); 91 sba.SurfaceStateBaseAddressModifyEnable = true; 92 93 sba.DynamicStateBaseAddress = 94 (struct anv_address) { &device->dynamic_state_pool.block_pool.bo, 0 }; 95 sba.DynamicStateMemoryObjectControlState = GENX(MOCS); 96 sba.DynamicStateBaseAddressModifyEnable = true; 97 98 sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 }; 99 sba.IndirectObjectMemoryObjectControlState = GENX(MOCS); 100 sba.IndirectObjectBaseAddressModifyEnable = true; 101 102 sba.InstructionBaseAddress = 103 (struct anv_address) { &device->instruction_state_pool.block_pool.bo, 0 }; 104 sba.InstructionMemoryObjectControlState = GENX(MOCS); 105 sba.InstructionBaseAddressModifyEnable = true; 106 107 # if (GEN_GEN >= 8) 108 /* Broadwell requires that we specify a buffer size for a bunch of 109 * these fields. However, since we will be growing the BO's live, we 110 * just set them all to the maximum. 111 */ 112 sba.GeneralStateBufferSize = 0xfffff; 113 sba.GeneralStateBufferSizeModifyEnable = true; 114 sba.DynamicStateBufferSize = 0xfffff; 115 sba.DynamicStateBufferSizeModifyEnable = true; 116 sba.IndirectObjectBufferSize = 0xfffff; 117 sba.IndirectObjectBufferSizeModifyEnable = true; 118 sba.InstructionBufferSize = 0xfffff; 119 sba.InstructionBuffersizeModifyEnable = true; 120 # endif 121 } 122 123 /* After re-setting the surface state base address, we have to do some 124 * cache flusing so that the sampler engine will pick up the new 125 * SURFACE_STATE objects and binding tables. From the Broadwell PRM, 126 * Shared Function > 3D Sampler > State > State Caching (page 96): 127 * 128 * Coherency with system memory in the state cache, like the texture 129 * cache is handled partially by software. It is expected that the 130 * command stream or shader will issue Cache Flush operation or 131 * Cache_Flush sampler message to ensure that the L1 cache remains 132 * coherent with system memory. 133 * 134 * [...] 135 * 136 * Whenever the value of the Dynamic_State_Base_Addr, 137 * Surface_State_Base_Addr are altered, the L1 state cache must be 138 * invalidated to ensure the new surface or sampler state is fetched 139 * from system memory. 140 * 141 * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit 142 * which, according the PIPE_CONTROL instruction documentation in the 143 * Broadwell PRM: 144 * 145 * Setting this bit is independent of any other bit in this packet. 146 * This bit controls the invalidation of the L1 and L2 state caches 147 * at the top of the pipe i.e. at the parsing time. 148 * 149 * Unfortunately, experimentation seems to indicate that state cache 150 * invalidation through a PIPE_CONTROL does nothing whatsoever in 151 * regards to surface state and binding tables. In stead, it seems that 152 * invalidating the texture cache is what is actually needed. 153 * 154 * XXX: As far as we have been able to determine through 155 * experimentation, shows that flush the texture cache appears to be 156 * sufficient. The theory here is that all of the sampling/rendering 157 * units cache the binding table in the texture cache. However, we have 158 * yet to be able to actually confirm this. 159 */ 160 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { 161 pc.TextureCacheInvalidationEnable = true; 162 pc.ConstantCacheInvalidationEnable = true; 163 pc.StateCacheInvalidationEnable = true; 164 } 165 } 166 167 static void 168 add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer, 169 struct anv_state state, 170 struct anv_bo *bo, uint32_t offset) 171 { 172 const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev; 173 174 VkResult result = 175 anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc, 176 state.offset + isl_dev->ss.addr_offset, bo, offset); 177 if (result != VK_SUCCESS) 178 anv_batch_set_error(&cmd_buffer->batch, result); 179 } 180 181 static void 182 add_image_view_relocs(struct anv_cmd_buffer *cmd_buffer, 183 const struct anv_image_view *image_view, 184 const uint32_t plane, 185 struct anv_surface_state state) 186 { 187 const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev; 188 const struct anv_image *image = image_view->image; 189 uint32_t image_plane = image_view->planes[plane].image_plane; 190 191 add_surface_state_reloc(cmd_buffer, state.state, 192 image->planes[image_plane].bo, state.address); 193 194 if (state.aux_address) { 195 VkResult result = 196 anv_reloc_list_add(&cmd_buffer->surface_relocs, 197 &cmd_buffer->pool->alloc, 198 state.state.offset + isl_dev->ss.aux_addr_offset, 199 image->planes[image_plane].bo, state.aux_address); 200 if (result != VK_SUCCESS) 201 anv_batch_set_error(&cmd_buffer->batch, result); 202 } 203 } 204 205 static bool 206 color_is_zero_one(VkClearColorValue value, enum isl_format format) 207 { 208 if (isl_format_has_int_channel(format)) { 209 for (unsigned i = 0; i < 4; i++) { 210 if (value.int32[i] != 0 && value.int32[i] != 1) 211 return false; 212 } 213 } else { 214 for (unsigned i = 0; i < 4; i++) { 215 if (value.float32[i] != 0.0f && value.float32[i] != 1.0f) 216 return false; 217 } 218 } 219 220 return true; 221 } 222 223 static void 224 color_attachment_compute_aux_usage(struct anv_device * device, 225 struct anv_cmd_state * cmd_state, 226 uint32_t att, VkRect2D render_area, 227 union isl_color_value *fast_clear_color) 228 { 229 struct anv_attachment_state *att_state = &cmd_state->attachments[att]; 230 struct anv_image_view *iview = cmd_state->framebuffer->attachments[att]; 231 232 assert(iview->n_planes == 1); 233 234 if (iview->planes[0].isl.base_array_layer >= 235 anv_image_aux_layers(iview->image, VK_IMAGE_ASPECT_COLOR_BIT, 236 iview->planes[0].isl.base_level)) { 237 /* There is no aux buffer which corresponds to the level and layer(s) 238 * being accessed. 239 */ 240 att_state->aux_usage = ISL_AUX_USAGE_NONE; 241 att_state->input_aux_usage = ISL_AUX_USAGE_NONE; 242 att_state->fast_clear = false; 243 return; 244 } else if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_MCS) { 245 att_state->aux_usage = ISL_AUX_USAGE_MCS; 246 att_state->input_aux_usage = ISL_AUX_USAGE_MCS; 247 att_state->fast_clear = false; 248 return; 249 } else if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_CCS_E) { 250 att_state->aux_usage = ISL_AUX_USAGE_CCS_E; 251 att_state->input_aux_usage = ISL_AUX_USAGE_CCS_E; 252 } else { 253 att_state->aux_usage = ISL_AUX_USAGE_CCS_D; 254 /* From the Sky Lake PRM, RENDER_SURFACE_STATE::AuxiliarySurfaceMode: 255 * 256 * "If Number of Multisamples is MULTISAMPLECOUNT_1, AUX_CCS_D 257 * setting is only allowed if Surface Format supported for Fast 258 * Clear. In addition, if the surface is bound to the sampling 259 * engine, Surface Format must be supported for Render Target 260 * Compression for surfaces bound to the sampling engine." 261 * 262 * In other words, we can only sample from a fast-cleared image if it 263 * also supports color compression. 264 */ 265 if (isl_format_supports_ccs_e(&device->info, iview->planes[0].isl.format)) { 266 att_state->input_aux_usage = ISL_AUX_USAGE_CCS_D; 267 268 /* While fast-clear resolves and partial resolves are fairly cheap in the 269 * case where you render to most of the pixels, full resolves are not 270 * because they potentially involve reading and writing the entire 271 * framebuffer. If we can't texture with CCS_E, we should leave it off and 272 * limit ourselves to fast clears. 273 */ 274 if (cmd_state->pass->attachments[att].first_subpass_layout == 275 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { 276 anv_perf_warn(device->instance, iview->image, 277 "Not temporarily enabling CCS_E."); 278 } 279 } else { 280 att_state->input_aux_usage = ISL_AUX_USAGE_NONE; 281 } 282 } 283 284 assert(iview->image->planes[0].aux_surface.isl.usage & ISL_SURF_USAGE_CCS_BIT); 285 286 att_state->clear_color_is_zero_one = 287 color_is_zero_one(att_state->clear_value.color, iview->planes[0].isl.format); 288 att_state->clear_color_is_zero = 289 att_state->clear_value.color.uint32[0] == 0 && 290 att_state->clear_value.color.uint32[1] == 0 && 291 att_state->clear_value.color.uint32[2] == 0 && 292 att_state->clear_value.color.uint32[3] == 0; 293 294 if (att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT) { 295 /* Start off assuming fast clears are possible */ 296 att_state->fast_clear = true; 297 298 /* Potentially, we could do partial fast-clears but doing so has crazy 299 * alignment restrictions. It's easier to just restrict to full size 300 * fast clears for now. 301 */ 302 if (render_area.offset.x != 0 || 303 render_area.offset.y != 0 || 304 render_area.extent.width != iview->extent.width || 305 render_area.extent.height != iview->extent.height) 306 att_state->fast_clear = false; 307 308 /* On Broadwell and earlier, we can only handle 0/1 clear colors */ 309 if (GEN_GEN <= 8 && !att_state->clear_color_is_zero_one) 310 att_state->fast_clear = false; 311 312 /* We allow fast clears when all aux layers of the miplevel are targeted. 313 * See add_fast_clear_state_buffer() for more information. Also, because 314 * we only either do a fast clear or a normal clear and not both, this 315 * complies with the gen7 restriction of not fast-clearing multiple 316 * layers. 317 */ 318 if (cmd_state->framebuffer->layers != 319 anv_image_aux_layers(iview->image, VK_IMAGE_ASPECT_COLOR_BIT, 320 iview->planes[0].isl.base_level)) { 321 att_state->fast_clear = false; 322 if (GEN_GEN == 7) { 323 anv_perf_warn(device->instance, iview->image, 324 "Not fast-clearing the first layer in " 325 "a multi-layer fast clear."); 326 } 327 } 328 329 /* We only allow fast clears in the GENERAL layout if the auxiliary 330 * buffer is always enabled and the fast-clear value is all 0's. See 331 * add_fast_clear_state_buffer() for more information. 332 */ 333 if (cmd_state->pass->attachments[att].first_subpass_layout == 334 VK_IMAGE_LAYOUT_GENERAL && 335 (!att_state->clear_color_is_zero || 336 iview->image->planes[0].aux_usage == ISL_AUX_USAGE_NONE)) { 337 att_state->fast_clear = false; 338 } 339 340 if (att_state->fast_clear) { 341 memcpy(fast_clear_color->u32, att_state->clear_value.color.uint32, 342 sizeof(fast_clear_color->u32)); 343 } 344 } else { 345 att_state->fast_clear = false; 346 } 347 } 348 349 static bool 350 need_input_attachment_state(const struct anv_render_pass_attachment *att) 351 { 352 if (!(att->usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) 353 return false; 354 355 /* We only allocate input attachment states for color surfaces. Compression 356 * is not yet enabled for depth textures and stencil doesn't allow 357 * compression so we can just use the texture surface state from the view. 358 */ 359 return vk_format_is_color(att->format); 360 } 361 362 /* Transitions a HiZ-enabled depth buffer from one layout to another. Unless 363 * the initial layout is undefined, the HiZ buffer and depth buffer will 364 * represent the same data at the end of this operation. 365 */ 366 static void 367 transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer, 368 const struct anv_image *image, 369 VkImageLayout initial_layout, 370 VkImageLayout final_layout) 371 { 372 assert(image); 373 374 /* A transition is a no-op if HiZ is not enabled, or if the initial and 375 * final layouts are equal. 376 * 377 * The undefined layout indicates that the user doesn't care about the data 378 * that's currently in the buffer. Therefore, a data-preserving resolve 379 * operation is not needed. 380 */ 381 if (image->planes[0].aux_usage != ISL_AUX_USAGE_HIZ || initial_layout == final_layout) 382 return; 383 384 const bool hiz_enabled = ISL_AUX_USAGE_HIZ == 385 anv_layout_to_aux_usage(&cmd_buffer->device->info, image, 386 VK_IMAGE_ASPECT_DEPTH_BIT, initial_layout); 387 const bool enable_hiz = ISL_AUX_USAGE_HIZ == 388 anv_layout_to_aux_usage(&cmd_buffer->device->info, image, 389 VK_IMAGE_ASPECT_DEPTH_BIT, final_layout); 390 391 enum blorp_hiz_op hiz_op; 392 if (hiz_enabled && !enable_hiz) { 393 hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE; 394 } else if (!hiz_enabled && enable_hiz) { 395 hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE; 396 } else { 397 assert(hiz_enabled == enable_hiz); 398 /* If the same buffer will be used, no resolves are necessary. */ 399 hiz_op = BLORP_HIZ_OP_NONE; 400 } 401 402 if (hiz_op != BLORP_HIZ_OP_NONE) 403 anv_gen8_hiz_op_resolve(cmd_buffer, image, hiz_op); 404 } 405 406 #define MI_PREDICATE_SRC0 0x2400 407 #define MI_PREDICATE_SRC1 0x2408 408 409 /* Manages the state of an color image subresource to ensure resolves are 410 * performed properly. 411 */ 412 static void 413 genX(set_image_needs_resolve)(struct anv_cmd_buffer *cmd_buffer, 414 const struct anv_image *image, 415 VkImageAspectFlagBits aspect, 416 unsigned level, bool needs_resolve) 417 { 418 assert(cmd_buffer && image); 419 assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV); 420 assert(level < anv_image_aux_levels(image, aspect)); 421 422 /* The HW docs say that there is no way to guarantee the completion of 423 * the following command. We use it nevertheless because it shows no 424 * issues in testing is currently being used in the GL driver. 425 */ 426 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) { 427 sdi.Address = anv_image_get_needs_resolve_addr(cmd_buffer->device, 428 image, aspect, level); 429 sdi.ImmediateData = needs_resolve; 430 } 431 } 432 433 static void 434 genX(load_needs_resolve_predicate)(struct anv_cmd_buffer *cmd_buffer, 435 const struct anv_image *image, 436 VkImageAspectFlagBits aspect, 437 unsigned level) 438 { 439 assert(cmd_buffer && image); 440 assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV); 441 assert(level < anv_image_aux_levels(image, aspect)); 442 443 const struct anv_address resolve_flag_addr = 444 anv_image_get_needs_resolve_addr(cmd_buffer->device, 445 image, aspect, level); 446 447 /* Make the pending predicated resolve a no-op if one is not needed. 448 * predicate = do_resolve = resolve_flag != 0; 449 */ 450 emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC1 , 0); 451 emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC1 + 4, 0); 452 emit_lri(&cmd_buffer->batch, MI_PREDICATE_SRC0 , 0); 453 emit_lrm(&cmd_buffer->batch, MI_PREDICATE_SRC0 + 4, 454 resolve_flag_addr.bo, resolve_flag_addr.offset); 455 anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) { 456 mip.LoadOperation = LOAD_LOADINV; 457 mip.CombineOperation = COMBINE_SET; 458 mip.CompareOperation = COMPARE_SRCS_EQUAL; 459 } 460 } 461 462 static void 463 init_fast_clear_state_entry(struct anv_cmd_buffer *cmd_buffer, 464 const struct anv_image *image, 465 VkImageAspectFlagBits aspect, 466 unsigned level) 467 { 468 assert(cmd_buffer && image); 469 assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV); 470 assert(level < anv_image_aux_levels(image, aspect)); 471 472 uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect); 473 enum isl_aux_usage aux_usage = image->planes[plane].aux_usage; 474 475 /* The resolve flag should updated to signify that fast-clear/compression 476 * data needs to be removed when leaving the undefined layout. Such data 477 * may need to be removed if it would cause accesses to the color buffer 478 * to return incorrect data. The fast clear data in CCS_D buffers should 479 * be removed because CCS_D isn't enabled all the time. 480 */ 481 genX(set_image_needs_resolve)(cmd_buffer, image, aspect, level, 482 aux_usage == ISL_AUX_USAGE_NONE); 483 484 /* The fast clear value dword(s) will be copied into a surface state object. 485 * Ensure that the restrictions of the fields in the dword(s) are followed. 486 * 487 * CCS buffers on SKL+ can have any value set for the clear colors. 488 */ 489 if (image->samples == 1 && GEN_GEN >= 9) 490 return; 491 492 /* Other combinations of auxiliary buffers and platforms require specific 493 * values in the clear value dword(s). 494 */ 495 struct anv_address addr = 496 anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect, level); 497 unsigned i = 0; 498 for (; i < cmd_buffer->device->isl_dev.ss.clear_value_size; i += 4) { 499 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) { 500 sdi.Address = addr; 501 502 if (GEN_GEN >= 9) { 503 /* MCS buffers on SKL+ can only have 1/0 clear colors. */ 504 assert(aux_usage == ISL_AUX_USAGE_MCS); 505 sdi.ImmediateData = 0; 506 } else if (GEN_VERSIONx10 >= 75) { 507 /* Pre-SKL, the dword containing the clear values also contains 508 * other fields, so we need to initialize those fields to match the 509 * values that would be in a color attachment. 510 */ 511 assert(i == 0); 512 sdi.ImmediateData = ISL_CHANNEL_SELECT_RED << 25 | 513 ISL_CHANNEL_SELECT_GREEN << 22 | 514 ISL_CHANNEL_SELECT_BLUE << 19 | 515 ISL_CHANNEL_SELECT_ALPHA << 16; 516 } else if (GEN_VERSIONx10 == 70) { 517 /* On IVB, the dword containing the clear values also contains 518 * other fields that must be zero or can be zero. 519 */ 520 assert(i == 0); 521 sdi.ImmediateData = 0; 522 } 523 } 524 525 addr.offset += 4; 526 } 527 } 528 529 /* Copy the fast-clear value dword(s) between a surface state object and an 530 * image's fast clear state buffer. 531 */ 532 static void 533 genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer, 534 struct anv_state surface_state, 535 const struct anv_image *image, 536 VkImageAspectFlagBits aspect, 537 unsigned level, 538 bool copy_from_surface_state) 539 { 540 assert(cmd_buffer && image); 541 assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV); 542 assert(level < anv_image_aux_levels(image, aspect)); 543 544 struct anv_bo *ss_bo = 545 &cmd_buffer->device->surface_state_pool.block_pool.bo; 546 uint32_t ss_clear_offset = surface_state.offset + 547 cmd_buffer->device->isl_dev.ss.clear_value_offset; 548 const struct anv_address entry_addr = 549 anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect, level); 550 unsigned copy_size = cmd_buffer->device->isl_dev.ss.clear_value_size; 551 552 if (copy_from_surface_state) { 553 genX(cmd_buffer_mi_memcpy)(cmd_buffer, entry_addr.bo, entry_addr.offset, 554 ss_bo, ss_clear_offset, copy_size); 555 } else { 556 genX(cmd_buffer_mi_memcpy)(cmd_buffer, ss_bo, ss_clear_offset, 557 entry_addr.bo, entry_addr.offset, copy_size); 558 559 /* Updating a surface state object may require that the state cache be 560 * invalidated. From the SKL PRM, Shared Functions -> State -> State 561 * Caching: 562 * 563 * Whenever the RENDER_SURFACE_STATE object in memory pointed to by 564 * the Binding Table Pointer (BTP) and Binding Table Index (BTI) is 565 * modified [...], the L1 state cache must be invalidated to ensure 566 * the new surface or sampler state is fetched from system memory. 567 * 568 * In testing, SKL doesn't actually seem to need this, but HSW does. 569 */ 570 cmd_buffer->state.pending_pipe_bits |= 571 ANV_PIPE_STATE_CACHE_INVALIDATE_BIT; 572 } 573 } 574 575 /** 576 * @brief Transitions a color buffer from one layout to another. 577 * 578 * See section 6.1.1. Image Layout Transitions of the Vulkan 1.0.50 spec for 579 * more information. 580 * 581 * @param level_count VK_REMAINING_MIP_LEVELS isn't supported. 582 * @param layer_count VK_REMAINING_ARRAY_LAYERS isn't supported. For 3D images, 583 * this represents the maximum layers to transition at each 584 * specified miplevel. 585 */ 586 static void 587 transition_color_buffer(struct anv_cmd_buffer *cmd_buffer, 588 const struct anv_image *image, 589 VkImageAspectFlagBits aspect, 590 const uint32_t base_level, uint32_t level_count, 591 uint32_t base_layer, uint32_t layer_count, 592 VkImageLayout initial_layout, 593 VkImageLayout final_layout) 594 { 595 /* Validate the inputs. */ 596 assert(cmd_buffer); 597 assert(image && image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV); 598 /* These values aren't supported for simplicity's sake. */ 599 assert(level_count != VK_REMAINING_MIP_LEVELS && 600 layer_count != VK_REMAINING_ARRAY_LAYERS); 601 /* Ensure the subresource range is valid. */ 602 uint64_t last_level_num = base_level + level_count; 603 const uint32_t max_depth = anv_minify(image->extent.depth, base_level); 604 UNUSED const uint32_t image_layers = MAX2(image->array_size, max_depth); 605 assert((uint64_t)base_layer + layer_count <= image_layers); 606 assert(last_level_num <= image->levels); 607 /* The spec disallows these final layouts. */ 608 assert(final_layout != VK_IMAGE_LAYOUT_UNDEFINED && 609 final_layout != VK_IMAGE_LAYOUT_PREINITIALIZED); 610 611 /* No work is necessary if the layout stays the same or if this subresource 612 * range lacks auxiliary data. 613 */ 614 if (initial_layout == final_layout) 615 return; 616 617 uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect); 618 619 if (image->planes[plane].shadow_surface.isl.size > 0 && 620 final_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) { 621 /* This surface is a linear compressed image with a tiled shadow surface 622 * for texturing. The client is about to use it in READ_ONLY_OPTIMAL so 623 * we need to ensure the shadow copy is up-to-date. 624 */ 625 assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); 626 assert(image->planes[plane].surface.isl.tiling == ISL_TILING_LINEAR); 627 assert(image->planes[plane].shadow_surface.isl.tiling != ISL_TILING_LINEAR); 628 assert(isl_format_is_compressed(image->planes[plane].surface.isl.format)); 629 assert(plane == 0); 630 anv_image_copy_to_shadow(cmd_buffer, image, 631 base_level, level_count, 632 base_layer, layer_count); 633 } 634 635 if (base_layer >= anv_image_aux_layers(image, aspect, base_level)) 636 return; 637 638 /* A transition of a 3D subresource works on all slices at a time. */ 639 if (image->type == VK_IMAGE_TYPE_3D) { 640 base_layer = 0; 641 layer_count = anv_minify(image->extent.depth, base_level); 642 } 643 644 /* We're interested in the subresource range subset that has aux data. */ 645 level_count = MIN2(level_count, anv_image_aux_levels(image, aspect) - base_level); 646 layer_count = MIN2(layer_count, 647 anv_image_aux_layers(image, aspect, base_level) - base_layer); 648 last_level_num = base_level + level_count; 649 650 /* Record whether or not the layout is undefined. Pre-initialized images 651 * with auxiliary buffers have a non-linear layout and are thus undefined. 652 */ 653 assert(image->tiling == VK_IMAGE_TILING_OPTIMAL); 654 const bool undef_layout = initial_layout == VK_IMAGE_LAYOUT_UNDEFINED || 655 initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED; 656 657 /* Do preparatory work before the resolve operation or return early if no 658 * resolve is actually needed. 659 */ 660 if (undef_layout) { 661 /* A subresource in the undefined layout may have been aliased and 662 * populated with any arrangement of bits. Therefore, we must initialize 663 * the related aux buffer and clear buffer entry with desirable values. 664 * 665 * Initialize the relevant clear buffer entries. 666 */ 667 for (unsigned level = base_level; level < last_level_num; level++) 668 init_fast_clear_state_entry(cmd_buffer, image, aspect, level); 669 670 /* Initialize the aux buffers to enable correct rendering. This operation 671 * requires up to two steps: one to rid the aux buffer of data that may 672 * cause GPU hangs, and another to ensure that writes done without aux 673 * will be visible to reads done with aux. 674 * 675 * Having an aux buffer with invalid data is possible for CCS buffers 676 * SKL+ and for MCS buffers with certain sample counts (2x and 8x). One 677 * easy way to get to a valid state is to fast-clear the specified range. 678 * 679 * Even for MCS buffers that have sample counts that don't require 680 * certain bits to be reserved (4x and 8x), we're unsure if the hardware 681 * will be okay with the sample mappings given by the undefined buffer. 682 * We don't have any data to show that this is a problem, but we want to 683 * avoid causing difficult-to-debug problems. 684 */ 685 if ((GEN_GEN >= 9 && image->samples == 1) || image->samples > 1) { 686 if (image->samples == 4 || image->samples == 16) { 687 anv_perf_warn(cmd_buffer->device->instance, image, 688 "Doing a potentially unnecessary fast-clear to " 689 "define an MCS buffer."); 690 } 691 692 anv_image_fast_clear(cmd_buffer, image, aspect, 693 base_level, level_count, 694 base_layer, layer_count); 695 } 696 /* At this point, some elements of the CCS buffer may have the fast-clear 697 * bit-arrangement. As the user writes to a subresource, we need to have 698 * the associated CCS elements enter the ambiguated state. This enables 699 * reads (implicit or explicit) to reflect the user-written data instead 700 * of the clear color. The only time such elements will not change their 701 * state as described above, is in a final layout that doesn't have CCS 702 * enabled. In this case, we must force the associated CCS buffers of the 703 * specified range to enter the ambiguated state in advance. 704 */ 705 if (image->samples == 1 && 706 image->planes[plane].aux_usage != ISL_AUX_USAGE_CCS_E && 707 final_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { 708 /* The CCS_D buffer may not be enabled in the final layout. Continue 709 * executing this function to perform a resolve. 710 */ 711 anv_perf_warn(cmd_buffer->device->instance, image, 712 "Performing an additional resolve for CCS_D layout " 713 "transition. Consider always leaving it on or " 714 "performing an ambiguation pass."); 715 } else { 716 /* Writes in the final layout will be aware of the auxiliary buffer. 717 * In addition, the clear buffer entries and the auxiliary buffers 718 * have been populated with values that will result in correct 719 * rendering. 720 */ 721 return; 722 } 723 } else if (initial_layout != VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) { 724 /* Resolves are only necessary if the subresource may contain blocks 725 * fast-cleared to values unsupported in other layouts. This only occurs 726 * if the initial layout is COLOR_ATTACHMENT_OPTIMAL. 727 */ 728 return; 729 } else if (image->samples > 1) { 730 /* MCS buffers don't need resolving. */ 731 return; 732 } 733 734 /* Perform a resolve to synchronize data between the main and aux buffer. 735 * Before we begin, we must satisfy the cache flushing requirement specified 736 * in the Sky Lake PRM Vol. 7, "MCS Buffer for Render Target(s)": 737 * 738 * Any transition from any value in {Clear, Render, Resolve} to a 739 * different value in {Clear, Render, Resolve} requires end of pipe 740 * synchronization. 741 * 742 * We perform a flush of the write cache before and after the clear and 743 * resolve operations to meet this requirement. 744 * 745 * Unlike other drawing, fast clear operations are not properly 746 * synchronized. The first PIPE_CONTROL here likely ensures that the 747 * contents of the previous render or clear hit the render target before we 748 * resolve and the second likely ensures that the resolve is complete before 749 * we do any more rendering or clearing. 750 */ 751 cmd_buffer->state.pending_pipe_bits |= 752 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT; 753 754 for (uint32_t level = base_level; level < last_level_num; level++) { 755 756 /* The number of layers changes at each 3D miplevel. */ 757 if (image->type == VK_IMAGE_TYPE_3D) { 758 layer_count = MIN2(layer_count, anv_image_aux_layers(image, aspect, level)); 759 } 760 761 genX(load_needs_resolve_predicate)(cmd_buffer, image, aspect, level); 762 763 anv_ccs_resolve(cmd_buffer, image, aspect, level, base_layer, layer_count, 764 image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E ? 765 BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL : 766 BLORP_FAST_CLEAR_OP_RESOLVE_FULL); 767 768 genX(set_image_needs_resolve)(cmd_buffer, image, aspect, level, false); 769 } 770 771 cmd_buffer->state.pending_pipe_bits |= 772 ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_CS_STALL_BIT; 773 } 774 775 /** 776 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass. 777 */ 778 static VkResult 779 genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer, 780 struct anv_render_pass *pass, 781 const VkRenderPassBeginInfo *begin) 782 { 783 const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev; 784 struct anv_cmd_state *state = &cmd_buffer->state; 785 786 vk_free(&cmd_buffer->pool->alloc, state->attachments); 787 788 if (pass->attachment_count > 0) { 789 state->attachments = vk_alloc(&cmd_buffer->pool->alloc, 790 pass->attachment_count * 791 sizeof(state->attachments[0]), 792 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT); 793 if (state->attachments == NULL) { 794 /* Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */ 795 return anv_batch_set_error(&cmd_buffer->batch, 796 VK_ERROR_OUT_OF_HOST_MEMORY); 797 } 798 } else { 799 state->attachments = NULL; 800 } 801 802 /* Reserve one for the NULL state. */ 803 unsigned num_states = 1; 804 for (uint32_t i = 0; i < pass->attachment_count; ++i) { 805 if (vk_format_is_color(pass->attachments[i].format)) 806 num_states++; 807 808 if (need_input_attachment_state(&pass->attachments[i])) 809 num_states++; 810 } 811 812 const uint32_t ss_stride = align_u32(isl_dev->ss.size, isl_dev->ss.align); 813 state->render_pass_states = 814 anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 815 num_states * ss_stride, isl_dev->ss.align); 816 817 struct anv_state next_state = state->render_pass_states; 818 next_state.alloc_size = isl_dev->ss.size; 819 820 state->null_surface_state = next_state; 821 next_state.offset += ss_stride; 822 next_state.map += ss_stride; 823 824 for (uint32_t i = 0; i < pass->attachment_count; ++i) { 825 if (vk_format_is_color(pass->attachments[i].format)) { 826 state->attachments[i].color.state = next_state; 827 next_state.offset += ss_stride; 828 next_state.map += ss_stride; 829 } 830 831 if (need_input_attachment_state(&pass->attachments[i])) { 832 state->attachments[i].input.state = next_state; 833 next_state.offset += ss_stride; 834 next_state.map += ss_stride; 835 } 836 } 837 assert(next_state.offset == state->render_pass_states.offset + 838 state->render_pass_states.alloc_size); 839 840 if (begin) { 841 ANV_FROM_HANDLE(anv_framebuffer, framebuffer, begin->framebuffer); 842 assert(pass->attachment_count == framebuffer->attachment_count); 843 844 isl_null_fill_state(isl_dev, state->null_surface_state.map, 845 isl_extent3d(framebuffer->width, 846 framebuffer->height, 847 framebuffer->layers)); 848 849 for (uint32_t i = 0; i < pass->attachment_count; ++i) { 850 struct anv_render_pass_attachment *att = &pass->attachments[i]; 851 VkImageAspectFlags att_aspects = vk_format_aspects(att->format); 852 VkImageAspectFlags clear_aspects = 0; 853 854 if (att_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) { 855 /* color attachment */ 856 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { 857 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT; 858 } 859 } else { 860 /* depthstencil attachment */ 861 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) && 862 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { 863 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT; 864 } 865 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) && 866 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) { 867 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT; 868 } 869 } 870 871 state->attachments[i].current_layout = att->initial_layout; 872 state->attachments[i].pending_clear_aspects = clear_aspects; 873 if (clear_aspects) 874 state->attachments[i].clear_value = begin->pClearValues[i]; 875 876 struct anv_image_view *iview = framebuffer->attachments[i]; 877 anv_assert(iview->vk_format == att->format); 878 879 union isl_color_value clear_color = { .u32 = { 0, } }; 880 if (att_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) { 881 anv_assert(iview->n_planes == 1); 882 assert(att_aspects == VK_IMAGE_ASPECT_COLOR_BIT); 883 color_attachment_compute_aux_usage(cmd_buffer->device, 884 state, i, begin->renderArea, 885 &clear_color); 886 887 anv_image_fill_surface_state(cmd_buffer->device, 888 iview->image, 889 VK_IMAGE_ASPECT_COLOR_BIT, 890 &iview->planes[0].isl, 891 ISL_SURF_USAGE_RENDER_TARGET_BIT, 892 state->attachments[i].aux_usage, 893 &clear_color, 894 0, 895 &state->attachments[i].color, 896 NULL); 897 898 add_image_view_relocs(cmd_buffer, iview, 0, 899 state->attachments[i].color); 900 } else { 901 /* This field will be initialized after the first subpass 902 * transition. 903 */ 904 state->attachments[i].aux_usage = ISL_AUX_USAGE_NONE; 905 906 state->attachments[i].input_aux_usage = ISL_AUX_USAGE_NONE; 907 } 908 909 if (need_input_attachment_state(&pass->attachments[i])) { 910 anv_image_fill_surface_state(cmd_buffer->device, 911 iview->image, 912 VK_IMAGE_ASPECT_COLOR_BIT, 913 &iview->planes[0].isl, 914 ISL_SURF_USAGE_TEXTURE_BIT, 915 state->attachments[i].input_aux_usage, 916 &clear_color, 917 0, 918 &state->attachments[i].input, 919 NULL); 920 921 add_image_view_relocs(cmd_buffer, iview, 0, 922 state->attachments[i].input); 923 } 924 } 925 } 926 927 return VK_SUCCESS; 928 } 929 930 VkResult 931 genX(BeginCommandBuffer)( 932 VkCommandBuffer commandBuffer, 933 const VkCommandBufferBeginInfo* pBeginInfo) 934 { 935 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 936 937 /* If this is the first vkBeginCommandBuffer, we must *initialize* the 938 * command buffer's state. Otherwise, we must *reset* its state. In both 939 * cases we reset it. 940 * 941 * From the Vulkan 1.0 spec: 942 * 943 * If a command buffer is in the executable state and the command buffer 944 * was allocated from a command pool with the 945 * VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then 946 * vkBeginCommandBuffer implicitly resets the command buffer, behaving 947 * as if vkResetCommandBuffer had been called with 948 * VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts 949 * the command buffer in the recording state. 950 */ 951 anv_cmd_buffer_reset(cmd_buffer); 952 953 cmd_buffer->usage_flags = pBeginInfo->flags; 954 955 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY || 956 !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)); 957 958 genX(cmd_buffer_emit_state_base_address)(cmd_buffer); 959 960 /* We sometimes store vertex data in the dynamic state buffer for blorp 961 * operations and our dynamic state stream may re-use data from previous 962 * command buffers. In order to prevent stale cache data, we flush the VF 963 * cache. We could do this on every blorp call but that's not really 964 * needed as all of the data will get written by the CPU prior to the GPU 965 * executing anything. The chances are fairly high that they will use 966 * blorp at least once per primary command buffer so it shouldn't be 967 * wasted. 968 */ 969 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) 970 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT; 971 972 /* We send an "Indirect State Pointers Disable" packet at 973 * EndCommandBuffer, so all push contant packets are ignored during a 974 * context restore. Documentation says after that command, we need to 975 * emit push constants again before any rendering operation. So we 976 * flag them dirty here to make sure they get emitted. 977 */ 978 if (GEN_GEN == 10) 979 cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS; 980 981 VkResult result = VK_SUCCESS; 982 if (cmd_buffer->usage_flags & 983 VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { 984 assert(pBeginInfo->pInheritanceInfo); 985 cmd_buffer->state.pass = 986 anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass); 987 cmd_buffer->state.subpass = 988 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass]; 989 990 /* This is optional in the inheritance info. */ 991 cmd_buffer->state.framebuffer = 992 anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer); 993 994 result = genX(cmd_buffer_setup_attachments)(cmd_buffer, 995 cmd_buffer->state.pass, NULL); 996 997 /* Record that HiZ is enabled if we can. */ 998 if (cmd_buffer->state.framebuffer) { 999 const struct anv_image_view * const iview = 1000 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer); 1001 1002 if (iview) { 1003 VkImageLayout layout = 1004 cmd_buffer->state.subpass->depth_stencil_attachment.layout; 1005 1006 enum isl_aux_usage aux_usage = 1007 anv_layout_to_aux_usage(&cmd_buffer->device->info, iview->image, 1008 VK_IMAGE_ASPECT_DEPTH_BIT, layout); 1009 1010 cmd_buffer->state.hiz_enabled = aux_usage == ISL_AUX_USAGE_HIZ; 1011 } 1012 } 1013 1014 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS; 1015 } 1016 1017 return result; 1018 } 1019 1020 /* From the PRM, Volume 2a: 1021 * 1022 * "Indirect State Pointers Disable 1023 * 1024 * At the completion of the post-sync operation associated with this pipe 1025 * control packet, the indirect state pointers in the hardware are 1026 * considered invalid; the indirect pointers are not saved in the context. 1027 * If any new indirect state commands are executed in the command stream 1028 * while the pipe control is pending, the new indirect state commands are 1029 * preserved. 1030 * 1031 * [DevIVB+]: Using Invalidate State Pointer (ISP) only inhibits context 1032 * restoring of Push Constant (3DSTATE_CONSTANT_*) commands. Push Constant 1033 * commands are only considered as Indirect State Pointers. Once ISP is 1034 * issued in a context, SW must initialize by programming push constant 1035 * commands for all the shaders (at least to zero length) before attempting 1036 * any rendering operation for the same context." 1037 * 1038 * 3DSTATE_CONSTANT_* packets are restored during a context restore, 1039 * even though they point to a BO that has been already unreferenced at 1040 * the end of the previous batch buffer. This has been fine so far since 1041 * we are protected by these scratch page (every address not covered by 1042 * a BO should be pointing to the scratch page). But on CNL, it is 1043 * causing a GPU hang during context restore at the 3DSTATE_CONSTANT_* 1044 * instruction. 1045 * 1046 * The flag "Indirect State Pointers Disable" in PIPE_CONTROL tells the 1047 * hardware to ignore previous 3DSTATE_CONSTANT_* packets during a 1048 * context restore, so the mentioned hang doesn't happen. However, 1049 * software must program push constant commands for all stages prior to 1050 * rendering anything. So we flag them dirty in BeginCommandBuffer. 1051 */ 1052 static void 1053 emit_isp_disable(struct anv_cmd_buffer *cmd_buffer) 1054 { 1055 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { 1056 pc.IndirectStatePointersDisable = true; 1057 pc.CommandStreamerStallEnable = true; 1058 } 1059 } 1060 1061 VkResult 1062 genX(EndCommandBuffer)( 1063 VkCommandBuffer commandBuffer) 1064 { 1065 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 1066 1067 if (anv_batch_has_error(&cmd_buffer->batch)) 1068 return cmd_buffer->batch.status; 1069 1070 /* We want every command buffer to start with the PMA fix in a known state, 1071 * so we disable it at the end of the command buffer. 1072 */ 1073 genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false); 1074 1075 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer); 1076 1077 if (GEN_GEN == 10) 1078 emit_isp_disable(cmd_buffer); 1079 1080 anv_cmd_buffer_end_batch_buffer(cmd_buffer); 1081 1082 return VK_SUCCESS; 1083 } 1084 1085 void 1086 genX(CmdExecuteCommands)( 1087 VkCommandBuffer commandBuffer, 1088 uint32_t commandBufferCount, 1089 const VkCommandBuffer* pCmdBuffers) 1090 { 1091 ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer); 1092 1093 assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY); 1094 1095 if (anv_batch_has_error(&primary->batch)) 1096 return; 1097 1098 /* The secondary command buffers will assume that the PMA fix is disabled 1099 * when they begin executing. Make sure this is true. 1100 */ 1101 genX(cmd_buffer_enable_pma_fix)(primary, false); 1102 1103 /* The secondary command buffer doesn't know which textures etc. have been 1104 * flushed prior to their execution. Apply those flushes now. 1105 */ 1106 genX(cmd_buffer_apply_pipe_flushes)(primary); 1107 1108 for (uint32_t i = 0; i < commandBufferCount; i++) { 1109 ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]); 1110 1111 assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY); 1112 assert(!anv_batch_has_error(&secondary->batch)); 1113 1114 if (secondary->usage_flags & 1115 VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) { 1116 /* If we're continuing a render pass from the primary, we need to 1117 * copy the surface states for the current subpass into the storage 1118 * we allocated for them in BeginCommandBuffer. 1119 */ 1120 struct anv_bo *ss_bo = 1121 &primary->device->surface_state_pool.block_pool.bo; 1122 struct anv_state src_state = primary->state.render_pass_states; 1123 struct anv_state dst_state = secondary->state.render_pass_states; 1124 assert(src_state.alloc_size == dst_state.alloc_size); 1125 1126 genX(cmd_buffer_so_memcpy)(primary, ss_bo, dst_state.offset, 1127 ss_bo, src_state.offset, 1128 src_state.alloc_size); 1129 } 1130 1131 anv_cmd_buffer_add_secondary(primary, secondary); 1132 } 1133 1134 /* The secondary may have selected a different pipeline (3D or compute) and 1135 * may have changed the current L3$ configuration. Reset our tracking 1136 * variables to invalid values to ensure that we re-emit these in the case 1137 * where we do any draws or compute dispatches from the primary after the 1138 * secondary has returned. 1139 */ 1140 primary->state.current_pipeline = UINT32_MAX; 1141 primary->state.current_l3_config = NULL; 1142 1143 /* Each of the secondary command buffers will use its own state base 1144 * address. We need to re-emit state base address for the primary after 1145 * all of the secondaries are done. 1146 * 1147 * TODO: Maybe we want to make this a dirty bit to avoid extra state base 1148 * address calls? 1149 */ 1150 genX(cmd_buffer_emit_state_base_address)(primary); 1151 } 1152 1153 #define IVB_L3SQCREG1_SQGHPCI_DEFAULT 0x00730000 1154 #define VLV_L3SQCREG1_SQGHPCI_DEFAULT 0x00d30000 1155 #define HSW_L3SQCREG1_SQGHPCI_DEFAULT 0x00610000 1156 1157 /** 1158 * Program the hardware to use the specified L3 configuration. 1159 */ 1160 void 1161 genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer, 1162 const struct gen_l3_config *cfg) 1163 { 1164 assert(cfg); 1165 if (cfg == cmd_buffer->state.current_l3_config) 1166 return; 1167 1168 if (unlikely(INTEL_DEBUG & DEBUG_L3)) { 1169 intel_logd("L3 config transition: "); 1170 gen_dump_l3_config(cfg, stderr); 1171 } 1172 1173 const bool has_slm = cfg->n[GEN_L3P_SLM]; 1174 1175 /* According to the hardware docs, the L3 partitioning can only be changed 1176 * while the pipeline is completely drained and the caches are flushed, 1177 * which involves a first PIPE_CONTROL flush which stalls the pipeline... 1178 */ 1179 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { 1180 pc.DCFlushEnable = true; 1181 pc.PostSyncOperation = NoWrite; 1182 pc.CommandStreamerStallEnable = true; 1183 } 1184 1185 /* ...followed by a second pipelined PIPE_CONTROL that initiates 1186 * invalidation of the relevant caches. Note that because RO invalidation 1187 * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL 1188 * command is processed by the CS) we cannot combine it with the previous 1189 * stalling flush as the hardware documentation suggests, because that 1190 * would cause the CS to stall on previous rendering *after* RO 1191 * invalidation and wouldn't prevent the RO caches from being polluted by 1192 * concurrent rendering before the stall completes. This intentionally 1193 * doesn't implement the SKL+ hardware workaround suggesting to enable CS 1194 * stall on PIPE_CONTROLs with the texture cache invalidation bit set for 1195 * GPGPU workloads because the previous and subsequent PIPE_CONTROLs 1196 * already guarantee that there is no concurrent GPGPU kernel execution 1197 * (see SKL HSD 2132585). 1198 */ 1199 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { 1200 pc.TextureCacheInvalidationEnable = true; 1201 pc.ConstantCacheInvalidationEnable = true; 1202 pc.InstructionCacheInvalidateEnable = true; 1203 pc.StateCacheInvalidationEnable = true; 1204 pc.PostSyncOperation = NoWrite; 1205 } 1206 1207 /* Now send a third stalling flush to make sure that invalidation is 1208 * complete when the L3 configuration registers are modified. 1209 */ 1210 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { 1211 pc.DCFlushEnable = true; 1212 pc.PostSyncOperation = NoWrite; 1213 pc.CommandStreamerStallEnable = true; 1214 } 1215 1216 #if GEN_GEN >= 8 1217 1218 assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]); 1219 1220 uint32_t l3cr; 1221 anv_pack_struct(&l3cr, GENX(L3CNTLREG), 1222 .SLMEnable = has_slm, 1223 .URBAllocation = cfg->n[GEN_L3P_URB], 1224 .ROAllocation = cfg->n[GEN_L3P_RO], 1225 .DCAllocation = cfg->n[GEN_L3P_DC], 1226 .AllAllocation = cfg->n[GEN_L3P_ALL]); 1227 1228 /* Set up the L3 partitioning. */ 1229 emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG_num), l3cr); 1230 1231 #else 1232 1233 const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL]; 1234 const bool has_is = cfg->n[GEN_L3P_IS] || cfg->n[GEN_L3P_RO] || 1235 cfg->n[GEN_L3P_ALL]; 1236 const bool has_c = cfg->n[GEN_L3P_C] || cfg->n[GEN_L3P_RO] || 1237 cfg->n[GEN_L3P_ALL]; 1238 const bool has_t = cfg->n[GEN_L3P_T] || cfg->n[GEN_L3P_RO] || 1239 cfg->n[GEN_L3P_ALL]; 1240 1241 assert(!cfg->n[GEN_L3P_ALL]); 1242 1243 /* When enabled SLM only uses a portion of the L3 on half of the banks, 1244 * the matching space on the remaining banks has to be allocated to a 1245 * client (URB for all validated configurations) set to the 1246 * lower-bandwidth 2-bank address hashing mode. 1247 */ 1248 const struct gen_device_info *devinfo = &cmd_buffer->device->info; 1249 const bool urb_low_bw = has_slm && !devinfo->is_baytrail; 1250 assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]); 1251 1252 /* Minimum number of ways that can be allocated to the URB. */ 1253 MAYBE_UNUSED const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0; 1254 assert(cfg->n[GEN_L3P_URB] >= n0_urb); 1255 1256 uint32_t l3sqcr1, l3cr2, l3cr3; 1257 anv_pack_struct(&l3sqcr1, GENX(L3SQCREG1), 1258 .ConvertDC_UC = !has_dc, 1259 .ConvertIS_UC = !has_is, 1260 .ConvertC_UC = !has_c, 1261 .ConvertT_UC = !has_t); 1262 l3sqcr1 |= 1263 GEN_IS_HASWELL ? HSW_L3SQCREG1_SQGHPCI_DEFAULT : 1264 devinfo->is_baytrail ? VLV_L3SQCREG1_SQGHPCI_DEFAULT : 1265 IVB_L3SQCREG1_SQGHPCI_DEFAULT; 1266 1267 anv_pack_struct(&l3cr2, GENX(L3CNTLREG2), 1268 .SLMEnable = has_slm, 1269 .URBLowBandwidth = urb_low_bw, 1270 .URBAllocation = cfg->n[GEN_L3P_URB] - n0_urb, 1271 #if !GEN_IS_HASWELL 1272 .ALLAllocation = cfg->n[GEN_L3P_ALL], 1273 #endif 1274 .ROAllocation = cfg->n[GEN_L3P_RO], 1275 .DCAllocation = cfg->n[GEN_L3P_DC]); 1276 1277 anv_pack_struct(&l3cr3, GENX(L3CNTLREG3), 1278 .ISAllocation = cfg->n[GEN_L3P_IS], 1279 .ISLowBandwidth = 0, 1280 .CAllocation = cfg->n[GEN_L3P_C], 1281 .CLowBandwidth = 0, 1282 .TAllocation = cfg->n[GEN_L3P_T], 1283 .TLowBandwidth = 0); 1284 1285 /* Set up the L3 partitioning. */ 1286 emit_lri(&cmd_buffer->batch, GENX(L3SQCREG1_num), l3sqcr1); 1287 emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2_num), l3cr2); 1288 emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3); 1289 1290 #if GEN_IS_HASWELL 1291 if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) { 1292 /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep 1293 * them disabled to avoid crashing the system hard. 1294 */ 1295 uint32_t scratch1, chicken3; 1296 anv_pack_struct(&scratch1, GENX(SCRATCH1), 1297 .L3AtomicDisable = !has_dc); 1298 anv_pack_struct(&chicken3, GENX(CHICKEN3), 1299 .L3AtomicDisableMask = true, 1300 .L3AtomicDisable = !has_dc); 1301 emit_lri(&cmd_buffer->batch, GENX(SCRATCH1_num), scratch1); 1302 emit_lri(&cmd_buffer->batch, GENX(CHICKEN3_num), chicken3); 1303 } 1304 #endif 1305 1306 #endif 1307 1308 cmd_buffer->state.current_l3_config = cfg; 1309 } 1310 1311 void 1312 genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer) 1313 { 1314 enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits; 1315 1316 /* Flushes are pipelined while invalidations are handled immediately. 1317 * Therefore, if we're flushing anything then we need to schedule a stall 1318 * before any invalidations can happen. 1319 */ 1320 if (bits & ANV_PIPE_FLUSH_BITS) 1321 bits |= ANV_PIPE_NEEDS_CS_STALL_BIT; 1322 1323 /* If we're going to do an invalidate and we have a pending CS stall that 1324 * has yet to be resolved, we do the CS stall now. 1325 */ 1326 if ((bits & ANV_PIPE_INVALIDATE_BITS) && 1327 (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) { 1328 bits |= ANV_PIPE_CS_STALL_BIT; 1329 bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT; 1330 } 1331 1332 if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT)) { 1333 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) { 1334 pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT; 1335 pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT; 1336 pipe.RenderTargetCacheFlushEnable = 1337 bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT; 1338 1339 pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT; 1340 pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT; 1341 pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT; 1342 1343 /* 1344 * According to the Broadwell documentation, any PIPE_CONTROL with the 1345 * "Command Streamer Stall" bit set must also have another bit set, 1346 * with five different options: 1347 * 1348 * - Render Target Cache Flush 1349 * - Depth Cache Flush 1350 * - Stall at Pixel Scoreboard 1351 * - Post-Sync Operation 1352 * - Depth Stall 1353 * - DC Flush Enable 1354 * 1355 * I chose "Stall at Pixel Scoreboard" since that's what we use in 1356 * mesa and it seems to work fine. The choice is fairly arbitrary. 1357 */ 1358 if ((bits & ANV_PIPE_CS_STALL_BIT) && 1359 !(bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_DEPTH_STALL_BIT | 1360 ANV_PIPE_STALL_AT_SCOREBOARD_BIT))) 1361 pipe.StallAtPixelScoreboard = true; 1362 } 1363 1364 bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT); 1365 } 1366 1367 if (bits & ANV_PIPE_INVALIDATE_BITS) { 1368 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) { 1369 pipe.StateCacheInvalidationEnable = 1370 bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT; 1371 pipe.ConstantCacheInvalidationEnable = 1372 bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT; 1373 pipe.VFCacheInvalidationEnable = 1374 bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT; 1375 pipe.TextureCacheInvalidationEnable = 1376 bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT; 1377 pipe.InstructionCacheInvalidateEnable = 1378 bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT; 1379 } 1380 1381 bits &= ~ANV_PIPE_INVALIDATE_BITS; 1382 } 1383 1384 cmd_buffer->state.pending_pipe_bits = bits; 1385 } 1386 1387 void genX(CmdPipelineBarrier)( 1388 VkCommandBuffer commandBuffer, 1389 VkPipelineStageFlags srcStageMask, 1390 VkPipelineStageFlags destStageMask, 1391 VkBool32 byRegion, 1392 uint32_t memoryBarrierCount, 1393 const VkMemoryBarrier* pMemoryBarriers, 1394 uint32_t bufferMemoryBarrierCount, 1395 const VkBufferMemoryBarrier* pBufferMemoryBarriers, 1396 uint32_t imageMemoryBarrierCount, 1397 const VkImageMemoryBarrier* pImageMemoryBarriers) 1398 { 1399 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 1400 1401 /* XXX: Right now, we're really dumb and just flush whatever categories 1402 * the app asks for. One of these days we may make this a bit better 1403 * but right now that's all the hardware allows for in most areas. 1404 */ 1405 VkAccessFlags src_flags = 0; 1406 VkAccessFlags dst_flags = 0; 1407 1408 for (uint32_t i = 0; i < memoryBarrierCount; i++) { 1409 src_flags |= pMemoryBarriers[i].srcAccessMask; 1410 dst_flags |= pMemoryBarriers[i].dstAccessMask; 1411 } 1412 1413 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) { 1414 src_flags |= pBufferMemoryBarriers[i].srcAccessMask; 1415 dst_flags |= pBufferMemoryBarriers[i].dstAccessMask; 1416 } 1417 1418 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) { 1419 src_flags |= pImageMemoryBarriers[i].srcAccessMask; 1420 dst_flags |= pImageMemoryBarriers[i].dstAccessMask; 1421 ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[i].image); 1422 const VkImageSubresourceRange *range = 1423 &pImageMemoryBarriers[i].subresourceRange; 1424 1425 if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) { 1426 transition_depth_buffer(cmd_buffer, image, 1427 pImageMemoryBarriers[i].oldLayout, 1428 pImageMemoryBarriers[i].newLayout); 1429 } else if (range->aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) { 1430 VkImageAspectFlags color_aspects = 1431 anv_image_expand_aspects(image, range->aspectMask); 1432 uint32_t aspect_bit; 1433 1434 anv_foreach_image_aspect_bit(aspect_bit, image, color_aspects) { 1435 transition_color_buffer(cmd_buffer, image, 1UL << aspect_bit, 1436 range->baseMipLevel, 1437 anv_get_levelCount(image, range), 1438 range->baseArrayLayer, 1439 anv_get_layerCount(image, range), 1440 pImageMemoryBarriers[i].oldLayout, 1441 pImageMemoryBarriers[i].newLayout); 1442 } 1443 } 1444 } 1445 1446 cmd_buffer->state.pending_pipe_bits |= 1447 anv_pipe_flush_bits_for_access_flags(src_flags) | 1448 anv_pipe_invalidate_bits_for_access_flags(dst_flags); 1449 } 1450 1451 static void 1452 cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer) 1453 { 1454 VkShaderStageFlags stages = 1455 cmd_buffer->state.gfx.base.pipeline->active_stages; 1456 1457 /* In order to avoid thrash, we assume that vertex and fragment stages 1458 * always exist. In the rare case where one is missing *and* the other 1459 * uses push concstants, this may be suboptimal. However, avoiding stalls 1460 * seems more important. 1461 */ 1462 stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT; 1463 1464 if (stages == cmd_buffer->state.push_constant_stages) 1465 return; 1466 1467 #if GEN_GEN >= 8 1468 const unsigned push_constant_kb = 32; 1469 #elif GEN_IS_HASWELL 1470 const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16; 1471 #else 1472 const unsigned push_constant_kb = 16; 1473 #endif 1474 1475 const unsigned num_stages = 1476 _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS); 1477 unsigned size_per_stage = push_constant_kb / num_stages; 1478 1479 /* Broadwell+ and Haswell gt3 require that the push constant sizes be in 1480 * units of 2KB. Incidentally, these are the same platforms that have 1481 * 32KB worth of push constant space. 1482 */ 1483 if (push_constant_kb == 32) 1484 size_per_stage &= ~1u; 1485 1486 uint32_t kb_used = 0; 1487 for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) { 1488 unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0; 1489 anv_batch_emit(&cmd_buffer->batch, 1490 GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) { 1491 alloc._3DCommandSubOpcode = 18 + i; 1492 alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0; 1493 alloc.ConstantBufferSize = push_size; 1494 } 1495 kb_used += push_size; 1496 } 1497 1498 anv_batch_emit(&cmd_buffer->batch, 1499 GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) { 1500 alloc.ConstantBufferOffset = kb_used; 1501 alloc.ConstantBufferSize = push_constant_kb - kb_used; 1502 } 1503 1504 cmd_buffer->state.push_constant_stages = stages; 1505 1506 /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS: 1507 * 1508 * "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to 1509 * the next 3DPRIMITIVE command after programming the 1510 * 3DSTATE_PUSH_CONSTANT_ALLOC_VS" 1511 * 1512 * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of 1513 * pipeline setup, we need to dirty push constants. 1514 */ 1515 cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS; 1516 } 1517 1518 static const struct anv_descriptor * 1519 anv_descriptor_for_binding(const struct anv_cmd_pipeline_state *pipe_state, 1520 const struct anv_pipeline_binding *binding) 1521 { 1522 assert(binding->set < MAX_SETS); 1523 const struct anv_descriptor_set *set = 1524 pipe_state->descriptors[binding->set]; 1525 const uint32_t offset = 1526 set->layout->binding[binding->binding].descriptor_index; 1527 return &set->descriptors[offset + binding->index]; 1528 } 1529 1530 static uint32_t 1531 dynamic_offset_for_binding(const struct anv_cmd_pipeline_state *pipe_state, 1532 const struct anv_pipeline *pipeline, 1533 const struct anv_pipeline_binding *binding) 1534 { 1535 assert(binding->set < MAX_SETS); 1536 const struct anv_descriptor_set *set = 1537 pipe_state->descriptors[binding->set]; 1538 1539 uint32_t dynamic_offset_idx = 1540 pipeline->layout->set[binding->set].dynamic_offset_start + 1541 set->layout->binding[binding->binding].dynamic_offset_index + 1542 binding->index; 1543 1544 return pipe_state->dynamic_offsets[dynamic_offset_idx]; 1545 } 1546 1547 static VkResult 1548 emit_binding_table(struct anv_cmd_buffer *cmd_buffer, 1549 gl_shader_stage stage, 1550 struct anv_state *bt_state) 1551 { 1552 struct anv_subpass *subpass = cmd_buffer->state.subpass; 1553 struct anv_cmd_pipeline_state *pipe_state; 1554 struct anv_pipeline *pipeline; 1555 uint32_t bias, state_offset; 1556 1557 switch (stage) { 1558 case MESA_SHADER_COMPUTE: 1559 pipe_state = &cmd_buffer->state.compute.base; 1560 bias = 1; 1561 break; 1562 default: 1563 pipe_state = &cmd_buffer->state.gfx.base; 1564 bias = 0; 1565 break; 1566 } 1567 pipeline = pipe_state->pipeline; 1568 1569 if (!anv_pipeline_has_stage(pipeline, stage)) { 1570 *bt_state = (struct anv_state) { 0, }; 1571 return VK_SUCCESS; 1572 } 1573 1574 struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map; 1575 if (bias + map->surface_count == 0) { 1576 *bt_state = (struct anv_state) { 0, }; 1577 return VK_SUCCESS; 1578 } 1579 1580 *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer, 1581 bias + map->surface_count, 1582 &state_offset); 1583 uint32_t *bt_map = bt_state->map; 1584 1585 if (bt_state->map == NULL) 1586 return VK_ERROR_OUT_OF_DEVICE_MEMORY; 1587 1588 if (stage == MESA_SHADER_COMPUTE && 1589 get_cs_prog_data(pipeline)->uses_num_work_groups) { 1590 struct anv_bo *bo = cmd_buffer->state.compute.num_workgroups.bo; 1591 uint32_t bo_offset = cmd_buffer->state.compute.num_workgroups.offset; 1592 1593 struct anv_state surface_state; 1594 surface_state = 1595 anv_cmd_buffer_alloc_surface_state(cmd_buffer); 1596 1597 const enum isl_format format = 1598 anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER); 1599 anv_fill_buffer_surface_state(cmd_buffer->device, surface_state, 1600 format, bo_offset, 12, 1); 1601 1602 bt_map[0] = surface_state.offset + state_offset; 1603 add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset); 1604 } 1605 1606 if (map->surface_count == 0) 1607 goto out; 1608 1609 if (map->image_count > 0) { 1610 VkResult result = 1611 anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images); 1612 if (result != VK_SUCCESS) 1613 return result; 1614 1615 cmd_buffer->state.push_constants_dirty |= 1 << stage; 1616 } 1617 1618 uint32_t image = 0; 1619 for (uint32_t s = 0; s < map->surface_count; s++) { 1620 struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s]; 1621 1622 struct anv_state surface_state; 1623 1624 if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) { 1625 /* Color attachment binding */ 1626 assert(stage == MESA_SHADER_FRAGMENT); 1627 assert(binding->binding == 0); 1628 if (binding->index < subpass->color_count) { 1629 const unsigned att = 1630 subpass->color_attachments[binding->index].attachment; 1631 1632 /* From the Vulkan 1.0.46 spec: 1633 * 1634 * "If any color or depth/stencil attachments are 1635 * VK_ATTACHMENT_UNUSED, then no writes occur for those 1636 * attachments." 1637 */ 1638 if (att == VK_ATTACHMENT_UNUSED) { 1639 surface_state = cmd_buffer->state.null_surface_state; 1640 } else { 1641 surface_state = cmd_buffer->state.attachments[att].color.state; 1642 } 1643 } else { 1644 surface_state = cmd_buffer->state.null_surface_state; 1645 } 1646 1647 bt_map[bias + s] = surface_state.offset + state_offset; 1648 continue; 1649 } 1650 1651 const struct anv_descriptor *desc = 1652 anv_descriptor_for_binding(pipe_state, binding); 1653 1654 switch (desc->type) { 1655 case VK_DESCRIPTOR_TYPE_SAMPLER: 1656 /* Nothing for us to do here */ 1657 continue; 1658 1659 case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: 1660 case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: { 1661 struct anv_surface_state sstate = 1662 (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ? 1663 desc->image_view->planes[binding->plane].general_sampler_surface_state : 1664 desc->image_view->planes[binding->plane].optimal_sampler_surface_state; 1665 surface_state = sstate.state; 1666 assert(surface_state.alloc_size); 1667 add_image_view_relocs(cmd_buffer, desc->image_view, 1668 binding->plane, sstate); 1669 break; 1670 } 1671 case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT: 1672 assert(stage == MESA_SHADER_FRAGMENT); 1673 if ((desc->image_view->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) == 0) { 1674 /* For depth and stencil input attachments, we treat it like any 1675 * old texture that a user may have bound. 1676 */ 1677 struct anv_surface_state sstate = 1678 (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ? 1679 desc->image_view->planes[binding->plane].general_sampler_surface_state : 1680 desc->image_view->planes[binding->plane].optimal_sampler_surface_state; 1681 surface_state = sstate.state; 1682 assert(surface_state.alloc_size); 1683 add_image_view_relocs(cmd_buffer, desc->image_view, 1684 binding->plane, sstate); 1685 } else { 1686 /* For color input attachments, we create the surface state at 1687 * vkBeginRenderPass time so that we can include aux and clear 1688 * color information. 1689 */ 1690 assert(binding->input_attachment_index < subpass->input_count); 1691 const unsigned subpass_att = binding->input_attachment_index; 1692 const unsigned att = subpass->input_attachments[subpass_att].attachment; 1693 surface_state = cmd_buffer->state.attachments[att].input.state; 1694 } 1695 break; 1696 1697 case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: { 1698 struct anv_surface_state sstate = (binding->write_only) 1699 ? desc->image_view->planes[binding->plane].writeonly_storage_surface_state 1700 : desc->image_view->planes[binding->plane].storage_surface_state; 1701 surface_state = sstate.state; 1702 assert(surface_state.alloc_size); 1703 add_image_view_relocs(cmd_buffer, desc->image_view, 1704 binding->plane, sstate); 1705 1706 struct brw_image_param *image_param = 1707 &cmd_buffer->state.push_constants[stage]->images[image++]; 1708 1709 *image_param = desc->image_view->planes[binding->plane].storage_image_param; 1710 image_param->surface_idx = bias + s; 1711 break; 1712 } 1713 1714 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: 1715 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER: 1716 case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: 1717 surface_state = desc->buffer_view->surface_state; 1718 assert(surface_state.alloc_size); 1719 add_surface_state_reloc(cmd_buffer, surface_state, 1720 desc->buffer_view->bo, 1721 desc->buffer_view->offset); 1722 break; 1723 1724 case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC: 1725 case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: { 1726 /* Compute the offset within the buffer */ 1727 uint32_t dynamic_offset = 1728 dynamic_offset_for_binding(pipe_state, pipeline, binding); 1729 uint64_t offset = desc->offset + dynamic_offset; 1730 /* Clamp to the buffer size */ 1731 offset = MIN2(offset, desc->buffer->size); 1732 /* Clamp the range to the buffer size */ 1733 uint32_t range = MIN2(desc->range, desc->buffer->size - offset); 1734 1735 surface_state = 1736 anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64); 1737 enum isl_format format = 1738 anv_isl_format_for_descriptor_type(desc->type); 1739 1740 anv_fill_buffer_surface_state(cmd_buffer->device, surface_state, 1741 format, offset, range, 1); 1742 add_surface_state_reloc(cmd_buffer, surface_state, 1743 desc->buffer->bo, 1744 desc->buffer->offset + offset); 1745 break; 1746 } 1747 1748 case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: 1749 surface_state = (binding->write_only) 1750 ? desc->buffer_view->writeonly_storage_surface_state 1751 : desc->buffer_view->storage_surface_state; 1752 assert(surface_state.alloc_size); 1753 add_surface_state_reloc(cmd_buffer, surface_state, 1754 desc->buffer_view->bo, 1755 desc->buffer_view->offset); 1756 1757 struct brw_image_param *image_param = 1758 &cmd_buffer->state.push_constants[stage]->images[image++]; 1759 1760 *image_param = desc->buffer_view->storage_image_param; 1761 image_param->surface_idx = bias + s; 1762 break; 1763 1764 default: 1765 assert(!"Invalid descriptor type"); 1766 continue; 1767 } 1768 1769 bt_map[bias + s] = surface_state.offset + state_offset; 1770 } 1771 assert(image == map->image_count); 1772 1773 out: 1774 anv_state_flush(cmd_buffer->device, *bt_state); 1775 1776 return VK_SUCCESS; 1777 } 1778 1779 static VkResult 1780 emit_samplers(struct anv_cmd_buffer *cmd_buffer, 1781 gl_shader_stage stage, 1782 struct anv_state *state) 1783 { 1784 struct anv_cmd_pipeline_state *pipe_state = 1785 stage == MESA_SHADER_COMPUTE ? &cmd_buffer->state.compute.base : 1786 &cmd_buffer->state.gfx.base; 1787 struct anv_pipeline *pipeline = pipe_state->pipeline; 1788 1789 if (!anv_pipeline_has_stage(pipeline, stage)) { 1790 *state = (struct anv_state) { 0, }; 1791 return VK_SUCCESS; 1792 } 1793 1794 struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map; 1795 if (map->sampler_count == 0) { 1796 *state = (struct anv_state) { 0, }; 1797 return VK_SUCCESS; 1798 } 1799 1800 uint32_t size = map->sampler_count * 16; 1801 *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32); 1802 1803 if (state->map == NULL) 1804 return VK_ERROR_OUT_OF_DEVICE_MEMORY; 1805 1806 for (uint32_t s = 0; s < map->sampler_count; s++) { 1807 struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s]; 1808 const struct anv_descriptor *desc = 1809 anv_descriptor_for_binding(pipe_state, binding); 1810 1811 if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER && 1812 desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) 1813 continue; 1814 1815 struct anv_sampler *sampler = desc->sampler; 1816 1817 /* This can happen if we have an unfilled slot since TYPE_SAMPLER 1818 * happens to be zero. 1819 */ 1820 if (sampler == NULL) 1821 continue; 1822 1823 memcpy(state->map + (s * 16), 1824 sampler->state[binding->plane], sizeof(sampler->state[0])); 1825 } 1826 1827 anv_state_flush(cmd_buffer->device, *state); 1828 1829 return VK_SUCCESS; 1830 } 1831 1832 static uint32_t 1833 flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer) 1834 { 1835 struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline; 1836 1837 VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty & 1838 pipeline->active_stages; 1839 1840 VkResult result = VK_SUCCESS; 1841 anv_foreach_stage(s, dirty) { 1842 result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]); 1843 if (result != VK_SUCCESS) 1844 break; 1845 result = emit_binding_table(cmd_buffer, s, 1846 &cmd_buffer->state.binding_tables[s]); 1847 if (result != VK_SUCCESS) 1848 break; 1849 } 1850 1851 if (result != VK_SUCCESS) { 1852 assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY); 1853 1854 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer); 1855 if (result != VK_SUCCESS) 1856 return 0; 1857 1858 /* Re-emit state base addresses so we get the new surface state base 1859 * address before we start emitting binding tables etc. 1860 */ 1861 genX(cmd_buffer_emit_state_base_address)(cmd_buffer); 1862 1863 /* Re-emit all active binding tables */ 1864 dirty |= pipeline->active_stages; 1865 anv_foreach_stage(s, dirty) { 1866 result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]); 1867 if (result != VK_SUCCESS) { 1868 anv_batch_set_error(&cmd_buffer->batch, result); 1869 return 0; 1870 } 1871 result = emit_binding_table(cmd_buffer, s, 1872 &cmd_buffer->state.binding_tables[s]); 1873 if (result != VK_SUCCESS) { 1874 anv_batch_set_error(&cmd_buffer->batch, result); 1875 return 0; 1876 } 1877 } 1878 } 1879 1880 cmd_buffer->state.descriptors_dirty &= ~dirty; 1881 1882 return dirty; 1883 } 1884 1885 static void 1886 cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer, 1887 uint32_t stages) 1888 { 1889 static const uint32_t sampler_state_opcodes[] = { 1890 [MESA_SHADER_VERTEX] = 43, 1891 [MESA_SHADER_TESS_CTRL] = 44, /* HS */ 1892 [MESA_SHADER_TESS_EVAL] = 45, /* DS */ 1893 [MESA_SHADER_GEOMETRY] = 46, 1894 [MESA_SHADER_FRAGMENT] = 47, 1895 [MESA_SHADER_COMPUTE] = 0, 1896 }; 1897 1898 static const uint32_t binding_table_opcodes[] = { 1899 [MESA_SHADER_VERTEX] = 38, 1900 [MESA_SHADER_TESS_CTRL] = 39, 1901 [MESA_SHADER_TESS_EVAL] = 40, 1902 [MESA_SHADER_GEOMETRY] = 41, 1903 [MESA_SHADER_FRAGMENT] = 42, 1904 [MESA_SHADER_COMPUTE] = 0, 1905 }; 1906 1907 anv_foreach_stage(s, stages) { 1908 assert(s < ARRAY_SIZE(binding_table_opcodes)); 1909 assert(binding_table_opcodes[s] > 0); 1910 1911 if (cmd_buffer->state.samplers[s].alloc_size > 0) { 1912 anv_batch_emit(&cmd_buffer->batch, 1913 GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) { 1914 ssp._3DCommandSubOpcode = sampler_state_opcodes[s]; 1915 ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset; 1916 } 1917 } 1918 1919 /* Always emit binding table pointers if we're asked to, since on SKL 1920 * this is what flushes push constants. */ 1921 anv_batch_emit(&cmd_buffer->batch, 1922 GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) { 1923 btp._3DCommandSubOpcode = binding_table_opcodes[s]; 1924 btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset; 1925 } 1926 } 1927 } 1928 1929 static void 1930 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer, 1931 VkShaderStageFlags dirty_stages) 1932 { 1933 const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx; 1934 const struct anv_pipeline *pipeline = gfx_state->base.pipeline; 1935 1936 static const uint32_t push_constant_opcodes[] = { 1937 [MESA_SHADER_VERTEX] = 21, 1938 [MESA_SHADER_TESS_CTRL] = 25, /* HS */ 1939 [MESA_SHADER_TESS_EVAL] = 26, /* DS */ 1940 [MESA_SHADER_GEOMETRY] = 22, 1941 [MESA_SHADER_FRAGMENT] = 23, 1942 [MESA_SHADER_COMPUTE] = 0, 1943 }; 1944 1945 VkShaderStageFlags flushed = 0; 1946 1947 anv_foreach_stage(stage, dirty_stages) { 1948 assert(stage < ARRAY_SIZE(push_constant_opcodes)); 1949 assert(push_constant_opcodes[stage] > 0); 1950 1951 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) { 1952 c._3DCommandSubOpcode = push_constant_opcodes[stage]; 1953 1954 if (anv_pipeline_has_stage(pipeline, stage)) { 1955 #if GEN_GEN >= 8 || GEN_IS_HASWELL 1956 const struct brw_stage_prog_data *prog_data = 1957 pipeline->shaders[stage]->prog_data; 1958 const struct anv_pipeline_bind_map *bind_map = 1959 &pipeline->shaders[stage]->bind_map; 1960 1961 /* The Skylake PRM contains the following restriction: 1962 * 1963 * "The driver must ensure The following case does not occur 1964 * without a flush to the 3D engine: 3DSTATE_CONSTANT_* with 1965 * buffer 3 read length equal to zero committed followed by a 1966 * 3DSTATE_CONSTANT_* with buffer 0 read length not equal to 1967 * zero committed." 1968 * 1969 * To avoid this, we program the buffers in the highest slots. 1970 * This way, slot 0 is only used if slot 3 is also used. 1971 */ 1972 int n = 3; 1973 1974 for (int i = 3; i >= 0; i--) { 1975 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i]; 1976 if (range->length == 0) 1977 continue; 1978 1979 const unsigned surface = 1980 prog_data->binding_table.ubo_start + range->block; 1981 1982 assert(surface <= bind_map->surface_count); 1983 const struct anv_pipeline_binding *binding = 1984 &bind_map->surface_to_descriptor[surface]; 1985 1986 const struct anv_descriptor *desc = 1987 anv_descriptor_for_binding(&gfx_state->base, binding); 1988 1989 struct anv_address read_addr; 1990 uint32_t read_len; 1991 if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) { 1992 read_len = MIN2(range->length, 1993 DIV_ROUND_UP(desc->buffer_view->range, 32) - range->start); 1994 read_addr = (struct anv_address) { 1995 .bo = desc->buffer_view->bo, 1996 .offset = desc->buffer_view->offset + 1997 range->start * 32, 1998 }; 1999 } else { 2000 assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC); 2001 2002 uint32_t dynamic_offset = 2003 dynamic_offset_for_binding(&gfx_state->base, 2004 pipeline, binding); 2005 uint32_t buf_offset = 2006 MIN2(desc->offset + dynamic_offset, desc->buffer->size); 2007 uint32_t buf_range = 2008 MIN2(desc->range, desc->buffer->size - buf_offset); 2009 2010 read_len = MIN2(range->length, 2011 DIV_ROUND_UP(buf_range, 32) - range->start); 2012 read_addr = (struct anv_address) { 2013 .bo = desc->buffer->bo, 2014 .offset = desc->buffer->offset + buf_offset + 2015 range->start * 32, 2016 }; 2017 } 2018 2019 if (read_len > 0) { 2020 c.ConstantBody.Buffer[n] = read_addr; 2021 c.ConstantBody.ReadLength[n] = read_len; 2022 n--; 2023 } 2024 } 2025 2026 struct anv_state state = 2027 anv_cmd_buffer_push_constants(cmd_buffer, stage); 2028 2029 if (state.alloc_size > 0) { 2030 c.ConstantBody.Buffer[n] = (struct anv_address) { 2031 .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo, 2032 .offset = state.offset, 2033 }; 2034 c.ConstantBody.ReadLength[n] = 2035 DIV_ROUND_UP(state.alloc_size, 32); 2036 } 2037 #else 2038 /* For Ivy Bridge, the push constants packets have a different 2039 * rule that would require us to iterate in the other direction 2040 * and possibly mess around with dynamic state base address. 2041 * Don't bother; just emit regular push constants at n = 0. 2042 */ 2043 struct anv_state state = 2044 anv_cmd_buffer_push_constants(cmd_buffer, stage); 2045 2046 if (state.alloc_size > 0) { 2047 c.ConstantBody.Buffer[0].offset = state.offset, 2048 c.ConstantBody.ReadLength[0] = 2049 DIV_ROUND_UP(state.alloc_size, 32); 2050 } 2051 #endif 2052 } 2053 } 2054 2055 flushed |= mesa_to_vk_shader_stage(stage); 2056 } 2057 2058 cmd_buffer->state.push_constants_dirty &= ~flushed; 2059 } 2060 2061 void 2062 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer) 2063 { 2064 struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline; 2065 uint32_t *p; 2066 2067 uint32_t vb_emit = cmd_buffer->state.gfx.vb_dirty & pipeline->vb_used; 2068 2069 assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0); 2070 2071 genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config); 2072 2073 genX(flush_pipeline_select_3d)(cmd_buffer); 2074 2075 if (vb_emit) { 2076 const uint32_t num_buffers = __builtin_popcount(vb_emit); 2077 const uint32_t num_dwords = 1 + num_buffers * 4; 2078 2079 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords, 2080 GENX(3DSTATE_VERTEX_BUFFERS)); 2081 uint32_t vb, i = 0; 2082 for_each_bit(vb, vb_emit) { 2083 struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer; 2084 uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset; 2085 2086 struct GENX(VERTEX_BUFFER_STATE) state = { 2087 .VertexBufferIndex = vb, 2088 2089 #if GEN_GEN >= 8 2090 .MemoryObjectControlState = GENX(MOCS), 2091 #else 2092 .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA, 2093 /* Our implementation of VK_KHR_multiview uses instancing to draw 2094 * the different views. If the client asks for instancing, we 2095 * need to use the Instance Data Step Rate to ensure that we 2096 * repeat the client's per-instance data once for each view. 2097 */ 2098 .InstanceDataStepRate = anv_subpass_view_count(pipeline->subpass), 2099 .VertexBufferMemoryObjectControlState = GENX(MOCS), 2100 #endif 2101 2102 .AddressModifyEnable = true, 2103 .BufferPitch = pipeline->binding_stride[vb], 2104 .BufferStartingAddress = { buffer->bo, buffer->offset + offset }, 2105 2106 #if GEN_GEN >= 8 2107 .BufferSize = buffer->size - offset 2108 #else 2109 .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1}, 2110 #endif 2111 }; 2112 2113 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state); 2114 i++; 2115 } 2116 } 2117 2118 cmd_buffer->state.gfx.vb_dirty &= ~vb_emit; 2119 2120 if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) { 2121 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch); 2122 2123 /* The exact descriptor layout is pulled from the pipeline, so we need 2124 * to re-emit binding tables on every pipeline change. 2125 */ 2126 cmd_buffer->state.descriptors_dirty |= pipeline->active_stages; 2127 2128 /* If the pipeline changed, we may need to re-allocate push constant 2129 * space in the URB. 2130 */ 2131 cmd_buffer_alloc_push_constants(cmd_buffer); 2132 } 2133 2134 #if GEN_GEN <= 7 2135 if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT || 2136 cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) { 2137 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1: 2138 * 2139 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth 2140 * stall needs to be sent just prior to any 3DSTATE_VS, 2141 * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS, 2142 * 3DSTATE_BINDING_TABLE_POINTER_VS, 2143 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one 2144 * PIPE_CONTROL needs to be sent before any combination of VS 2145 * associated 3DSTATE." 2146 */ 2147 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { 2148 pc.DepthStallEnable = true; 2149 pc.PostSyncOperation = WriteImmediateData; 2150 pc.Address = 2151 (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 }; 2152 } 2153 } 2154 #endif 2155 2156 /* Render targets live in the same binding table as fragment descriptors */ 2157 if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_RENDER_TARGETS) 2158 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT; 2159 2160 /* We emit the binding tables and sampler tables first, then emit push 2161 * constants and then finally emit binding table and sampler table 2162 * pointers. It has to happen in this order, since emitting the binding 2163 * tables may change the push constants (in case of storage images). After 2164 * emitting push constants, on SKL+ we have to emit the corresponding 2165 * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect. 2166 */ 2167 uint32_t dirty = 0; 2168 if (cmd_buffer->state.descriptors_dirty) 2169 dirty = flush_descriptor_sets(cmd_buffer); 2170 2171 if (dirty || cmd_buffer->state.push_constants_dirty) { 2172 /* Because we're pushing UBOs, we have to push whenever either 2173 * descriptors or push constants is dirty. 2174 */ 2175 dirty |= cmd_buffer->state.push_constants_dirty; 2176 dirty &= ANV_STAGE_MASK & VK_SHADER_STAGE_ALL_GRAPHICS; 2177 cmd_buffer_flush_push_constants(cmd_buffer, dirty); 2178 } 2179 2180 if (dirty) 2181 cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty); 2182 2183 if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT) 2184 gen8_cmd_buffer_emit_viewport(cmd_buffer); 2185 2186 if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT | 2187 ANV_CMD_DIRTY_PIPELINE)) { 2188 gen8_cmd_buffer_emit_depth_viewport(cmd_buffer, 2189 pipeline->depth_clamp_enable); 2190 } 2191 2192 if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR) 2193 gen7_cmd_buffer_emit_scissor(cmd_buffer); 2194 2195 genX(cmd_buffer_flush_dynamic_state)(cmd_buffer); 2196 2197 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer); 2198 } 2199 2200 static void 2201 emit_vertex_bo(struct anv_cmd_buffer *cmd_buffer, 2202 struct anv_bo *bo, uint32_t offset, 2203 uint32_t size, uint32_t index) 2204 { 2205 uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5, 2206 GENX(3DSTATE_VERTEX_BUFFERS)); 2207 2208 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1, 2209 &(struct GENX(VERTEX_BUFFER_STATE)) { 2210 .VertexBufferIndex = index, 2211 .AddressModifyEnable = true, 2212 .BufferPitch = 0, 2213 #if (GEN_GEN >= 8) 2214 .MemoryObjectControlState = GENX(MOCS), 2215 .BufferStartingAddress = { bo, offset }, 2216 .BufferSize = size 2217 #else 2218 .VertexBufferMemoryObjectControlState = GENX(MOCS), 2219 .BufferStartingAddress = { bo, offset }, 2220 .EndAddress = { bo, offset + size }, 2221 #endif 2222 }); 2223 } 2224 2225 static void 2226 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer, 2227 struct anv_bo *bo, uint32_t offset) 2228 { 2229 emit_vertex_bo(cmd_buffer, bo, offset, 8, ANV_SVGS_VB_INDEX); 2230 } 2231 2232 static void 2233 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer, 2234 uint32_t base_vertex, uint32_t base_instance) 2235 { 2236 struct anv_state id_state = 2237 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4); 2238 2239 ((uint32_t *)id_state.map)[0] = base_vertex; 2240 ((uint32_t *)id_state.map)[1] = base_instance; 2241 2242 anv_state_flush(cmd_buffer->device, id_state); 2243 2244 emit_base_vertex_instance_bo(cmd_buffer, 2245 &cmd_buffer->device->dynamic_state_pool.block_pool.bo, id_state.offset); 2246 } 2247 2248 static void 2249 emit_draw_index(struct anv_cmd_buffer *cmd_buffer, uint32_t draw_index) 2250 { 2251 struct anv_state state = 2252 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 4, 4); 2253 2254 ((uint32_t *)state.map)[0] = draw_index; 2255 2256 anv_state_flush(cmd_buffer->device, state); 2257 2258 emit_vertex_bo(cmd_buffer, 2259 &cmd_buffer->device->dynamic_state_pool.block_pool.bo, 2260 state.offset, 4, ANV_DRAWID_VB_INDEX); 2261 } 2262 2263 void genX(CmdDraw)( 2264 VkCommandBuffer commandBuffer, 2265 uint32_t vertexCount, 2266 uint32_t instanceCount, 2267 uint32_t firstVertex, 2268 uint32_t firstInstance) 2269 { 2270 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 2271 struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline; 2272 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline); 2273 2274 if (anv_batch_has_error(&cmd_buffer->batch)) 2275 return; 2276 2277 genX(cmd_buffer_flush_state)(cmd_buffer); 2278 2279 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) 2280 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance); 2281 if (vs_prog_data->uses_drawid) 2282 emit_draw_index(cmd_buffer, 0); 2283 2284 /* Our implementation of VK_KHR_multiview uses instancing to draw the 2285 * different views. We need to multiply instanceCount by the view count. 2286 */ 2287 instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass); 2288 2289 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) { 2290 prim.VertexAccessType = SEQUENTIAL; 2291 prim.PrimitiveTopologyType = pipeline->topology; 2292 prim.VertexCountPerInstance = vertexCount; 2293 prim.StartVertexLocation = firstVertex; 2294 prim.InstanceCount = instanceCount; 2295 prim.StartInstanceLocation = firstInstance; 2296 prim.BaseVertexLocation = 0; 2297 } 2298 } 2299 2300 void genX(CmdDrawIndexed)( 2301 VkCommandBuffer commandBuffer, 2302 uint32_t indexCount, 2303 uint32_t instanceCount, 2304 uint32_t firstIndex, 2305 int32_t vertexOffset, 2306 uint32_t firstInstance) 2307 { 2308 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 2309 struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline; 2310 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline); 2311 2312 if (anv_batch_has_error(&cmd_buffer->batch)) 2313 return; 2314 2315 genX(cmd_buffer_flush_state)(cmd_buffer); 2316 2317 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) 2318 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance); 2319 if (vs_prog_data->uses_drawid) 2320 emit_draw_index(cmd_buffer, 0); 2321 2322 /* Our implementation of VK_KHR_multiview uses instancing to draw the 2323 * different views. We need to multiply instanceCount by the view count. 2324 */ 2325 instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass); 2326 2327 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) { 2328 prim.VertexAccessType = RANDOM; 2329 prim.PrimitiveTopologyType = pipeline->topology; 2330 prim.VertexCountPerInstance = indexCount; 2331 prim.StartVertexLocation = firstIndex; 2332 prim.InstanceCount = instanceCount; 2333 prim.StartInstanceLocation = firstInstance; 2334 prim.BaseVertexLocation = vertexOffset; 2335 } 2336 } 2337 2338 /* Auto-Draw / Indirect Registers */ 2339 #define GEN7_3DPRIM_END_OFFSET 0x2420 2340 #define GEN7_3DPRIM_START_VERTEX 0x2430 2341 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434 2342 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438 2343 #define GEN7_3DPRIM_START_INSTANCE 0x243C 2344 #define GEN7_3DPRIM_BASE_VERTEX 0x2440 2345 2346 /* MI_MATH only exists on Haswell+ */ 2347 #if GEN_IS_HASWELL || GEN_GEN >= 8 2348 2349 static uint32_t 2350 mi_alu(uint32_t opcode, uint32_t op1, uint32_t op2) 2351 { 2352 struct GENX(MI_MATH_ALU_INSTRUCTION) instr = { 2353 .ALUOpcode = opcode, 2354 .Operand1 = op1, 2355 .Operand2 = op2, 2356 }; 2357 2358 uint32_t dw; 2359 GENX(MI_MATH_ALU_INSTRUCTION_pack)(NULL, &dw, &instr); 2360 2361 return dw; 2362 } 2363 2364 #define CS_GPR(n) (0x2600 + (n) * 8) 2365 2366 /* Emit dwords to multiply GPR0 by N */ 2367 static void 2368 build_alu_multiply_gpr0(uint32_t *dw, unsigned *dw_count, uint32_t N) 2369 { 2370 VK_OUTARRAY_MAKE(out, dw, dw_count); 2371 2372 #define append_alu(opcode, operand1, operand2) \ 2373 vk_outarray_append(&out, alu_dw) *alu_dw = mi_alu(opcode, operand1, operand2) 2374 2375 assert(N > 0); 2376 unsigned top_bit = 31 - __builtin_clz(N); 2377 for (int i = top_bit - 1; i >= 0; i--) { 2378 /* We get our initial data in GPR0 and we write the final data out to 2379 * GPR0 but we use GPR1 as our scratch register. 2380 */ 2381 unsigned src_reg = i == top_bit - 1 ? MI_ALU_REG0 : MI_ALU_REG1; 2382 unsigned dst_reg = i == 0 ? MI_ALU_REG0 : MI_ALU_REG1; 2383 2384 /* Shift the current value left by 1 */ 2385 append_alu(MI_ALU_LOAD, MI_ALU_SRCA, src_reg); 2386 append_alu(MI_ALU_LOAD, MI_ALU_SRCB, src_reg); 2387 append_alu(MI_ALU_ADD, 0, 0); 2388 2389 if (N & (1 << i)) { 2390 /* Store ACCU to R1 and add R0 to R1 */ 2391 append_alu(MI_ALU_STORE, MI_ALU_REG1, MI_ALU_ACCU); 2392 append_alu(MI_ALU_LOAD, MI_ALU_SRCA, MI_ALU_REG0); 2393 append_alu(MI_ALU_LOAD, MI_ALU_SRCB, MI_ALU_REG1); 2394 append_alu(MI_ALU_ADD, 0, 0); 2395 } 2396 2397 append_alu(MI_ALU_STORE, dst_reg, MI_ALU_ACCU); 2398 } 2399 2400 #undef append_alu 2401 } 2402 2403 static void 2404 emit_mul_gpr0(struct anv_batch *batch, uint32_t N) 2405 { 2406 uint32_t num_dwords; 2407 build_alu_multiply_gpr0(NULL, &num_dwords, N); 2408 2409 uint32_t *dw = anv_batch_emitn(batch, 1 + num_dwords, GENX(MI_MATH)); 2410 build_alu_multiply_gpr0(dw + 1, &num_dwords, N); 2411 } 2412 2413 #endif /* GEN_IS_HASWELL || GEN_GEN >= 8 */ 2414 2415 static void 2416 load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer, 2417 struct anv_buffer *buffer, uint64_t offset, 2418 bool indexed) 2419 { 2420 struct anv_batch *batch = &cmd_buffer->batch; 2421 struct anv_bo *bo = buffer->bo; 2422 uint32_t bo_offset = buffer->offset + offset; 2423 2424 emit_lrm(batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset); 2425 2426 unsigned view_count = anv_subpass_view_count(cmd_buffer->state.subpass); 2427 if (view_count > 1) { 2428 #if GEN_IS_HASWELL || GEN_GEN >= 8 2429 emit_lrm(batch, CS_GPR(0), bo, bo_offset + 4); 2430 emit_mul_gpr0(batch, view_count); 2431 emit_lrr(batch, GEN7_3DPRIM_INSTANCE_COUNT, CS_GPR(0)); 2432 #else 2433 anv_finishme("Multiview + indirect draw requires MI_MATH; " 2434 "MI_MATH is not supported on Ivy Bridge"); 2435 emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4); 2436 #endif 2437 } else { 2438 emit_lrm(batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4); 2439 } 2440 2441 emit_lrm(batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8); 2442 2443 if (indexed) { 2444 emit_lrm(batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12); 2445 emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16); 2446 } else { 2447 emit_lrm(batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12); 2448 emit_lri(batch, GEN7_3DPRIM_BASE_VERTEX, 0); 2449 } 2450 } 2451 2452 void genX(CmdDrawIndirect)( 2453 VkCommandBuffer commandBuffer, 2454 VkBuffer _buffer, 2455 VkDeviceSize offset, 2456 uint32_t drawCount, 2457 uint32_t stride) 2458 { 2459 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 2460 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer); 2461 struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline; 2462 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline); 2463 2464 if (anv_batch_has_error(&cmd_buffer->batch)) 2465 return; 2466 2467 genX(cmd_buffer_flush_state)(cmd_buffer); 2468 2469 for (uint32_t i = 0; i < drawCount; i++) { 2470 struct anv_bo *bo = buffer->bo; 2471 uint32_t bo_offset = buffer->offset + offset; 2472 2473 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) 2474 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8); 2475 if (vs_prog_data->uses_drawid) 2476 emit_draw_index(cmd_buffer, i); 2477 2478 load_indirect_parameters(cmd_buffer, buffer, offset, false); 2479 2480 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) { 2481 prim.IndirectParameterEnable = true; 2482 prim.VertexAccessType = SEQUENTIAL; 2483 prim.PrimitiveTopologyType = pipeline->topology; 2484 } 2485 2486 offset += stride; 2487 } 2488 } 2489 2490 void genX(CmdDrawIndexedIndirect)( 2491 VkCommandBuffer commandBuffer, 2492 VkBuffer _buffer, 2493 VkDeviceSize offset, 2494 uint32_t drawCount, 2495 uint32_t stride) 2496 { 2497 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 2498 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer); 2499 struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline; 2500 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline); 2501 2502 if (anv_batch_has_error(&cmd_buffer->batch)) 2503 return; 2504 2505 genX(cmd_buffer_flush_state)(cmd_buffer); 2506 2507 for (uint32_t i = 0; i < drawCount; i++) { 2508 struct anv_bo *bo = buffer->bo; 2509 uint32_t bo_offset = buffer->offset + offset; 2510 2511 /* TODO: We need to stomp base vertex to 0 somehow */ 2512 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance) 2513 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12); 2514 if (vs_prog_data->uses_drawid) 2515 emit_draw_index(cmd_buffer, i); 2516 2517 load_indirect_parameters(cmd_buffer, buffer, offset, true); 2518 2519 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) { 2520 prim.IndirectParameterEnable = true; 2521 prim.VertexAccessType = RANDOM; 2522 prim.PrimitiveTopologyType = pipeline->topology; 2523 } 2524 2525 offset += stride; 2526 } 2527 } 2528 2529 static VkResult 2530 flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer) 2531 { 2532 struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline; 2533 struct anv_state surfaces = { 0, }, samplers = { 0, }; 2534 VkResult result; 2535 2536 result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces); 2537 if (result != VK_SUCCESS) { 2538 assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY); 2539 2540 result = anv_cmd_buffer_new_binding_table_block(cmd_buffer); 2541 if (result != VK_SUCCESS) 2542 return result; 2543 2544 /* Re-emit state base addresses so we get the new surface state base 2545 * address before we start emitting binding tables etc. 2546 */ 2547 genX(cmd_buffer_emit_state_base_address)(cmd_buffer); 2548 2549 result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces); 2550 if (result != VK_SUCCESS) { 2551 anv_batch_set_error(&cmd_buffer->batch, result); 2552 return result; 2553 } 2554 } 2555 2556 result = emit_samplers(cmd_buffer, MESA_SHADER_COMPUTE, &samplers); 2557 if (result != VK_SUCCESS) { 2558 anv_batch_set_error(&cmd_buffer->batch, result); 2559 return result; 2560 } 2561 2562 uint32_t iface_desc_data_dw[GENX(INTERFACE_DESCRIPTOR_DATA_length)]; 2563 struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = { 2564 .BindingTablePointer = surfaces.offset, 2565 .SamplerStatePointer = samplers.offset, 2566 }; 2567 GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL, iface_desc_data_dw, &desc); 2568 2569 struct anv_state state = 2570 anv_cmd_buffer_merge_dynamic(cmd_buffer, iface_desc_data_dw, 2571 pipeline->interface_descriptor_data, 2572 GENX(INTERFACE_DESCRIPTOR_DATA_length), 2573 64); 2574 2575 uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t); 2576 anv_batch_emit(&cmd_buffer->batch, 2577 GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) { 2578 mid.InterfaceDescriptorTotalLength = size; 2579 mid.InterfaceDescriptorDataStartAddress = state.offset; 2580 } 2581 2582 return VK_SUCCESS; 2583 } 2584 2585 void 2586 genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer) 2587 { 2588 struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline; 2589 MAYBE_UNUSED VkResult result; 2590 2591 assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT); 2592 2593 genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config); 2594 2595 genX(flush_pipeline_select_gpgpu)(cmd_buffer); 2596 2597 if (cmd_buffer->state.compute.pipeline_dirty) { 2598 /* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE: 2599 * 2600 * "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless 2601 * the only bits that are changed are scoreboard related: Scoreboard 2602 * Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For 2603 * these scoreboard related states, a MEDIA_STATE_FLUSH is 2604 * sufficient." 2605 */ 2606 cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT; 2607 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer); 2608 2609 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch); 2610 } 2611 2612 if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) || 2613 cmd_buffer->state.compute.pipeline_dirty) { 2614 /* FIXME: figure out descriptors for gen7 */ 2615 result = flush_compute_descriptor_set(cmd_buffer); 2616 if (result != VK_SUCCESS) 2617 return; 2618 2619 cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT; 2620 } 2621 2622 if (cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_COMPUTE_BIT) { 2623 struct anv_state push_state = 2624 anv_cmd_buffer_cs_push_constants(cmd_buffer); 2625 2626 if (push_state.alloc_size) { 2627 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) { 2628 curbe.CURBETotalDataLength = push_state.alloc_size; 2629 curbe.CURBEDataStartAddress = push_state.offset; 2630 } 2631 } 2632 } 2633 2634 cmd_buffer->state.compute.pipeline_dirty = false; 2635 2636 genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer); 2637 } 2638 2639 #if GEN_GEN == 7 2640 2641 static VkResult 2642 verify_cmd_parser(const struct anv_device *device, 2643 int required_version, 2644 const char *function) 2645 { 2646 if (device->instance->physicalDevice.cmd_parser_version < required_version) { 2647 return vk_errorf(device->instance, device->instance, 2648 VK_ERROR_FEATURE_NOT_PRESENT, 2649 "cmd parser version %d is required for %s", 2650 required_version, function); 2651 } else { 2652 return VK_SUCCESS; 2653 } 2654 } 2655 2656 #endif 2657 2658 void genX(CmdDispatch)( 2659 VkCommandBuffer commandBuffer, 2660 uint32_t x, 2661 uint32_t y, 2662 uint32_t z) 2663 { 2664 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 2665 struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline; 2666 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline); 2667 2668 if (anv_batch_has_error(&cmd_buffer->batch)) 2669 return; 2670 2671 if (prog_data->uses_num_work_groups) { 2672 struct anv_state state = 2673 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4); 2674 uint32_t *sizes = state.map; 2675 sizes[0] = x; 2676 sizes[1] = y; 2677 sizes[2] = z; 2678 anv_state_flush(cmd_buffer->device, state); 2679 cmd_buffer->state.compute.num_workgroups = (struct anv_address) { 2680 .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo, 2681 .offset = state.offset, 2682 }; 2683 } 2684 2685 genX(cmd_buffer_flush_compute_state)(cmd_buffer); 2686 2687 anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) { 2688 ggw.SIMDSize = prog_data->simd_size / 16; 2689 ggw.ThreadDepthCounterMaximum = 0; 2690 ggw.ThreadHeightCounterMaximum = 0; 2691 ggw.ThreadWidthCounterMaximum = prog_data->threads - 1; 2692 ggw.ThreadGroupIDXDimension = x; 2693 ggw.ThreadGroupIDYDimension = y; 2694 ggw.ThreadGroupIDZDimension = z; 2695 ggw.RightExecutionMask = pipeline->cs_right_mask; 2696 ggw.BottomExecutionMask = 0xffffffff; 2697 } 2698 2699 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf); 2700 } 2701 2702 #define GPGPU_DISPATCHDIMX 0x2500 2703 #define GPGPU_DISPATCHDIMY 0x2504 2704 #define GPGPU_DISPATCHDIMZ 0x2508 2705 2706 void genX(CmdDispatchIndirect)( 2707 VkCommandBuffer commandBuffer, 2708 VkBuffer _buffer, 2709 VkDeviceSize offset) 2710 { 2711 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 2712 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer); 2713 struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline; 2714 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline); 2715 struct anv_bo *bo = buffer->bo; 2716 uint32_t bo_offset = buffer->offset + offset; 2717 struct anv_batch *batch = &cmd_buffer->batch; 2718 2719 #if GEN_GEN == 7 2720 /* Linux 4.4 added command parser version 5 which allows the GPGPU 2721 * indirect dispatch registers to be written. 2722 */ 2723 if (verify_cmd_parser(cmd_buffer->device, 5, 2724 "vkCmdDispatchIndirect") != VK_SUCCESS) 2725 return; 2726 #endif 2727 2728 if (prog_data->uses_num_work_groups) { 2729 cmd_buffer->state.compute.num_workgroups = (struct anv_address) { 2730 .bo = bo, 2731 .offset = bo_offset, 2732 }; 2733 } 2734 2735 genX(cmd_buffer_flush_compute_state)(cmd_buffer); 2736 2737 emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset); 2738 emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4); 2739 emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8); 2740 2741 #if GEN_GEN <= 7 2742 /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */ 2743 emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0); 2744 emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0); 2745 emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0); 2746 2747 /* Load compute_dispatch_indirect_x_size into SRC0 */ 2748 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0); 2749 2750 /* predicate = (compute_dispatch_indirect_x_size == 0); */ 2751 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) { 2752 mip.LoadOperation = LOAD_LOAD; 2753 mip.CombineOperation = COMBINE_SET; 2754 mip.CompareOperation = COMPARE_SRCS_EQUAL; 2755 } 2756 2757 /* Load compute_dispatch_indirect_y_size into SRC0 */ 2758 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4); 2759 2760 /* predicate |= (compute_dispatch_indirect_y_size == 0); */ 2761 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) { 2762 mip.LoadOperation = LOAD_LOAD; 2763 mip.CombineOperation = COMBINE_OR; 2764 mip.CompareOperation = COMPARE_SRCS_EQUAL; 2765 } 2766 2767 /* Load compute_dispatch_indirect_z_size into SRC0 */ 2768 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8); 2769 2770 /* predicate |= (compute_dispatch_indirect_z_size == 0); */ 2771 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) { 2772 mip.LoadOperation = LOAD_LOAD; 2773 mip.CombineOperation = COMBINE_OR; 2774 mip.CompareOperation = COMPARE_SRCS_EQUAL; 2775 } 2776 2777 /* predicate = !predicate; */ 2778 #define COMPARE_FALSE 1 2779 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) { 2780 mip.LoadOperation = LOAD_LOADINV; 2781 mip.CombineOperation = COMBINE_OR; 2782 mip.CompareOperation = COMPARE_FALSE; 2783 } 2784 #endif 2785 2786 anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) { 2787 ggw.IndirectParameterEnable = true; 2788 ggw.PredicateEnable = GEN_GEN <= 7; 2789 ggw.SIMDSize = prog_data->simd_size / 16; 2790 ggw.ThreadDepthCounterMaximum = 0; 2791 ggw.ThreadHeightCounterMaximum = 0; 2792 ggw.ThreadWidthCounterMaximum = prog_data->threads - 1; 2793 ggw.RightExecutionMask = pipeline->cs_right_mask; 2794 ggw.BottomExecutionMask = 0xffffffff; 2795 } 2796 2797 anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf); 2798 } 2799 2800 static void 2801 genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer, 2802 uint32_t pipeline) 2803 { 2804 UNUSED const struct gen_device_info *devinfo = &cmd_buffer->device->info; 2805 2806 if (cmd_buffer->state.current_pipeline == pipeline) 2807 return; 2808 2809 #if GEN_GEN >= 8 && GEN_GEN < 10 2810 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT: 2811 * 2812 * Software must clear the COLOR_CALC_STATE Valid field in 2813 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT 2814 * with Pipeline Select set to GPGPU. 2815 * 2816 * The internal hardware docs recommend the same workaround for Gen9 2817 * hardware too. 2818 */ 2819 if (pipeline == GPGPU) 2820 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t); 2821 #endif 2822 2823 /* From "BXML GT MI vol1a GPU Overview [Instruction] 2824 * PIPELINE_SELECT [DevBWR+]": 2825 * 2826 * Project: DEVSNB+ 2827 * 2828 * Software must ensure all the write caches are flushed through a 2829 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL 2830 * command to invalidate read only caches prior to programming 2831 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode. 2832 */ 2833 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { 2834 pc.RenderTargetCacheFlushEnable = true; 2835 pc.DepthCacheFlushEnable = true; 2836 pc.DCFlushEnable = true; 2837 pc.PostSyncOperation = NoWrite; 2838 pc.CommandStreamerStallEnable = true; 2839 } 2840 2841 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) { 2842 pc.TextureCacheInvalidationEnable = true; 2843 pc.ConstantCacheInvalidationEnable = true; 2844 pc.StateCacheInvalidationEnable = true; 2845 pc.InstructionCacheInvalidateEnable = true; 2846 pc.PostSyncOperation = NoWrite; 2847 } 2848 2849 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) { 2850 #if GEN_GEN >= 9 2851 ps.MaskBits = 3; 2852 #endif 2853 ps.PipelineSelection = pipeline; 2854 } 2855 2856 #if GEN_GEN == 9 2857 if (devinfo->is_geminilake) { 2858 /* Project: DevGLK 2859 * 2860 * "This chicken bit works around a hardware issue with barrier logic 2861 * encountered when switching between GPGPU and 3D pipelines. To 2862 * workaround the issue, this mode bit should be set after a pipeline 2863 * is selected." 2864 */ 2865 uint32_t scec; 2866 anv_pack_struct(&scec, GENX(SLICE_COMMON_ECO_CHICKEN1), 2867 .GLKBarrierMode = 2868 pipeline == GPGPU ? GLK_BARRIER_MODE_GPGPU 2869 : GLK_BARRIER_MODE_3D_HULL, 2870 .GLKBarrierModeMask = 1); 2871 emit_lri(&cmd_buffer->batch, GENX(SLICE_COMMON_ECO_CHICKEN1_num), scec); 2872 } 2873 #endif 2874 2875 cmd_buffer->state.current_pipeline = pipeline; 2876 } 2877 2878 void 2879 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer) 2880 { 2881 genX(flush_pipeline_select)(cmd_buffer, _3D); 2882 } 2883 2884 void 2885 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer) 2886 { 2887 genX(flush_pipeline_select)(cmd_buffer, GPGPU); 2888 } 2889 2890 void 2891 genX(cmd_buffer_emit_gen7_depth_flush)(struct anv_cmd_buffer *cmd_buffer) 2892 { 2893 if (GEN_GEN >= 8) 2894 return; 2895 2896 /* From the Haswell PRM, documentation for 3DSTATE_DEPTH_BUFFER: 2897 * 2898 * "Restriction: Prior to changing Depth/Stencil Buffer state (i.e., any 2899 * combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 2900 * 3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first 2901 * issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit 2902 * set), followed by a pipelined depth cache flush (PIPE_CONTROL with 2903 * Depth Flush Bit set, followed by another pipelined depth stall 2904 * (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise 2905 * guarantee that the pipeline from WM onwards is already flushed (e.g., 2906 * via a preceding MI_FLUSH)." 2907 */ 2908 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) { 2909 pipe.DepthStallEnable = true; 2910 } 2911 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) { 2912 pipe.DepthCacheFlushEnable = true; 2913 } 2914 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) { 2915 pipe.DepthStallEnable = true; 2916 } 2917 } 2918 2919 static void 2920 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer) 2921 { 2922 struct anv_device *device = cmd_buffer->device; 2923 const struct anv_image_view *iview = 2924 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer); 2925 const struct anv_image *image = iview ? iview->image : NULL; 2926 2927 /* FIXME: Width and Height are wrong */ 2928 2929 genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer); 2930 2931 uint32_t *dw = anv_batch_emit_dwords(&cmd_buffer->batch, 2932 device->isl_dev.ds.size / 4); 2933 if (dw == NULL) 2934 return; 2935 2936 struct isl_depth_stencil_hiz_emit_info info = { 2937 .mocs = device->default_mocs, 2938 }; 2939 2940 if (iview) 2941 info.view = &iview->planes[0].isl; 2942 2943 if (image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) { 2944 uint32_t depth_plane = 2945 anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_DEPTH_BIT); 2946 const struct anv_surface *surface = &image->planes[depth_plane].surface; 2947 2948 info.depth_surf = &surface->isl; 2949 2950 info.depth_address = 2951 anv_batch_emit_reloc(&cmd_buffer->batch, 2952 dw + device->isl_dev.ds.depth_offset / 4, 2953 image->planes[depth_plane].bo, 2954 image->planes[depth_plane].bo_offset + 2955 surface->offset); 2956 2957 const uint32_t ds = 2958 cmd_buffer->state.subpass->depth_stencil_attachment.attachment; 2959 info.hiz_usage = cmd_buffer->state.attachments[ds].aux_usage; 2960 if (info.hiz_usage == ISL_AUX_USAGE_HIZ) { 2961 info.hiz_surf = &image->planes[depth_plane].aux_surface.isl; 2962 2963 info.hiz_address = 2964 anv_batch_emit_reloc(&cmd_buffer->batch, 2965 dw + device->isl_dev.ds.hiz_offset / 4, 2966 image->planes[depth_plane].bo, 2967 image->planes[depth_plane].bo_offset + 2968 image->planes[depth_plane].aux_surface.offset); 2969 2970 info.depth_clear_value = ANV_HZ_FC_VAL; 2971 } 2972 } 2973 2974 if (image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) { 2975 uint32_t stencil_plane = 2976 anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_STENCIL_BIT); 2977 const struct anv_surface *surface = &image->planes[stencil_plane].surface; 2978 2979 info.stencil_surf = &surface->isl; 2980 2981 info.stencil_address = 2982 anv_batch_emit_reloc(&cmd_buffer->batch, 2983 dw + device->isl_dev.ds.stencil_offset / 4, 2984 image->planes[stencil_plane].bo, 2985 image->planes[stencil_plane].bo_offset + surface->offset); 2986 } 2987 2988 isl_emit_depth_stencil_hiz_s(&device->isl_dev, dw, &info); 2989 2990 cmd_buffer->state.hiz_enabled = info.hiz_usage == ISL_AUX_USAGE_HIZ; 2991 } 2992 2993 2994 /** 2995 * @brief Perform any layout transitions required at the beginning and/or end 2996 * of the current subpass for depth buffers. 2997 * 2998 * TODO: Consider preprocessing the attachment reference array at render pass 2999 * create time to determine if no layout transition is needed at the 3000 * beginning and/or end of each subpass. 3001 * 3002 * @param cmd_buffer The command buffer the transition is happening within. 3003 * @param subpass_end If true, marks that the transition is happening at the 3004 * end of the subpass. 3005 */ 3006 static void 3007 cmd_buffer_subpass_transition_layouts(struct anv_cmd_buffer * const cmd_buffer, 3008 const bool subpass_end) 3009 { 3010 /* We need a non-NULL command buffer. */ 3011 assert(cmd_buffer); 3012 3013 const struct anv_cmd_state * const cmd_state = &cmd_buffer->state; 3014 const struct anv_subpass * const subpass = cmd_state->subpass; 3015 3016 /* This function must be called within a subpass. */ 3017 assert(subpass); 3018 3019 /* If there are attachment references, the array shouldn't be NULL. 3020 */ 3021 if (subpass->attachment_count > 0) 3022 assert(subpass->attachments); 3023 3024 /* Iterate over the array of attachment references. */ 3025 for (const VkAttachmentReference *att_ref = subpass->attachments; 3026 att_ref < subpass->attachments + subpass->attachment_count; att_ref++) { 3027 3028 /* If the attachment is unused, we can't perform a layout transition. */ 3029 if (att_ref->attachment == VK_ATTACHMENT_UNUSED) 3030 continue; 3031 3032 /* This attachment index shouldn't go out of bounds. */ 3033 assert(att_ref->attachment < cmd_state->pass->attachment_count); 3034 3035 const struct anv_render_pass_attachment * const att_desc = 3036 &cmd_state->pass->attachments[att_ref->attachment]; 3037 struct anv_attachment_state * const att_state = 3038 &cmd_buffer->state.attachments[att_ref->attachment]; 3039 3040 /* The attachment should not be used in a subpass after its last. */ 3041 assert(att_desc->last_subpass_idx >= anv_get_subpass_id(cmd_state)); 3042 3043 if (subpass_end && anv_get_subpass_id(cmd_state) < 3044 att_desc->last_subpass_idx) { 3045 /* We're calling this function on a buffer twice in one subpass and 3046 * this is not the last use of the buffer. The layout should not have 3047 * changed from the first call and no transition is necessary. 3048 */ 3049 assert(att_state->current_layout == att_ref->layout || 3050 att_state->current_layout == 3051 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); 3052 continue; 3053 } 3054 3055 /* The attachment index must be less than the number of attachments 3056 * within the framebuffer. 3057 */ 3058 assert(att_ref->attachment < cmd_state->framebuffer->attachment_count); 3059 3060 const struct anv_image_view * const iview = 3061 cmd_state->framebuffer->attachments[att_ref->attachment]; 3062 const struct anv_image * const image = iview->image; 3063 3064 /* Get the appropriate target layout for this attachment. */ 3065 VkImageLayout target_layout; 3066 3067 /* A resolve is necessary before use as an input attachment if the clear 3068 * color or auxiliary buffer usage isn't supported by the sampler. 3069 */ 3070 const bool input_needs_resolve = 3071 (att_state->fast_clear && !att_state->clear_color_is_zero_one) || 3072 att_state->input_aux_usage != att_state->aux_usage; 3073 if (subpass_end) { 3074 target_layout = att_desc->final_layout; 3075 } else if (iview->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV && 3076 !input_needs_resolve) { 3077 /* Layout transitions before the final only help to enable sampling as 3078 * an input attachment. If the input attachment supports sampling 3079 * using the auxiliary surface, we can skip such transitions by making 3080 * the target layout one that is CCS-aware. 3081 */ 3082 target_layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; 3083 } else { 3084 target_layout = att_ref->layout; 3085 } 3086 3087 /* Perform the layout transition. */ 3088 if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) { 3089 transition_depth_buffer(cmd_buffer, image, 3090 att_state->current_layout, target_layout); 3091 att_state->aux_usage = 3092 anv_layout_to_aux_usage(&cmd_buffer->device->info, image, 3093 VK_IMAGE_ASPECT_DEPTH_BIT, target_layout); 3094 } else if (image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) { 3095 assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT); 3096 transition_color_buffer(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT, 3097 iview->planes[0].isl.base_level, 1, 3098 iview->planes[0].isl.base_array_layer, 3099 iview->planes[0].isl.array_len, 3100 att_state->current_layout, target_layout); 3101 } 3102 3103 att_state->current_layout = target_layout; 3104 } 3105 } 3106 3107 /* Update the clear value dword(s) in surface state objects or the fast clear 3108 * state buffer entry for the color attachments used in this subpass. 3109 */ 3110 static void 3111 cmd_buffer_subpass_sync_fast_clear_values(struct anv_cmd_buffer *cmd_buffer) 3112 { 3113 assert(cmd_buffer && cmd_buffer->state.subpass); 3114 3115 const struct anv_cmd_state *state = &cmd_buffer->state; 3116 3117 /* Iterate through every color attachment used in this subpass. */ 3118 for (uint32_t i = 0; i < state->subpass->color_count; ++i) { 3119 3120 /* The attachment should be one of the attachments described in the 3121 * render pass and used in the subpass. 3122 */ 3123 const uint32_t a = state->subpass->color_attachments[i].attachment; 3124 if (a == VK_ATTACHMENT_UNUSED) 3125 continue; 3126 3127 assert(a < state->pass->attachment_count); 3128 3129 /* Store some information regarding this attachment. */ 3130 const struct anv_attachment_state *att_state = &state->attachments[a]; 3131 const struct anv_image_view *iview = state->framebuffer->attachments[a]; 3132 const struct anv_render_pass_attachment *rp_att = 3133 &state->pass->attachments[a]; 3134 3135 if (att_state->aux_usage == ISL_AUX_USAGE_NONE) 3136 continue; 3137 3138 /* The fast clear state entry must be updated if a fast clear is going to 3139 * happen. The surface state must be updated if the clear value from a 3140 * prior fast clear may be needed. 3141 */ 3142 if (att_state->pending_clear_aspects && att_state->fast_clear) { 3143 /* Update the fast clear state entry. */ 3144 genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state, 3145 iview->image, 3146 VK_IMAGE_ASPECT_COLOR_BIT, 3147 iview->planes[0].isl.base_level, 3148 true /* copy from ss */); 3149 3150 /* Fast-clears impact whether or not a resolve will be necessary. */ 3151 if (iview->image->planes[0].aux_usage == ISL_AUX_USAGE_CCS_E && 3152 att_state->clear_color_is_zero) { 3153 /* This image always has the auxiliary buffer enabled. We can mark 3154 * the subresource as not needing a resolve because the clear color 3155 * will match what's in every RENDER_SURFACE_STATE object when it's 3156 * being used for sampling. 3157 */ 3158 genX(set_image_needs_resolve)(cmd_buffer, iview->image, 3159 VK_IMAGE_ASPECT_COLOR_BIT, 3160 iview->planes[0].isl.base_level, 3161 false); 3162 } else { 3163 genX(set_image_needs_resolve)(cmd_buffer, iview->image, 3164 VK_IMAGE_ASPECT_COLOR_BIT, 3165 iview->planes[0].isl.base_level, 3166 true); 3167 } 3168 } else if (rp_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD) { 3169 /* The attachment may have been fast-cleared in a previous render 3170 * pass and the value is needed now. Update the surface state(s). 3171 * 3172 * TODO: Do this only once per render pass instead of every subpass. 3173 */ 3174 genX(copy_fast_clear_dwords)(cmd_buffer, att_state->color.state, 3175 iview->image, 3176 VK_IMAGE_ASPECT_COLOR_BIT, 3177 iview->planes[0].isl.base_level, 3178 false /* copy to ss */); 3179 3180 if (need_input_attachment_state(rp_att) && 3181 att_state->input_aux_usage != ISL_AUX_USAGE_NONE) { 3182 genX(copy_fast_clear_dwords)(cmd_buffer, att_state->input.state, 3183 iview->image, 3184 VK_IMAGE_ASPECT_COLOR_BIT, 3185 iview->planes[0].isl.base_level, 3186 false /* copy to ss */); 3187 } 3188 } 3189 } 3190 } 3191 3192 3193 static void 3194 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer, 3195 struct anv_subpass *subpass) 3196 { 3197 cmd_buffer->state.subpass = subpass; 3198 3199 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS; 3200 3201 /* Our implementation of VK_KHR_multiview uses instancing to draw the 3202 * different views. If the client asks for instancing, we need to use the 3203 * Instance Data Step Rate to ensure that we repeat the client's 3204 * per-instance data once for each view. Since this bit is in 3205 * VERTEX_BUFFER_STATE on gen7, we need to dirty vertex buffers at the top 3206 * of each subpass. 3207 */ 3208 if (GEN_GEN == 7) 3209 cmd_buffer->state.gfx.vb_dirty |= ~0; 3210 3211 /* It is possible to start a render pass with an old pipeline. Because the 3212 * render pass and subpass index are both baked into the pipeline, this is 3213 * highly unlikely. In order to do so, it requires that you have a render 3214 * pass with a single subpass and that you use that render pass twice 3215 * back-to-back and use the same pipeline at the start of the second render 3216 * pass as at the end of the first. In order to avoid unpredictable issues 3217 * with this edge case, we just dirty the pipeline at the start of every 3218 * subpass. 3219 */ 3220 cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE; 3221 3222 /* Perform transitions to the subpass layout before any writes have 3223 * occurred. 3224 */ 3225 cmd_buffer_subpass_transition_layouts(cmd_buffer, false); 3226 3227 /* Update clear values *after* performing automatic layout transitions. 3228 * This ensures that transitions from the UNDEFINED layout have had a chance 3229 * to populate the clear value buffer with the correct values for the 3230 * LOAD_OP_LOAD loadOp and that the fast-clears will update the buffer 3231 * without the aforementioned layout transition overwriting the fast-clear 3232 * value. 3233 */ 3234 cmd_buffer_subpass_sync_fast_clear_values(cmd_buffer); 3235 3236 cmd_buffer_emit_depth_stencil(cmd_buffer); 3237 3238 anv_cmd_buffer_clear_subpass(cmd_buffer); 3239 } 3240 3241 void genX(CmdBeginRenderPass)( 3242 VkCommandBuffer commandBuffer, 3243 const VkRenderPassBeginInfo* pRenderPassBegin, 3244 VkSubpassContents contents) 3245 { 3246 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 3247 ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass); 3248 ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer); 3249 3250 cmd_buffer->state.framebuffer = framebuffer; 3251 cmd_buffer->state.pass = pass; 3252 cmd_buffer->state.render_area = pRenderPassBegin->renderArea; 3253 VkResult result = 3254 genX(cmd_buffer_setup_attachments)(cmd_buffer, pass, pRenderPassBegin); 3255 3256 /* If we failed to setup the attachments we should not try to go further */ 3257 if (result != VK_SUCCESS) { 3258 assert(anv_batch_has_error(&cmd_buffer->batch)); 3259 return; 3260 } 3261 3262 genX(flush_pipeline_select_3d)(cmd_buffer); 3263 3264 genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses); 3265 3266 cmd_buffer->state.pending_pipe_bits |= 3267 cmd_buffer->state.pass->subpass_flushes[0]; 3268 } 3269 3270 void genX(CmdNextSubpass)( 3271 VkCommandBuffer commandBuffer, 3272 VkSubpassContents contents) 3273 { 3274 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 3275 3276 if (anv_batch_has_error(&cmd_buffer->batch)) 3277 return; 3278 3279 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY); 3280 3281 anv_cmd_buffer_resolve_subpass(cmd_buffer); 3282 3283 /* Perform transitions to the final layout after all writes have occurred. 3284 */ 3285 cmd_buffer_subpass_transition_layouts(cmd_buffer, true); 3286 3287 genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1); 3288 3289 uint32_t subpass_id = anv_get_subpass_id(&cmd_buffer->state); 3290 cmd_buffer->state.pending_pipe_bits |= 3291 cmd_buffer->state.pass->subpass_flushes[subpass_id]; 3292 } 3293 3294 void genX(CmdEndRenderPass)( 3295 VkCommandBuffer commandBuffer) 3296 { 3297 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer); 3298 3299 if (anv_batch_has_error(&cmd_buffer->batch)) 3300 return; 3301 3302 anv_cmd_buffer_resolve_subpass(cmd_buffer); 3303 3304 /* Perform transitions to the final layout after all writes have occurred. 3305 */ 3306 cmd_buffer_subpass_transition_layouts(cmd_buffer, true); 3307 3308 cmd_buffer->state.pending_pipe_bits |= 3309 cmd_buffer->state.pass->subpass_flushes[cmd_buffer->state.pass->subpass_count]; 3310 3311 cmd_buffer->state.hiz_enabled = false; 3312 3313 #ifndef NDEBUG 3314 anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer); 3315 #endif 3316 3317 /* Remove references to render pass specific state. This enables us to 3318 * detect whether or not we're in a renderpass. 3319 */ 3320 cmd_buffer->state.framebuffer = NULL; 3321 cmd_buffer->state.pass = NULL; 3322 cmd_buffer->state.subpass = NULL; 3323 } 3324