1 /* 2 * Copyright 2006 VMware, Inc. 3 * All Rights Reserved. 4 * 5 * Permission is hereby granted, free of charge, to any person obtaining a 6 * copy of this software and associated documentation files (the 7 * "Software"), to deal in the Software without restriction, including 8 * without limitation the rights to use, copy, modify, merge, publish, 9 * distribute, sublicense, and/or sell copies of the Software, and to 10 * permit persons to whom the Software is furnished to do so, subject to 11 * the following conditions: 12 * 13 * The above copyright notice and this permission notice (including the 14 * next paragraph) shall be included in all copies or substantial portions 15 * of the Software. 16 * 17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 18 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 20 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR 21 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 22 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 23 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 24 */ 25 26 #include <GL/gl.h> 27 #include <GL/internal/dri_interface.h> 28 #include <drm_fourcc.h> 29 30 #include "intel_batchbuffer.h" 31 #include "intel_image.h" 32 #include "intel_mipmap_tree.h" 33 #include "intel_tex.h" 34 #include "intel_blit.h" 35 #include "intel_fbo.h" 36 37 #include "brw_blorp.h" 38 #include "brw_context.h" 39 #include "brw_state.h" 40 41 #include "main/enums.h" 42 #include "main/fbobject.h" 43 #include "main/formats.h" 44 #include "main/glformats.h" 45 #include "main/texcompress_etc.h" 46 #include "main/teximage.h" 47 #include "main/streaming-load-memcpy.h" 48 #include "x86/common_x86_asm.h" 49 50 #define FILE_DEBUG_FLAG DEBUG_MIPTREE 51 52 static void *intel_miptree_map_raw(struct brw_context *brw, 53 struct intel_mipmap_tree *mt, 54 GLbitfield mode); 55 56 static void intel_miptree_unmap_raw(struct intel_mipmap_tree *mt); 57 58 static bool 59 intel_miptree_alloc_aux(struct brw_context *brw, 60 struct intel_mipmap_tree *mt); 61 62 static bool 63 intel_miptree_supports_mcs(struct brw_context *brw, 64 const struct intel_mipmap_tree *mt) 65 { 66 const struct gen_device_info *devinfo = &brw->screen->devinfo; 67 68 /* MCS compression only applies to multisampled miptrees */ 69 if (mt->surf.samples <= 1) 70 return false; 71 72 /* Prior to Gen7, all MSAA surfaces used IMS layout. */ 73 if (devinfo->gen < 7) 74 return false; 75 76 /* See isl_surf_get_mcs_surf for details. */ 77 if (mt->surf.samples == 16 && mt->surf.logical_level0_px.width > 8192) 78 return false; 79 80 /* In Gen7, IMS layout is only used for depth and stencil buffers. */ 81 switch (_mesa_get_format_base_format(mt->format)) { 82 case GL_DEPTH_COMPONENT: 83 case GL_STENCIL_INDEX: 84 case GL_DEPTH_STENCIL: 85 return false; 86 default: 87 /* From the Ivy Bridge PRM, Vol4 Part1 p77 ("MCS Enable"): 88 * 89 * This field must be set to 0 for all SINT MSRTs when all RT channels 90 * are not written 91 * 92 * In practice this means that we have to disable MCS for all signed 93 * integer MSAA buffers. The alternative, to disable MCS only when one 94 * of the render target channels is disabled, is impractical because it 95 * would require converting between CMS and UMS MSAA layouts on the fly, 96 * which is expensive. 
97 */ 98 if (devinfo->gen == 7 && _mesa_get_format_datatype(mt->format) == GL_INT) { 99 return false; 100 } else { 101 return true; 102 } 103 } 104 } 105 106 static bool 107 intel_tiling_supports_ccs(const struct brw_context *brw, 108 enum isl_tiling tiling) 109 { 110 const struct gen_device_info *devinfo = &brw->screen->devinfo; 111 112 /* From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render 113 * Target(s)", beneath the "Fast Color Clear" bullet (p326): 114 * 115 * - Support is limited to tiled render targets. 116 * 117 * Gen9 changes the restriction to Y-tile only. 118 */ 119 if (devinfo->gen >= 9) 120 return tiling == ISL_TILING_Y0; 121 else if (devinfo->gen >= 7) 122 return tiling != ISL_TILING_LINEAR; 123 else 124 return false; 125 } 126 127 /** 128 * For a single-sampled render target ("non-MSRT"), determine if an MCS buffer 129 * can be used. This doesn't (and should not) inspect any of the properties of 130 * the miptree's BO. 131 * 132 * From the Ivy Bridge PRM, Vol2 Part1 11.7 "MCS Buffer for Render Target(s)", 133 * beneath the "Fast Color Clear" bullet (p326): 134 * 135 * - Support is for non-mip-mapped and non-array surface types only. 136 * 137 * And then later, on p327: 138 * 139 * - MCS buffer for non-MSRT is supported only for RT formats 32bpp, 140 * 64bpp, and 128bpp. 141 * 142 * From the Skylake documentation, it is made clear that X-tiling is no longer 143 * supported: 144 * 145 * - MCS and Lossless compression is supported for TiledY/TileYs/TileYf 146 * non-MSRTs only. 147 */ 148 static bool 149 intel_miptree_supports_ccs(struct brw_context *brw, 150 const struct intel_mipmap_tree *mt) 151 { 152 const struct gen_device_info *devinfo = &brw->screen->devinfo; 153 154 /* MCS support does not exist prior to Gen7 */ 155 if (devinfo->gen < 7) 156 return false; 157 158 /* This function applies only to non-multisampled render targets. */ 159 if (mt->surf.samples > 1) 160 return false; 161 162 /* MCS is only supported for color buffers */ 163 switch (_mesa_get_format_base_format(mt->format)) { 164 case GL_DEPTH_COMPONENT: 165 case GL_DEPTH_STENCIL: 166 case GL_STENCIL_INDEX: 167 return false; 168 } 169 170 if (mt->cpp != 4 && mt->cpp != 8 && mt->cpp != 16) 171 return false; 172 173 const bool mip_mapped = mt->first_level != 0 || mt->last_level != 0; 174 const bool arrayed = mt->surf.logical_level0_px.array_len > 1 || 175 mt->surf.logical_level0_px.depth > 1; 176 177 if (arrayed) { 178 /* Multisample surfaces with the CMS layout are not layered surfaces, 179 * yet still have physical_depth0 > 1. Assert that we don't 180 * accidentally reject a multisampled surface here. We should have 181 * rejected it earlier by explicitly checking the sample count. 182 */ 183 assert(mt->surf.samples == 1); 184 } 185 186 /* Handle the hardware restrictions... 187 * 188 * All GENs have the following restriction: "MCS buffer for non-MSRT is 189 * supported only for RT formats 32bpp, 64bpp, and 128bpp." 190 * 191 * From the HSW PRM Volume 7: 3D-Media-GPGPU, page 652: (Color Clear of 192 * Non-MultiSampler Render Target Restrictions) Support is for 193 * non-mip-mapped and non-array surface types only. 194 * 195 * From the BDW PRM Volume 7: 3D-Media-GPGPU, page 649: (Color Clear of 196 * Non-MultiSampler Render Target Restriction). Mip-mapped and arrayed 197 * surfaces are supported with MCS buffer layout with these alignments in 198 * the RT space: Horizontal Alignment = 256 and Vertical Alignment = 128. 
    *
    * From the SKL PRM Volume 7: 3D-Media-GPGPU, page 632: (Color Clear of
    * Non-MultiSampler Render Target Restriction). Mip-mapped and arrayed
    * surfaces are supported with MCS buffer layout with these alignments in
    * the RT space: Horizontal Alignment = 128 and Vertical Alignment = 64.
    */
   if (devinfo->gen < 8 && (mip_mapped || arrayed))
      return false;

   /* There's no point in using an MCS buffer if the surface isn't in a
    * renderable format.
    */
   if (!brw->mesa_format_supports_render[mt->format])
      return false;

   return true;
}

static bool
intel_tiling_supports_hiz(const struct brw_context *brw,
                          enum isl_tiling tiling)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen < 6)
      return false;

   return tiling == ISL_TILING_Y0;
}

static bool
intel_miptree_supports_hiz(const struct brw_context *brw,
                           const struct intel_mipmap_tree *mt)
{
   if (!brw->has_hiz)
      return false;

   switch (mt->format) {
   case MESA_FORMAT_Z_FLOAT32:
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
   case MESA_FORMAT_Z24_UNORM_X8_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
   case MESA_FORMAT_Z_UNORM16:
      return true;
   default:
      return false;
   }
}

/**
 * Return true if the format that will be used to access the miptree is
 * CCS_E-compatible with the miptree's linear/non-sRGB format.
 *
 * Why use the linear format? Well, although the miptree may be specified with
 * an sRGB format, the usage of that color space/format can be toggled. Since
 * our HW tends to support more linear formats than sRGB ones, we use this
 * format variant to check for CCS_E compatibility.
 */
static bool
format_ccs_e_compat_with_miptree(const struct gen_device_info *devinfo,
                                 const struct intel_mipmap_tree *mt,
                                 enum isl_format access_format)
{
   assert(mt->aux_usage == ISL_AUX_USAGE_CCS_E);

   mesa_format linear_format = _mesa_get_srgb_format_linear(mt->format);
   enum isl_format isl_format = brw_isl_format_for_mesa_format(linear_format);
   return isl_formats_are_ccs_e_compatible(devinfo, isl_format, access_format);
}

static bool
intel_miptree_supports_ccs_e(struct brw_context *brw,
                             const struct intel_mipmap_tree *mt)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen < 9)
      return false;

   /* For now compression is only enabled for integer formats even though
    * there exist supported floating point formats also. This is a heuristic
    * decision based on current public benchmarks: in none of the measured
    * cases did these formats provide any improvement, and a few cases were
    * seen to regress. Hence these are left to be enabled in the future when
    * they are known to improve things.
    */
   if (_mesa_get_format_datatype(mt->format) == GL_FLOAT)
      return false;

   if (!intel_miptree_supports_ccs(brw, mt))
      return false;

   /* Many window system buffers are sRGB even if they are never rendered as
    * sRGB. For those, we want CCS_E for when sRGBEncode is false. When the
    * surface is used as sRGB, we fall back to CCS_D.
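    *
    * For example (illustrative, assuming the usual Mesa format helpers): a
    * winsys buffer created as MESA_FORMAT_B8G8R8A8_SRGB is checked here via
    * its linear variant MESA_FORMAT_B8G8R8A8_UNORM.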
294 */ 295 mesa_format linear_format = _mesa_get_srgb_format_linear(mt->format); 296 enum isl_format isl_format = brw_isl_format_for_mesa_format(linear_format); 297 return isl_format_supports_ccs_e(&brw->screen->devinfo, isl_format); 298 } 299 300 /** 301 * Determine depth format corresponding to a depth+stencil format, 302 * for separate stencil. 303 */ 304 mesa_format 305 intel_depth_format_for_depthstencil_format(mesa_format format) { 306 switch (format) { 307 case MESA_FORMAT_Z24_UNORM_S8_UINT: 308 return MESA_FORMAT_Z24_UNORM_X8_UINT; 309 case MESA_FORMAT_Z32_FLOAT_S8X24_UINT: 310 return MESA_FORMAT_Z_FLOAT32; 311 default: 312 return format; 313 } 314 } 315 316 static bool 317 create_mapping_table(GLenum target, unsigned first_level, unsigned last_level, 318 unsigned depth0, struct intel_mipmap_level *table) 319 { 320 for (unsigned level = first_level; level <= last_level; level++) { 321 const unsigned d = 322 target == GL_TEXTURE_3D ? minify(depth0, level) : depth0; 323 324 table[level].slice = calloc(d, sizeof(*table[0].slice)); 325 if (!table[level].slice) 326 goto unwind; 327 } 328 329 return true; 330 331 unwind: 332 for (unsigned level = first_level; level <= last_level; level++) 333 free(table[level].slice); 334 335 return false; 336 } 337 338 static bool 339 needs_separate_stencil(const struct brw_context *brw, 340 struct intel_mipmap_tree *mt, 341 mesa_format format) 342 { 343 const struct gen_device_info *devinfo = &brw->screen->devinfo; 344 345 if (_mesa_get_format_base_format(format) != GL_DEPTH_STENCIL) 346 return false; 347 348 if (devinfo->must_use_separate_stencil) 349 return true; 350 351 return brw->has_separate_stencil && 352 intel_miptree_supports_hiz(brw, mt); 353 } 354 355 /** 356 * Choose the aux usage for this miptree. This function must be called fairly 357 * late in the miptree create process after we have a tiling. 358 */ 359 static void 360 intel_miptree_choose_aux_usage(struct brw_context *brw, 361 struct intel_mipmap_tree *mt) 362 { 363 assert(mt->aux_usage == ISL_AUX_USAGE_NONE); 364 365 if (intel_miptree_supports_mcs(brw, mt)) { 366 assert(mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY); 367 mt->aux_usage = ISL_AUX_USAGE_MCS; 368 } else if (intel_tiling_supports_ccs(brw, mt->surf.tiling) && 369 intel_miptree_supports_ccs(brw, mt)) { 370 if (!unlikely(INTEL_DEBUG & DEBUG_NO_RBC) && 371 intel_miptree_supports_ccs_e(brw, mt)) { 372 mt->aux_usage = ISL_AUX_USAGE_CCS_E; 373 } else { 374 mt->aux_usage = ISL_AUX_USAGE_CCS_D; 375 } 376 } else if (intel_tiling_supports_hiz(brw, mt->surf.tiling) && 377 intel_miptree_supports_hiz(brw, mt)) { 378 mt->aux_usage = ISL_AUX_USAGE_HIZ; 379 } 380 381 /* We can do fast-clear on all auxiliary surface types that are 382 * allocated through the normal texture creation paths. 383 */ 384 if (mt->aux_usage != ISL_AUX_USAGE_NONE) 385 mt->supports_fast_clear = true; 386 } 387 388 389 /** 390 * Choose an appropriate uncompressed format for a requested 391 * compressed format, if unsupported. 392 */ 393 mesa_format 394 intel_lower_compressed_format(struct brw_context *brw, mesa_format format) 395 { 396 const struct gen_device_info *devinfo = &brw->screen->devinfo; 397 398 /* No need to lower ETC formats on these platforms, 399 * they are supported natively. 
400 */ 401 if (devinfo->gen >= 8 || devinfo->is_baytrail) 402 return format; 403 404 switch (format) { 405 case MESA_FORMAT_ETC1_RGB8: 406 return MESA_FORMAT_R8G8B8X8_UNORM; 407 case MESA_FORMAT_ETC2_RGB8: 408 return MESA_FORMAT_R8G8B8X8_UNORM; 409 case MESA_FORMAT_ETC2_SRGB8: 410 case MESA_FORMAT_ETC2_SRGB8_ALPHA8_EAC: 411 case MESA_FORMAT_ETC2_SRGB8_PUNCHTHROUGH_ALPHA1: 412 return MESA_FORMAT_B8G8R8A8_SRGB; 413 case MESA_FORMAT_ETC2_RGBA8_EAC: 414 case MESA_FORMAT_ETC2_RGB8_PUNCHTHROUGH_ALPHA1: 415 return MESA_FORMAT_R8G8B8A8_UNORM; 416 case MESA_FORMAT_ETC2_R11_EAC: 417 return MESA_FORMAT_R_UNORM16; 418 case MESA_FORMAT_ETC2_SIGNED_R11_EAC: 419 return MESA_FORMAT_R_SNORM16; 420 case MESA_FORMAT_ETC2_RG11_EAC: 421 return MESA_FORMAT_R16G16_UNORM; 422 case MESA_FORMAT_ETC2_SIGNED_RG11_EAC: 423 return MESA_FORMAT_R16G16_SNORM; 424 default: 425 /* Non ETC1 / ETC2 format */ 426 return format; 427 } 428 } 429 430 unsigned 431 brw_get_num_logical_layers(const struct intel_mipmap_tree *mt, unsigned level) 432 { 433 if (mt->surf.dim == ISL_SURF_DIM_3D) 434 return minify(mt->surf.logical_level0_px.depth, level); 435 else 436 return mt->surf.logical_level0_px.array_len; 437 } 438 439 UNUSED static unsigned 440 get_num_phys_layers(const struct isl_surf *surf, unsigned level) 441 { 442 /* In case of physical dimensions one needs to consider also the layout. 443 * See isl_calc_phys_level0_extent_sa(). 444 */ 445 if (surf->dim != ISL_SURF_DIM_3D) 446 return surf->phys_level0_sa.array_len; 447 448 if (surf->dim_layout == ISL_DIM_LAYOUT_GEN4_2D) 449 return minify(surf->phys_level0_sa.array_len, level); 450 451 return minify(surf->phys_level0_sa.depth, level); 452 } 453 454 /** \brief Assert that the level and layer are valid for the miptree. */ 455 void 456 intel_miptree_check_level_layer(const struct intel_mipmap_tree *mt, 457 uint32_t level, 458 uint32_t layer) 459 { 460 (void) mt; 461 (void) level; 462 (void) layer; 463 464 assert(level >= mt->first_level); 465 assert(level <= mt->last_level); 466 assert(layer < get_num_phys_layers(&mt->surf, level)); 467 } 468 469 static enum isl_aux_state ** 470 create_aux_state_map(struct intel_mipmap_tree *mt, 471 enum isl_aux_state initial) 472 { 473 const uint32_t levels = mt->last_level + 1; 474 475 uint32_t total_slices = 0; 476 for (uint32_t level = 0; level < levels; level++) 477 total_slices += brw_get_num_logical_layers(mt, level); 478 479 const size_t per_level_array_size = levels * sizeof(enum isl_aux_state *); 480 481 /* We're going to allocate a single chunk of data for both the per-level 482 * reference array and the arrays of aux_state. This makes cleanup 483 * significantly easier. 
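    *
    * For example (illustrative layout, not tied to any particular surface):
    * a miptree whose two levels have 1 and 3 logical layers respectively gets
    * one block laid out as
    *
    *    [level0 ptr][level1 ptr][s0][s1][s2][s3]
    *
    * where the level0 pointer points at s0 and the level1 pointer at s1.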
484 */ 485 const size_t total_size = per_level_array_size + 486 total_slices * sizeof(enum isl_aux_state); 487 void *data = malloc(total_size); 488 if (data == NULL) 489 return NULL; 490 491 enum isl_aux_state **per_level_arr = data; 492 enum isl_aux_state *s = data + per_level_array_size; 493 for (uint32_t level = 0; level < levels; level++) { 494 per_level_arr[level] = s; 495 const unsigned level_layers = brw_get_num_logical_layers(mt, level); 496 for (uint32_t a = 0; a < level_layers; a++) 497 *(s++) = initial; 498 } 499 assert((void *)s == data + total_size); 500 501 return per_level_arr; 502 } 503 504 static void 505 free_aux_state_map(enum isl_aux_state **state) 506 { 507 free(state); 508 } 509 510 static bool 511 need_to_retile_as_linear(struct brw_context *brw, unsigned row_pitch, 512 enum isl_tiling tiling, unsigned samples) 513 { 514 if (samples > 1) 515 return false; 516 517 if (tiling == ISL_TILING_LINEAR) 518 return false; 519 520 /* If the width is much smaller than a tile, don't bother tiling. */ 521 if (row_pitch < 64) 522 return true; 523 524 if (ALIGN(row_pitch, 512) >= 32768) { 525 perf_debug("row pitch %u too large to blit, falling back to untiled", 526 row_pitch); 527 return true; 528 } 529 530 return false; 531 } 532 533 static bool 534 need_to_retile_as_x(const struct brw_context *brw, uint64_t size, 535 enum isl_tiling tiling) 536 { 537 const struct gen_device_info *devinfo = &brw->screen->devinfo; 538 539 /* If the BO is too large to fit in the aperture, we need to use the 540 * BLT engine to support it. Prior to Sandybridge, the BLT paths can't 541 * handle Y-tiling, so we need to fall back to X. 542 */ 543 if (devinfo->gen < 6 && size >= brw->max_gtt_map_object_size && 544 tiling == ISL_TILING_Y0) 545 return true; 546 547 return false; 548 } 549 550 static struct intel_mipmap_tree * 551 make_surface(struct brw_context *brw, GLenum target, mesa_format format, 552 unsigned first_level, unsigned last_level, 553 unsigned width0, unsigned height0, unsigned depth0, 554 unsigned num_samples, isl_tiling_flags_t tiling_flags, 555 isl_surf_usage_flags_t isl_usage_flags, uint32_t alloc_flags, 556 unsigned row_pitch, struct brw_bo *bo) 557 { 558 struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1); 559 if (!mt) 560 return NULL; 561 562 if (!create_mapping_table(target, first_level, last_level, depth0, 563 mt->level)) { 564 free(mt); 565 return NULL; 566 } 567 568 mt->refcount = 1; 569 570 if (target == GL_TEXTURE_CUBE_MAP || 571 target == GL_TEXTURE_CUBE_MAP_ARRAY) 572 isl_usage_flags |= ISL_SURF_USAGE_CUBE_BIT; 573 574 DBG("%s: %s %s %ux %u:%u:%u %d..%d <-- %p\n", 575 __func__, 576 _mesa_enum_to_string(target), 577 _mesa_get_format_name(format), 578 num_samples, width0, height0, depth0, 579 first_level, last_level, mt); 580 581 struct isl_surf_init_info init_info = { 582 .dim = get_isl_surf_dim(target), 583 .format = translate_tex_format(brw, format, false), 584 .width = width0, 585 .height = height0, 586 .depth = target == GL_TEXTURE_3D ? depth0 : 1, 587 .levels = last_level - first_level + 1, 588 .array_len = target == GL_TEXTURE_3D ? 
1 : depth0,
      .samples = num_samples,
      .row_pitch = row_pitch,
      .usage = isl_usage_flags,
      .tiling_flags = tiling_flags,
   };

   if (!isl_surf_init_s(&brw->isl_dev, &mt->surf, &init_info))
      goto fail;

   /* Depth surfaces are always Y-tiled and stencil is always W-tiled, although
    * on gen7 platforms we also need to create Y-tiled copies of stencil for
    * texturing since the hardware can't sample from W-tiled surfaces. For
    * everything else, check for corner cases needing special treatment.
    */
   bool is_depth_stencil =
      mt->surf.usage & (ISL_SURF_USAGE_STENCIL_BIT | ISL_SURF_USAGE_DEPTH_BIT);
   if (!is_depth_stencil) {
      if (need_to_retile_as_linear(brw, mt->surf.row_pitch,
                                   mt->surf.tiling, mt->surf.samples)) {
         init_info.tiling_flags = 1u << ISL_TILING_LINEAR;
         if (!isl_surf_init_s(&brw->isl_dev, &mt->surf, &init_info))
            goto fail;
      } else if (need_to_retile_as_x(brw, mt->surf.size, mt->surf.tiling)) {
         init_info.tiling_flags = 1u << ISL_TILING_X;
         if (!isl_surf_init_s(&brw->isl_dev, &mt->surf, &init_info))
            goto fail;
      }
   }

   /* In the linear case the buffer gets padded by a fixed 64 bytes and
    * therefore the size may not be a multiple of row_pitch.
    * See isl_apply_surface_padding().
    */
   if (mt->surf.tiling != ISL_TILING_LINEAR)
      assert(mt->surf.size % mt->surf.row_pitch == 0);

   if (!bo) {
      mt->bo = brw_bo_alloc_tiled(brw->bufmgr, "isl-miptree",
                                  mt->surf.size,
                                  isl_tiling_to_i915_tiling(
                                     mt->surf.tiling),
                                  mt->surf.row_pitch, alloc_flags);
      if (!mt->bo)
         goto fail;
   } else {
      mt->bo = bo;
   }

   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->target = target;
   mt->format = format;
   mt->aux_state = NULL;
   mt->cpp = isl_format_get_layout(mt->surf.format)->bpb / 8;
   mt->compressed = _mesa_is_format_compressed(format);
   mt->drm_modifier = DRM_FORMAT_MOD_INVALID;

   return mt;

fail:
   intel_miptree_release(&mt);
   return NULL;
}

static bool
make_separate_stencil_surface(struct brw_context *brw,
                              struct intel_mipmap_tree *mt)
{
   mt->stencil_mt = make_surface(brw, mt->target, MESA_FORMAT_S_UINT8,
                                 0, mt->surf.levels - 1,
                                 mt->surf.logical_level0_px.width,
                                 mt->surf.logical_level0_px.height,
                                 mt->surf.dim == ISL_SURF_DIM_3D ?
662 mt->surf.logical_level0_px.depth : 663 mt->surf.logical_level0_px.array_len, 664 mt->surf.samples, ISL_TILING_W_BIT, 665 ISL_SURF_USAGE_STENCIL_BIT | 666 ISL_SURF_USAGE_TEXTURE_BIT, 667 BO_ALLOC_BUSY, 0, NULL); 668 669 if (!mt->stencil_mt) 670 return false; 671 672 mt->stencil_mt->r8stencil_needs_update = true; 673 674 return true; 675 } 676 677 static struct intel_mipmap_tree * 678 miptree_create(struct brw_context *brw, 679 GLenum target, 680 mesa_format format, 681 GLuint first_level, 682 GLuint last_level, 683 GLuint width0, 684 GLuint height0, 685 GLuint depth0, 686 GLuint num_samples, 687 enum intel_miptree_create_flags flags) 688 { 689 const struct gen_device_info *devinfo = &brw->screen->devinfo; 690 691 if (format == MESA_FORMAT_S_UINT8) 692 return make_surface(brw, target, format, first_level, last_level, 693 width0, height0, depth0, num_samples, 694 ISL_TILING_W_BIT, 695 ISL_SURF_USAGE_STENCIL_BIT | 696 ISL_SURF_USAGE_TEXTURE_BIT, 697 BO_ALLOC_BUSY, 698 0, 699 NULL); 700 701 const GLenum base_format = _mesa_get_format_base_format(format); 702 if ((base_format == GL_DEPTH_COMPONENT || 703 base_format == GL_DEPTH_STENCIL) && 704 !(flags & MIPTREE_CREATE_LINEAR)) { 705 /* Fix up the Z miptree format for how we're splitting out separate 706 * stencil. Gen7 expects there to be no stencil bits in its depth buffer. 707 */ 708 const mesa_format depth_only_format = 709 intel_depth_format_for_depthstencil_format(format); 710 struct intel_mipmap_tree *mt = make_surface( 711 brw, target, devinfo->gen >= 6 ? depth_only_format : format, 712 first_level, last_level, 713 width0, height0, depth0, num_samples, ISL_TILING_Y0_BIT, 714 ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT, 715 BO_ALLOC_BUSY, 0, NULL); 716 717 if (needs_separate_stencil(brw, mt, format) && 718 !make_separate_stencil_surface(brw, mt)) { 719 intel_miptree_release(&mt); 720 return NULL; 721 } 722 723 if (!(flags & MIPTREE_CREATE_NO_AUX)) 724 intel_miptree_choose_aux_usage(brw, mt); 725 726 return mt; 727 } 728 729 mesa_format tex_format = format; 730 mesa_format etc_format = MESA_FORMAT_NONE; 731 uint32_t alloc_flags = 0; 732 733 format = intel_lower_compressed_format(brw, format); 734 735 etc_format = (format != tex_format) ? tex_format : MESA_FORMAT_NONE; 736 737 if (flags & MIPTREE_CREATE_BUSY) 738 alloc_flags |= BO_ALLOC_BUSY; 739 740 isl_tiling_flags_t tiling_flags = (flags & MIPTREE_CREATE_LINEAR) ? 741 ISL_TILING_LINEAR_BIT : ISL_TILING_ANY_MASK; 742 743 /* TODO: This used to be because there wasn't BLORP to handle Y-tiling. 
*/ 744 if (devinfo->gen < 6) 745 tiling_flags &= ~ISL_TILING_Y0_BIT; 746 747 struct intel_mipmap_tree *mt = make_surface( 748 brw, target, format, 749 first_level, last_level, 750 width0, height0, depth0, 751 num_samples, tiling_flags, 752 ISL_SURF_USAGE_RENDER_TARGET_BIT | 753 ISL_SURF_USAGE_TEXTURE_BIT, 754 alloc_flags, 0, NULL); 755 if (!mt) 756 return NULL; 757 758 mt->etc_format = etc_format; 759 760 if (!(flags & MIPTREE_CREATE_NO_AUX)) 761 intel_miptree_choose_aux_usage(brw, mt); 762 763 return mt; 764 } 765 766 struct intel_mipmap_tree * 767 intel_miptree_create(struct brw_context *brw, 768 GLenum target, 769 mesa_format format, 770 GLuint first_level, 771 GLuint last_level, 772 GLuint width0, 773 GLuint height0, 774 GLuint depth0, 775 GLuint num_samples, 776 enum intel_miptree_create_flags flags) 777 { 778 assert(num_samples > 0); 779 780 struct intel_mipmap_tree *mt = miptree_create( 781 brw, target, format, 782 first_level, last_level, 783 width0, height0, depth0, num_samples, 784 flags); 785 if (!mt) 786 return NULL; 787 788 mt->offset = 0; 789 790 if (!intel_miptree_alloc_aux(brw, mt)) { 791 intel_miptree_release(&mt); 792 return NULL; 793 } 794 795 return mt; 796 } 797 798 struct intel_mipmap_tree * 799 intel_miptree_create_for_bo(struct brw_context *brw, 800 struct brw_bo *bo, 801 mesa_format format, 802 uint32_t offset, 803 uint32_t width, 804 uint32_t height, 805 uint32_t depth, 806 int pitch, 807 enum isl_tiling tiling, 808 enum intel_miptree_create_flags flags) 809 { 810 const struct gen_device_info *devinfo = &brw->screen->devinfo; 811 struct intel_mipmap_tree *mt; 812 const GLenum target = depth > 1 ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D; 813 const GLenum base_format = _mesa_get_format_base_format(format); 814 815 if ((base_format == GL_DEPTH_COMPONENT || 816 base_format == GL_DEPTH_STENCIL)) { 817 const mesa_format depth_only_format = 818 intel_depth_format_for_depthstencil_format(format); 819 mt = make_surface(brw, target, 820 devinfo->gen >= 6 ? depth_only_format : format, 821 0, 0, width, height, depth, 1, ISL_TILING_Y0_BIT, 822 ISL_SURF_USAGE_DEPTH_BIT | ISL_SURF_USAGE_TEXTURE_BIT, 823 BO_ALLOC_BUSY, pitch, bo); 824 if (!mt) 825 return NULL; 826 827 brw_bo_reference(bo); 828 829 if (!(flags & MIPTREE_CREATE_NO_AUX)) 830 intel_miptree_choose_aux_usage(brw, mt); 831 832 return mt; 833 } else if (format == MESA_FORMAT_S_UINT8) { 834 mt = make_surface(brw, target, MESA_FORMAT_S_UINT8, 835 0, 0, width, height, depth, 1, 836 ISL_TILING_W_BIT, 837 ISL_SURF_USAGE_STENCIL_BIT | 838 ISL_SURF_USAGE_TEXTURE_BIT, 839 BO_ALLOC_BUSY, pitch, bo); 840 if (!mt) 841 return NULL; 842 843 assert(bo->size >= mt->surf.size); 844 845 brw_bo_reference(bo); 846 return mt; 847 } 848 849 /* Nothing will be able to use this miptree with the BO if the offset isn't 850 * aligned. 851 */ 852 if (tiling != ISL_TILING_LINEAR) 853 assert(offset % 4096 == 0); 854 855 /* miptrees can't handle negative pitch. If you need flipping of images, 856 * that's outside of the scope of the mt. 857 */ 858 assert(pitch >= 0); 859 860 /* The BO already has a tiling format and we shouldn't confuse the lower 861 * layers by making it try to find a tiling format again. 
862 */ 863 assert((flags & MIPTREE_CREATE_LINEAR) == 0); 864 865 mt = make_surface(brw, target, format, 866 0, 0, width, height, depth, 1, 867 1lu << tiling, 868 ISL_SURF_USAGE_RENDER_TARGET_BIT | 869 ISL_SURF_USAGE_TEXTURE_BIT, 870 0, pitch, bo); 871 if (!mt) 872 return NULL; 873 874 brw_bo_reference(bo); 875 mt->bo = bo; 876 mt->offset = offset; 877 878 if (!(flags & MIPTREE_CREATE_NO_AUX)) { 879 intel_miptree_choose_aux_usage(brw, mt); 880 881 if (!intel_miptree_alloc_aux(brw, mt)) { 882 intel_miptree_release(&mt); 883 return NULL; 884 } 885 } 886 887 return mt; 888 } 889 890 static struct intel_mipmap_tree * 891 miptree_create_for_planar_image(struct brw_context *brw, 892 __DRIimage *image, GLenum target, 893 enum isl_tiling tiling) 894 { 895 const struct intel_image_format *f = image->planar_format; 896 struct intel_mipmap_tree *planar_mt = NULL; 897 898 for (int i = 0; i < f->nplanes; i++) { 899 const int index = f->planes[i].buffer_index; 900 const uint32_t dri_format = f->planes[i].dri_format; 901 const mesa_format format = driImageFormatToGLFormat(dri_format); 902 const uint32_t width = image->width >> f->planes[i].width_shift; 903 const uint32_t height = image->height >> f->planes[i].height_shift; 904 905 /* Disable creation of the texture's aux buffers because the driver 906 * exposes no EGL API to manage them. That is, there is no API for 907 * resolving the aux buffer's content to the main buffer nor for 908 * invalidating the aux buffer's content. 909 */ 910 struct intel_mipmap_tree *mt = 911 intel_miptree_create_for_bo(brw, image->bo, format, 912 image->offsets[index], 913 width, height, 1, 914 image->strides[index], 915 tiling, 916 MIPTREE_CREATE_NO_AUX); 917 if (mt == NULL) 918 return NULL; 919 920 mt->target = target; 921 922 if (i == 0) 923 planar_mt = mt; 924 else 925 planar_mt->plane[i - 1] = mt; 926 } 927 928 planar_mt->drm_modifier = image->modifier; 929 930 return planar_mt; 931 } 932 933 static bool 934 create_ccs_buf_for_image(struct brw_context *brw, 935 __DRIimage *image, 936 struct intel_mipmap_tree *mt, 937 enum isl_aux_state initial_state) 938 { 939 struct isl_surf temp_ccs_surf; 940 941 /* CCS is only supported for very simple miptrees */ 942 assert(image->aux_offset != 0 && image->aux_pitch != 0); 943 assert(image->tile_x == 0 && image->tile_y == 0); 944 assert(mt->surf.samples == 1); 945 assert(mt->surf.levels == 1); 946 assert(mt->surf.logical_level0_px.depth == 1); 947 assert(mt->surf.logical_level0_px.array_len == 1); 948 assert(mt->first_level == 0); 949 assert(mt->last_level == 0); 950 951 /* We shouldn't already have a CCS */ 952 assert(!mt->mcs_buf); 953 954 if (!isl_surf_get_ccs_surf(&brw->isl_dev, &mt->surf, &temp_ccs_surf, 955 image->aux_pitch)) 956 return false; 957 958 assert(image->aux_offset < image->bo->size); 959 assert(temp_ccs_surf.size <= image->bo->size - image->aux_offset); 960 961 mt->mcs_buf = calloc(sizeof(*mt->mcs_buf), 1); 962 if (mt->mcs_buf == NULL) 963 return false; 964 965 mt->aux_state = create_aux_state_map(mt, initial_state); 966 if (!mt->aux_state) { 967 free(mt->mcs_buf); 968 mt->mcs_buf = NULL; 969 return false; 970 } 971 972 mt->mcs_buf->bo = image->bo; 973 brw_bo_reference(image->bo); 974 975 mt->mcs_buf->offset = image->aux_offset; 976 mt->mcs_buf->size = image->bo->size - image->aux_offset; 977 mt->mcs_buf->pitch = image->aux_pitch; 978 mt->mcs_buf->qpitch = 0; 979 mt->mcs_buf->surf = temp_ccs_surf; 980 981 return true; 982 } 983 984 struct intel_mipmap_tree * 985 intel_miptree_create_for_dri_image(struct brw_context 
*brw,
                                   __DRIimage *image, GLenum target,
                                   mesa_format format,
                                   bool is_winsys_image)
{
   uint32_t bo_tiling, bo_swizzle;
   brw_bo_get_tiling(image->bo, &bo_tiling, &bo_swizzle);

   const struct isl_drm_modifier_info *mod_info =
      isl_drm_modifier_get_info(image->modifier);

   const enum isl_tiling tiling =
      mod_info ? mod_info->tiling : isl_tiling_from_i915_tiling(bo_tiling);

   if (image->planar_format && image->planar_format->nplanes > 1)
      return miptree_create_for_planar_image(brw, image, target, tiling);

   if (image->planar_format)
      assert(image->planar_format->planes[0].dri_format == image->dri_format);

   if (!brw->ctx.TextureFormatSupported[format]) {
      /* The texture storage paths in core Mesa detect if the driver does not
       * support the user-requested format, and then search for a fallback
       * format. The DRIimage code bypasses core Mesa, though. So we do the
       * fallbacks here for important formats.
       *
       * We must support DRM_FOURCC_XBGR8888 textures because the Android
       * framework produces HAL_PIXEL_FORMAT_RGBX8888 winsys surfaces, which
       * the Chrome OS compositor consumes as dma_buf EGLImages.
       */
      format = _mesa_format_fallback_rgbx_to_rgba(format);
   }

   if (!brw->ctx.TextureFormatSupported[format])
      return NULL;

   enum intel_miptree_create_flags mt_create_flags = 0;

   /* If this image comes in from a window system, we have different
    * requirements than if it comes in via an EGL import operation. Window
    * system images can use any form of auxiliary compression we wish because
    * they get "flushed" before being handed off to the window system and we
    * have the opportunity to do resolves. Non window-system images, on the
    * other hand, have no resolve point so we can't have aux without a
    * modifier.
    */
   if (!is_winsys_image)
      mt_create_flags |= MIPTREE_CREATE_NO_AUX;

   /* If we have a modifier which specifies aux, don't create one yet */
   if (mod_info && mod_info->aux_usage != ISL_AUX_USAGE_NONE)
      mt_create_flags |= MIPTREE_CREATE_NO_AUX;

   /* Disable creation of the texture's aux buffers because the driver exposes
    * no EGL API to manage them. That is, there is no API for resolving the aux
    * buffer's content to the main buffer nor for invalidating the aux buffer's
    * content.
    */
   struct intel_mipmap_tree *mt =
      intel_miptree_create_for_bo(brw, image->bo, format,
                                  image->offset, image->width, image->height, 1,
                                  image->pitch, tiling, mt_create_flags);
   if (mt == NULL)
      return NULL;

   mt->target = target;
   mt->level[0].level_x = image->tile_x;
   mt->level[0].level_y = image->tile_y;
   mt->drm_modifier = image->modifier;

   /* Per "OES_EGL_image" error reporting, we report GL_INVALID_OPERATION for
    * EGL images from non-tile-aligned surfaces on gen4 hardware and earlier,
    * which have trouble resolving back to the destination image due to
    * alignment issues.
1058 */ 1059 const struct gen_device_info *devinfo = &brw->screen->devinfo; 1060 if (!devinfo->has_surface_tile_offset) { 1061 uint32_t draw_x, draw_y; 1062 intel_miptree_get_tile_offsets(mt, 0, 0, &draw_x, &draw_y); 1063 1064 if (draw_x != 0 || draw_y != 0) { 1065 _mesa_error(&brw->ctx, GL_INVALID_OPERATION, __func__); 1066 intel_miptree_release(&mt); 1067 return NULL; 1068 } 1069 } 1070 1071 if (mod_info && mod_info->aux_usage != ISL_AUX_USAGE_NONE) { 1072 assert(mod_info->aux_usage == ISL_AUX_USAGE_CCS_E); 1073 1074 mt->aux_usage = mod_info->aux_usage; 1075 /* If we are a window system buffer, then we can support fast-clears 1076 * even if the modifier doesn't support them by doing a partial resolve 1077 * as part of the flush operation. 1078 */ 1079 mt->supports_fast_clear = 1080 is_winsys_image || mod_info->supports_clear_color; 1081 1082 /* We don't know the actual state of the surface when we get it but we 1083 * can make a pretty good guess based on the modifier. What we do know 1084 * for sure is that it isn't in the AUX_INVALID state, so we just assume 1085 * a worst case of compression. 1086 */ 1087 enum isl_aux_state initial_state = 1088 isl_drm_modifier_get_default_aux_state(image->modifier); 1089 1090 if (!create_ccs_buf_for_image(brw, image, mt, initial_state)) { 1091 intel_miptree_release(&mt); 1092 return NULL; 1093 } 1094 } 1095 1096 /* Don't assume coherency for imported EGLimages. We don't know what 1097 * external clients are going to do with it. They may scan it out. 1098 */ 1099 image->bo->cache_coherent = false; 1100 1101 return mt; 1102 } 1103 1104 /** 1105 * For a singlesample renderbuffer, this simply wraps the given BO with a 1106 * miptree. 1107 * 1108 * For a multisample renderbuffer, this wraps the window system's 1109 * (singlesample) BO with a singlesample miptree attached to the 1110 * intel_renderbuffer, then creates a multisample miptree attached to irb->mt 1111 * that will contain the actual rendering (which is lazily resolved to 1112 * irb->singlesample_mt). 1113 */ 1114 bool 1115 intel_update_winsys_renderbuffer_miptree(struct brw_context *intel, 1116 struct intel_renderbuffer *irb, 1117 struct intel_mipmap_tree *singlesample_mt, 1118 uint32_t width, uint32_t height, 1119 uint32_t pitch) 1120 { 1121 struct intel_mipmap_tree *multisample_mt = NULL; 1122 struct gl_renderbuffer *rb = &irb->Base.Base; 1123 mesa_format format = rb->Format; 1124 const unsigned num_samples = MAX2(rb->NumSamples, 1); 1125 1126 /* Only the front and back buffers, which are color buffers, are allocated 1127 * through the image loader. 
1128 */ 1129 assert(_mesa_get_format_base_format(format) == GL_RGB || 1130 _mesa_get_format_base_format(format) == GL_RGBA); 1131 1132 assert(singlesample_mt); 1133 1134 if (num_samples == 1) { 1135 intel_miptree_release(&irb->mt); 1136 irb->mt = singlesample_mt; 1137 1138 assert(!irb->singlesample_mt); 1139 } else { 1140 intel_miptree_release(&irb->singlesample_mt); 1141 irb->singlesample_mt = singlesample_mt; 1142 1143 if (!irb->mt || 1144 irb->mt->surf.logical_level0_px.width != width || 1145 irb->mt->surf.logical_level0_px.height != height) { 1146 multisample_mt = intel_miptree_create_for_renderbuffer(intel, 1147 format, 1148 width, 1149 height, 1150 num_samples); 1151 if (!multisample_mt) 1152 goto fail; 1153 1154 irb->need_downsample = false; 1155 intel_miptree_release(&irb->mt); 1156 irb->mt = multisample_mt; 1157 } 1158 } 1159 return true; 1160 1161 fail: 1162 intel_miptree_release(&irb->mt); 1163 return false; 1164 } 1165 1166 struct intel_mipmap_tree* 1167 intel_miptree_create_for_renderbuffer(struct brw_context *brw, 1168 mesa_format format, 1169 uint32_t width, 1170 uint32_t height, 1171 uint32_t num_samples) 1172 { 1173 struct intel_mipmap_tree *mt; 1174 uint32_t depth = 1; 1175 GLenum target = num_samples > 1 ? GL_TEXTURE_2D_MULTISAMPLE : GL_TEXTURE_2D; 1176 1177 mt = intel_miptree_create(brw, target, format, 0, 0, 1178 width, height, depth, num_samples, 1179 MIPTREE_CREATE_BUSY); 1180 if (!mt) 1181 goto fail; 1182 1183 return mt; 1184 1185 fail: 1186 intel_miptree_release(&mt); 1187 return NULL; 1188 } 1189 1190 void 1191 intel_miptree_reference(struct intel_mipmap_tree **dst, 1192 struct intel_mipmap_tree *src) 1193 { 1194 if (*dst == src) 1195 return; 1196 1197 intel_miptree_release(dst); 1198 1199 if (src) { 1200 src->refcount++; 1201 DBG("%s %p refcount now %d\n", __func__, src, src->refcount); 1202 } 1203 1204 *dst = src; 1205 } 1206 1207 static void 1208 intel_miptree_aux_buffer_free(struct intel_miptree_aux_buffer *aux_buf) 1209 { 1210 if (aux_buf == NULL) 1211 return; 1212 1213 brw_bo_unreference(aux_buf->bo); 1214 1215 free(aux_buf); 1216 } 1217 1218 void 1219 intel_miptree_release(struct intel_mipmap_tree **mt) 1220 { 1221 if (!*mt) 1222 return; 1223 1224 DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1); 1225 if (--(*mt)->refcount <= 0) { 1226 GLuint i; 1227 1228 DBG("%s deleting %p\n", __func__, *mt); 1229 1230 brw_bo_unreference((*mt)->bo); 1231 intel_miptree_release(&(*mt)->stencil_mt); 1232 intel_miptree_release(&(*mt)->r8stencil_mt); 1233 intel_miptree_aux_buffer_free((*mt)->hiz_buf); 1234 intel_miptree_aux_buffer_free((*mt)->mcs_buf); 1235 free_aux_state_map((*mt)->aux_state); 1236 1237 intel_miptree_release(&(*mt)->plane[0]); 1238 intel_miptree_release(&(*mt)->plane[1]); 1239 1240 for (i = 0; i < MAX_TEXTURE_LEVELS; i++) { 1241 free((*mt)->level[i].slice); 1242 } 1243 1244 free(*mt); 1245 } 1246 *mt = NULL; 1247 } 1248 1249 1250 void 1251 intel_get_image_dims(struct gl_texture_image *image, 1252 int *width, int *height, int *depth) 1253 { 1254 switch (image->TexObject->Target) { 1255 case GL_TEXTURE_1D_ARRAY: 1256 /* For a 1D Array texture the OpenGL API will treat the image height as 1257 * the number of array slices. For Intel hardware, we treat the 1D array 1258 * as a 2D Array with a height of 1. So, here we want to swap image 1259 * height and depth. 
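       *
       * For example (illustrative): a GL_TEXTURE_1D_ARRAY image with
       * Width == 64 and Height == 5 (5 array slices) comes out of here as
       * width = 64, height = 1, depth = 5.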
       */
      assert(image->Depth == 1);
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   case GL_TEXTURE_CUBE_MAP:
      /* For Cube maps, the mesa/main api layer gives us a depth of 1 even
       * though we really have 6 slices.
       */
      assert(image->Depth == 1);
      *width = image->Width;
      *height = image->Height;
      *depth = 6;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree? This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* choose the texture object based on the target passed in, and
    * objects can't change targets over their lifetimes, so this should be
    * true.
    */
   assert(image->TexObject->Target == mt->target);

   mesa_format mt_format = mt->format;
   if (mt->format == MESA_FORMAT_Z24_UNORM_X8_UINT && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z24_UNORM_S8_UINT;
   if (mt->format == MESA_FORMAT_Z_FLOAT32 && mt->stencil_mt)
      mt_format = MESA_FORMAT_Z32_FLOAT_S8X24_UINT;
   if (mt->etc_format != MESA_FORMAT_NONE)
      mt_format = mt->etc_format;

   if (image->TexFormat != mt_format)
      return false;

   intel_get_image_dims(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   if (level >= mt->surf.levels)
      return false;

   const unsigned level_depth =
      mt->surf.dim == ISL_SURF_DIM_3D ?
         minify(mt->surf.logical_level0_px.depth, level) :
         mt->surf.logical_level0_px.array_len;

   return width == minify(mt->surf.logical_level0_px.width, level) &&
          height == minify(mt->surf.logical_level0_px.height, level) &&
          depth == level_depth &&
          MAX2(image->NumSamples, 1) == mt->surf.samples;
}

void
intel_miptree_get_image_offset(const struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   if (level == 0 && slice == 0) {
      *x = mt->level[0].level_x;
      *y = mt->level[0].level_y;
      return;
   }

   uint32_t x_offset_sa, y_offset_sa;

   /* The miptree itself can have an offset only if it represents a single
    * slice in an imported buffer object.
    * See intel_miptree_create_for_dri_image().
    */
   assert(mt->level[0].level_x == 0);
   assert(mt->level[0].level_y == 0);

   /* The given level is relative to level zero while the miptree may
    * represent just a subset of all levels starting from 'first_level'.
    */
   assert(level >= mt->first_level);
   level -= mt->first_level;

   const unsigned z = mt->surf.dim == ISL_SURF_DIM_3D ? slice : 0;
   slice = mt->surf.dim == ISL_SURF_DIM_3D ? 0 : slice;
   isl_surf_get_image_offset_el(&mt->surf, level, slice, z,
                                &x_offset_sa, &y_offset_sa);

   *x = x_offset_sa;
   *y = y_offset_sa;
}


/**
 * This function computes the tile_w (in bytes) and tile_h (in rows) of
 * different tiling patterns. If the BO is untiled, tile_w is set to cpp
 * and tile_h is set to 1.
1373 */ 1374 void 1375 intel_get_tile_dims(enum isl_tiling tiling, uint32_t cpp, 1376 uint32_t *tile_w, uint32_t *tile_h) 1377 { 1378 switch (tiling) { 1379 case ISL_TILING_X: 1380 *tile_w = 512; 1381 *tile_h = 8; 1382 break; 1383 case ISL_TILING_Y0: 1384 *tile_w = 128; 1385 *tile_h = 32; 1386 break; 1387 case ISL_TILING_LINEAR: 1388 *tile_w = cpp; 1389 *tile_h = 1; 1390 break; 1391 default: 1392 unreachable("not reached"); 1393 } 1394 } 1395 1396 1397 /** 1398 * This function computes masks that may be used to select the bits of the X 1399 * and Y coordinates that indicate the offset within a tile. If the BO is 1400 * untiled, the masks are set to 0. 1401 */ 1402 void 1403 intel_get_tile_masks(enum isl_tiling tiling, uint32_t cpp, 1404 uint32_t *mask_x, uint32_t *mask_y) 1405 { 1406 uint32_t tile_w_bytes, tile_h; 1407 1408 intel_get_tile_dims(tiling, cpp, &tile_w_bytes, &tile_h); 1409 1410 *mask_x = tile_w_bytes / cpp - 1; 1411 *mask_y = tile_h - 1; 1412 } 1413 1414 /** 1415 * Compute the offset (in bytes) from the start of the BO to the given x 1416 * and y coordinate. For tiled BOs, caller must ensure that x and y are 1417 * multiples of the tile size. 1418 */ 1419 uint32_t 1420 intel_miptree_get_aligned_offset(const struct intel_mipmap_tree *mt, 1421 uint32_t x, uint32_t y) 1422 { 1423 int cpp = mt->cpp; 1424 uint32_t pitch = mt->surf.row_pitch; 1425 1426 switch (mt->surf.tiling) { 1427 default: 1428 unreachable("not reached"); 1429 case ISL_TILING_LINEAR: 1430 return y * pitch + x * cpp; 1431 case ISL_TILING_X: 1432 assert((x % (512 / cpp)) == 0); 1433 assert((y % 8) == 0); 1434 return y * pitch + x / (512 / cpp) * 4096; 1435 case ISL_TILING_Y0: 1436 assert((x % (128 / cpp)) == 0); 1437 assert((y % 32) == 0); 1438 return y * pitch + x / (128 / cpp) * 4096; 1439 } 1440 } 1441 1442 /** 1443 * Rendering with tiled buffers requires that the base address of the buffer 1444 * be aligned to a page boundary. For renderbuffers, and sometimes with 1445 * textures, we may want the surface to point at a texture image level that 1446 * isn't at a page boundary. 1447 * 1448 * This function returns an appropriately-aligned base offset 1449 * according to the tiling restrictions, plus any required x/y offset 1450 * from there. 
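 *
 * A worked example (illustrative numbers only): for an X-tiled miptree with
 * cpp == 4 and row_pitch == 4096, intel_get_tile_masks() yields
 * mask_x == 127 and mask_y == 7. An image located at (x = 300, y = 20)
 * pixels then gives tile_x == 44, tile_y == 4, and an aligned base offset
 * of 16 * 4096 + (256 / 128) * 4096 == 73728 bytes.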
1451 */ 1452 uint32_t 1453 intel_miptree_get_tile_offsets(const struct intel_mipmap_tree *mt, 1454 GLuint level, GLuint slice, 1455 uint32_t *tile_x, 1456 uint32_t *tile_y) 1457 { 1458 uint32_t x, y; 1459 uint32_t mask_x, mask_y; 1460 1461 intel_get_tile_masks(mt->surf.tiling, mt->cpp, &mask_x, &mask_y); 1462 intel_miptree_get_image_offset(mt, level, slice, &x, &y); 1463 1464 *tile_x = x & mask_x; 1465 *tile_y = y & mask_y; 1466 1467 return intel_miptree_get_aligned_offset(mt, x & ~mask_x, y & ~mask_y); 1468 } 1469 1470 static void 1471 intel_miptree_copy_slice_sw(struct brw_context *brw, 1472 struct intel_mipmap_tree *src_mt, 1473 unsigned src_level, unsigned src_layer, 1474 struct intel_mipmap_tree *dst_mt, 1475 unsigned dst_level, unsigned dst_layer, 1476 unsigned width, unsigned height) 1477 { 1478 void *src, *dst; 1479 ptrdiff_t src_stride, dst_stride; 1480 const unsigned cpp = (isl_format_get_layout(dst_mt->surf.format)->bpb / 8); 1481 1482 intel_miptree_map(brw, src_mt, 1483 src_level, src_layer, 1484 0, 0, 1485 width, height, 1486 GL_MAP_READ_BIT | BRW_MAP_DIRECT_BIT, 1487 &src, &src_stride); 1488 1489 intel_miptree_map(brw, dst_mt, 1490 dst_level, dst_layer, 1491 0, 0, 1492 width, height, 1493 GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT | 1494 BRW_MAP_DIRECT_BIT, 1495 &dst, &dst_stride); 1496 1497 DBG("sw blit %s mt %p %p/%"PRIdPTR" -> %s mt %p %p/%"PRIdPTR" (%dx%d)\n", 1498 _mesa_get_format_name(src_mt->format), 1499 src_mt, src, src_stride, 1500 _mesa_get_format_name(dst_mt->format), 1501 dst_mt, dst, dst_stride, 1502 width, height); 1503 1504 int row_size = cpp * width; 1505 if (src_stride == row_size && 1506 dst_stride == row_size) { 1507 memcpy(dst, src, row_size * height); 1508 } else { 1509 for (int i = 0; i < height; i++) { 1510 memcpy(dst, src, row_size); 1511 dst += dst_stride; 1512 src += src_stride; 1513 } 1514 } 1515 1516 intel_miptree_unmap(brw, dst_mt, dst_level, dst_layer); 1517 intel_miptree_unmap(brw, src_mt, src_level, src_layer); 1518 1519 /* Don't forget to copy the stencil data over, too. We could have skipped 1520 * passing BRW_MAP_DIRECT_BIT, but that would have meant intel_miptree_map 1521 * shuffling the two data sources in/out of temporary storage instead of 1522 * the direct mapping we get this way. 
1523 */ 1524 if (dst_mt->stencil_mt) { 1525 assert(src_mt->stencil_mt); 1526 intel_miptree_copy_slice_sw(brw, 1527 src_mt->stencil_mt, src_level, src_layer, 1528 dst_mt->stencil_mt, dst_level, dst_layer, 1529 width, height); 1530 } 1531 } 1532 1533 void 1534 intel_miptree_copy_slice(struct brw_context *brw, 1535 struct intel_mipmap_tree *src_mt, 1536 unsigned src_level, unsigned src_layer, 1537 struct intel_mipmap_tree *dst_mt, 1538 unsigned dst_level, unsigned dst_layer) 1539 1540 { 1541 mesa_format format = src_mt->format; 1542 unsigned width = minify(src_mt->surf.phys_level0_sa.width, 1543 src_level - src_mt->first_level); 1544 unsigned height = minify(src_mt->surf.phys_level0_sa.height, 1545 src_level - src_mt->first_level); 1546 1547 assert(src_layer < get_num_phys_layers(&src_mt->surf, 1548 src_level - src_mt->first_level)); 1549 1550 assert(src_mt->format == dst_mt->format); 1551 1552 if (dst_mt->compressed) { 1553 unsigned int i, j; 1554 _mesa_get_format_block_size(dst_mt->format, &i, &j); 1555 height = ALIGN_NPOT(height, j) / j; 1556 width = ALIGN_NPOT(width, i) / i; 1557 } 1558 1559 /* If it's a packed depth/stencil buffer with separate stencil, the blit 1560 * below won't apply since we can't do the depth's Y tiling or the 1561 * stencil's W tiling in the blitter. 1562 */ 1563 if (src_mt->stencil_mt) { 1564 intel_miptree_copy_slice_sw(brw, 1565 src_mt, src_level, src_layer, 1566 dst_mt, dst_level, dst_layer, 1567 width, height); 1568 return; 1569 } 1570 1571 uint32_t dst_x, dst_y, src_x, src_y; 1572 intel_miptree_get_image_offset(dst_mt, dst_level, dst_layer, 1573 &dst_x, &dst_y); 1574 intel_miptree_get_image_offset(src_mt, src_level, src_layer, 1575 &src_x, &src_y); 1576 1577 DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n", 1578 _mesa_get_format_name(src_mt->format), 1579 src_mt, src_x, src_y, src_mt->surf.row_pitch, 1580 _mesa_get_format_name(dst_mt->format), 1581 dst_mt, dst_x, dst_y, dst_mt->surf.row_pitch, 1582 width, height); 1583 1584 if (!intel_miptree_blit(brw, 1585 src_mt, src_level, src_layer, 0, 0, false, 1586 dst_mt, dst_level, dst_layer, 0, 0, false, 1587 width, height, GL_COPY)) { 1588 perf_debug("miptree validate blit for %s failed\n", 1589 _mesa_get_format_name(format)); 1590 1591 intel_miptree_copy_slice_sw(brw, 1592 src_mt, src_level, src_layer, 1593 dst_mt, dst_level, dst_layer, 1594 width, height); 1595 } 1596 } 1597 1598 /** 1599 * Copies the image's current data to the given miptree, and associates that 1600 * miptree with the image. 
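 *
 * For instance, face 3 of a cube map copies only layer 3, while a 1D array
 * image with Height == 8 copies layers 0 through 7.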
 */
void
intel_miptree_copy_teximage(struct brw_context *brw,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   const unsigned face = intelImage->base.Base.Face;
   unsigned start_layer, end_layer;

   if (intel_obj->base.Target == GL_TEXTURE_1D_ARRAY) {
      assert(face == 0);
      assert(intelImage->base.Base.Height);
      start_layer = 0;
      end_layer = intelImage->base.Base.Height - 1;
   } else if (face > 0) {
      start_layer = face;
      end_layer = face;
   } else {
      assert(intelImage->base.Base.Depth);
      start_layer = 0;
      end_layer = intelImage->base.Base.Depth - 1;
   }

   for (unsigned i = start_layer; i <= end_layer; i++) {
      intel_miptree_copy_slice(brw,
                               src_mt, level, i,
                               dst_mt, level, i);
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

static void
intel_miptree_init_mcs(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       int init_value)
{
   assert(mt->mcs_buf != NULL);

   /* From the Ivy Bridge PRM, Vol 2 Part 1 p326:
    *
    *     When MCS buffer is enabled and bound to MSRT, it is required that it
    *     is cleared prior to any rendering.
    *
    * Since we don't use the MCS buffer for any purpose other than rendering,
    * it makes sense to just clear it immediately upon allocation.
    *
    * Note: the clear value for MCS buffers is all 1's, so we memset to 0xff.
    */
   void *map = brw_bo_map(brw, mt->mcs_buf->bo, MAP_WRITE);
   if (unlikely(map == NULL)) {
      fprintf(stderr, "Failed to map mcs buffer into GTT\n");
      brw_bo_unreference(mt->mcs_buf->bo);
      free(mt->mcs_buf);
      return;
   }
   void *data = map;
   memset(data, init_value, mt->mcs_buf->size);
   brw_bo_unmap(mt->mcs_buf->bo);
}

static struct intel_miptree_aux_buffer *
intel_alloc_aux_buffer(struct brw_context *brw,
                       const char *name,
                       const struct isl_surf *aux_surf,
                       uint32_t alloc_flags,
                       struct intel_mipmap_tree *mt)
{
   struct intel_miptree_aux_buffer *buf = calloc(sizeof(*buf), 1);
   if (!buf)
      return NULL;

   buf->size = aux_surf->size;
   buf->pitch = aux_surf->row_pitch;
   buf->qpitch = isl_surf_get_array_pitch_sa_rows(aux_surf);

   /* ISL has a stricter set of alignment rules than the drm allocator.
    * Therefore one can pass the ISL dimensions in terms of bytes instead of
    * trying to recalculate based on different format block sizes.
    */
   buf->bo = brw_bo_alloc_tiled(brw->bufmgr, name, buf->size,
                                I915_TILING_Y, buf->pitch, alloc_flags);
   if (!buf->bo) {
      free(buf);
      return NULL;
   }

   buf->surf = *aux_surf;

   return buf;
}

static bool
intel_miptree_alloc_mcs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt,
                        GLuint num_samples)
{
   assert(brw->screen->devinfo.gen >= 7); /* MCS only used on Gen7+ */
   assert(mt->mcs_buf == NULL);
   assert(mt->aux_usage == ISL_AUX_USAGE_MCS);

   /* Multisampled miptrees are only supported with a single mip level.
    */
   assert(mt->first_level == 0);
   enum isl_aux_state **aux_state =
      create_aux_state_map(mt, ISL_AUX_STATE_CLEAR);
   if (!aux_state)
      return false;

   struct isl_surf temp_mcs_surf;

   MAYBE_UNUSED bool ok =
      isl_surf_get_mcs_surf(&brw->isl_dev, &mt->surf, &temp_mcs_surf);
   assert(ok);

   /* The buffer needs to be initialised, which requires it to be mapped into
    * CPU space for writing immediately. Therefore do not use the GPU access
    * flag, which can cause an unnecessary delay if the backing pages happened
    * to be just used by the GPU.
    */
   const uint32_t alloc_flags = 0;
   mt->mcs_buf = intel_alloc_aux_buffer(brw, "mcs-miptree",
                                        &temp_mcs_surf, alloc_flags, mt);
   if (!mt->mcs_buf) {
      free(aux_state);
      return false;
   }

   mt->aux_state = aux_state;

   intel_miptree_init_mcs(brw, mt, 0xFF);

   return true;
}

bool
intel_miptree_alloc_ccs(struct brw_context *brw,
                        struct intel_mipmap_tree *mt)
{
   assert(mt->mcs_buf == NULL);
   assert(mt->aux_usage == ISL_AUX_USAGE_CCS_E ||
          mt->aux_usage == ISL_AUX_USAGE_CCS_D);

   struct isl_surf temp_ccs_surf;

   if (!isl_surf_get_ccs_surf(&brw->isl_dev, &mt->surf, &temp_ccs_surf, 0))
      return false;

   assert(temp_ccs_surf.size &&
          (temp_ccs_surf.size % temp_ccs_surf.row_pitch == 0));

   enum isl_aux_state **aux_state =
      create_aux_state_map(mt, ISL_AUX_STATE_PASS_THROUGH);
   if (!aux_state)
      return false;

   /* When CCS_E is used, we need to ensure that the CCS starts off in a valid
    * state. From the Sky Lake PRM, "MCS Buffer for Render Target(s)":
    *
    *    "If Software wants to enable Color Compression without Fast clear,
    *     Software needs to initialize MCS with zeros."
    *
    * A CCS value of 0 indicates that the corresponding block is in the
    * pass-through state, which is what we want.
    *
    * For CCS_D, on the other hand, we don't care as we're about to perform a
    * fast-clear operation. In that case, being hot in caches is more useful.
    */
   const uint32_t alloc_flags = mt->aux_usage == ISL_AUX_USAGE_CCS_E ?
                                BO_ALLOC_ZEROED : BO_ALLOC_BUSY;
   mt->mcs_buf = intel_alloc_aux_buffer(brw, "ccs-miptree",
                                        &temp_ccs_surf, alloc_flags, mt);
   if (!mt->mcs_buf) {
      free(aux_state);
      return false;
   }

   mt->aux_state = aux_state;

   return true;
}

/**
 * Helper for intel_miptree_alloc_hiz() that sets
 * \c mt->level[level].has_hiz. Return true if and only if
 * \c has_hiz was set.
 */
static bool
intel_miptree_level_enable_hiz(struct brw_context *brw,
                               struct intel_mipmap_tree *mt,
                               uint32_t level)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(mt->hiz_buf);
   assert(mt->surf.size > 0);

   if (devinfo->gen >= 8 || devinfo->is_haswell) {
      uint32_t width = minify(mt->surf.phys_level0_sa.width, level);
      uint32_t height = minify(mt->surf.phys_level0_sa.height, level);

      /* Disable HiZ for LOD > 0 unless the width is 8 aligned
       * and the height is 4 aligned. This allows our HiZ support
       * to fulfill Haswell restrictions for HiZ ops. For LOD == 0,
       * we can grow the width & height to allow the HiZ op to
       * force the proper size alignments.
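       *
       * For example (illustrative): a level with width 72 and height 36
       * satisfies both checks (72 & 7 == 0, 36 & 3 == 0) and keeps HiZ,
       * while a 68x36 level fails the width check and has HiZ disabled.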
1811 */ 1812 if (level > 0 && ((width & 7) || (height & 3))) { 1813 DBG("mt %p level %d: HiZ DISABLED\n", mt, level); 1814 return false; 1815 } 1816 } 1817 1818 DBG("mt %p level %d: HiZ enabled\n", mt, level); 1819 mt->level[level].has_hiz = true; 1820 return true; 1821 } 1822 1823 bool 1824 intel_miptree_alloc_hiz(struct brw_context *brw, 1825 struct intel_mipmap_tree *mt) 1826 { 1827 assert(mt->hiz_buf == NULL); 1828 assert(mt->aux_usage == ISL_AUX_USAGE_HIZ); 1829 1830 enum isl_aux_state **aux_state = 1831 create_aux_state_map(mt, ISL_AUX_STATE_AUX_INVALID); 1832 if (!aux_state) 1833 return false; 1834 1835 struct isl_surf temp_hiz_surf; 1836 1837 MAYBE_UNUSED bool ok = 1838 isl_surf_get_hiz_surf(&brw->isl_dev, &mt->surf, &temp_hiz_surf); 1839 assert(ok); 1840 1841 const uint32_t alloc_flags = BO_ALLOC_BUSY; 1842 mt->hiz_buf = intel_alloc_aux_buffer(brw, "hiz-miptree", 1843 &temp_hiz_surf, alloc_flags, mt); 1844 1845 if (!mt->hiz_buf) { 1846 free(aux_state); 1847 return false; 1848 } 1849 1850 for (unsigned level = mt->first_level; level <= mt->last_level; ++level) 1851 intel_miptree_level_enable_hiz(brw, mt, level); 1852 1853 mt->aux_state = aux_state; 1854 1855 return true; 1856 } 1857 1858 1859 /** 1860 * Allocate the initial aux surface for a miptree based on mt->aux_usage 1861 * 1862 * Since MCS, HiZ, and CCS_E can compress more than just clear color, we 1863 * create the auxiliary surfaces up-front. CCS_D, on the other hand, can only 1864 * compress clear color so we wait until an actual fast-clear to allocate it. 1865 */ 1866 static bool 1867 intel_miptree_alloc_aux(struct brw_context *brw, 1868 struct intel_mipmap_tree *mt) 1869 { 1870 switch (mt->aux_usage) { 1871 case ISL_AUX_USAGE_NONE: 1872 return true; 1873 1874 case ISL_AUX_USAGE_HIZ: 1875 assert(!_mesa_is_format_color_format(mt->format)); 1876 if (!intel_miptree_alloc_hiz(brw, mt)) 1877 return false; 1878 return true; 1879 1880 case ISL_AUX_USAGE_MCS: 1881 assert(_mesa_is_format_color_format(mt->format)); 1882 assert(mt->surf.samples > 1); 1883 if (!intel_miptree_alloc_mcs(brw, mt, mt->surf.samples)) 1884 return false; 1885 return true; 1886 1887 case ISL_AUX_USAGE_CCS_D: 1888 /* Since CCS_D can only compress clear color so we wait until an actual 1889 * fast-clear to allocate it. 1890 */ 1891 return true; 1892 1893 case ISL_AUX_USAGE_CCS_E: 1894 assert(_mesa_is_format_color_format(mt->format)); 1895 assert(mt->surf.samples == 1); 1896 if (!intel_miptree_alloc_ccs(brw, mt)) 1897 return false; 1898 return true; 1899 } 1900 1901 unreachable("Invalid aux usage"); 1902 } 1903 1904 1905 /** 1906 * Can the miptree sample using the hiz buffer? 1907 */ 1908 bool 1909 intel_miptree_sample_with_hiz(struct brw_context *brw, 1910 struct intel_mipmap_tree *mt) 1911 { 1912 const struct gen_device_info *devinfo = &brw->screen->devinfo; 1913 1914 /* It's unclear how well supported sampling from the hiz buffer is on GEN8, 1915 * so keep things conservative for now and never enable it unless we're SKL+. 1916 */ 1917 if (devinfo->gen < 9) { 1918 return false; 1919 } 1920 1921 if (!mt->hiz_buf) { 1922 return false; 1923 } 1924 1925 /* It seems the hardware won't fallback to the depth buffer if some of the 1926 * mipmap levels aren't available in the HiZ buffer. So we need all levels 1927 * of the texture to be HiZ enabled. 
1928 */ 1929 for (unsigned level = 0; level < mt->surf.levels; ++level) { 1930 if (!intel_miptree_level_has_hiz(mt, level)) 1931 return false; 1932 } 1933 1934 /* If compressed multisampling is enabled, then we use it for the auxiliary 1935 * buffer instead. 1936 * 1937 * From the BDW PRM (Volume 2d: Command Reference: Structures 1938 * RENDER_SURFACE_STATE.AuxiliarySurfaceMode): 1939 * 1940 * "If this field is set to AUX_HIZ, Number of Multisamples must be 1941 * MULTISAMPLECOUNT_1, and Surface Type cannot be SURFTYPE_3D. 1942 * 1943 * There is no such blurb for 1D textures, but there is sufficient evidence 1944 * that this is broken on SKL+. 1945 */ 1946 return (mt->surf.samples == 1 && 1947 mt->target != GL_TEXTURE_3D && 1948 mt->target != GL_TEXTURE_1D /* gen9+ restriction */); 1949 } 1950 1951 /** 1952 * Does the miptree slice have hiz enabled? 1953 */ 1954 bool 1955 intel_miptree_level_has_hiz(const struct intel_mipmap_tree *mt, uint32_t level) 1956 { 1957 intel_miptree_check_level_layer(mt, level, 0); 1958 return mt->level[level].has_hiz; 1959 } 1960 1961 static inline uint32_t 1962 miptree_level_range_length(const struct intel_mipmap_tree *mt, 1963 uint32_t start_level, uint32_t num_levels) 1964 { 1965 assert(start_level >= mt->first_level); 1966 assert(start_level <= mt->last_level); 1967 1968 if (num_levels == INTEL_REMAINING_LAYERS) 1969 num_levels = mt->last_level - start_level + 1; 1970 /* Check for overflow */ 1971 assert(start_level + num_levels >= start_level); 1972 assert(start_level + num_levels <= mt->last_level + 1); 1973 1974 return num_levels; 1975 } 1976 1977 static inline uint32_t 1978 miptree_layer_range_length(const struct intel_mipmap_tree *mt, uint32_t level, 1979 uint32_t start_layer, uint32_t num_layers) 1980 { 1981 assert(level <= mt->last_level); 1982 1983 const uint32_t total_num_layers = brw_get_num_logical_layers(mt, level); 1984 assert(start_layer < total_num_layers); 1985 if (num_layers == INTEL_REMAINING_LAYERS) 1986 num_layers = total_num_layers - start_layer; 1987 /* Check for overflow */ 1988 assert(start_layer + num_layers >= start_layer); 1989 assert(start_layer + num_layers <= total_num_layers); 1990 1991 return num_layers; 1992 } 1993 1994 bool 1995 intel_miptree_has_color_unresolved(const struct intel_mipmap_tree *mt, 1996 unsigned start_level, unsigned num_levels, 1997 unsigned start_layer, unsigned num_layers) 1998 { 1999 assert(_mesa_is_format_color_format(mt->format)); 2000 2001 if (!mt->mcs_buf) 2002 return false; 2003 2004 /* Clamp the level range to fit the miptree */ 2005 num_levels = miptree_level_range_length(mt, start_level, num_levels); 2006 2007 for (uint32_t l = 0; l < num_levels; l++) { 2008 const uint32_t level = start_level + l; 2009 const uint32_t level_layers = 2010 miptree_layer_range_length(mt, level, start_layer, num_layers); 2011 for (unsigned a = 0; a < level_layers; a++) { 2012 enum isl_aux_state aux_state = 2013 intel_miptree_get_aux_state(mt, level, start_layer + a); 2014 assert(aux_state != ISL_AUX_STATE_AUX_INVALID); 2015 if (aux_state != ISL_AUX_STATE_PASS_THROUGH) 2016 return true; 2017 } 2018 } 2019 2020 return false; 2021 } 2022 2023 static void 2024 intel_miptree_check_color_resolve(const struct brw_context *brw, 2025 const struct intel_mipmap_tree *mt, 2026 unsigned level, unsigned layer) 2027 { 2028 if (!mt->mcs_buf) 2029 return; 2030 2031 /* Fast color clear is supported for mipmapped surfaces only on Gen8+. 
*/ 2032 assert(brw->screen->devinfo.gen >= 8 || 2033 (level == 0 && mt->first_level == 0 && mt->last_level == 0)); 2034 2035 /* Compression of arrayed msaa surfaces is supported. */ 2036 if (mt->surf.samples > 1) 2037 return; 2038 2039 /* Fast color clear is supported for non-msaa arrays only on Gen8+. */ 2040 assert(brw->screen->devinfo.gen >= 8 || 2041 (layer == 0 && 2042 mt->surf.logical_level0_px.depth == 1 && 2043 mt->surf.logical_level0_px.array_len == 1)); 2044 2045 (void)level; 2046 (void)layer; 2047 } 2048 2049 static enum blorp_fast_clear_op 2050 get_ccs_d_resolve_op(enum isl_aux_state aux_state, 2051 enum isl_aux_usage aux_usage, 2052 bool fast_clear_supported) 2053 { 2054 assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_CCS_D); 2055 2056 const bool ccs_supported = aux_usage == ISL_AUX_USAGE_CCS_D; 2057 2058 assert(ccs_supported == fast_clear_supported); 2059 2060 switch (aux_state) { 2061 case ISL_AUX_STATE_CLEAR: 2062 case ISL_AUX_STATE_PARTIAL_CLEAR: 2063 if (!ccs_supported) 2064 return BLORP_FAST_CLEAR_OP_RESOLVE_FULL; 2065 else 2066 return BLORP_FAST_CLEAR_OP_NONE; 2067 2068 case ISL_AUX_STATE_PASS_THROUGH: 2069 return BLORP_FAST_CLEAR_OP_NONE; 2070 2071 case ISL_AUX_STATE_RESOLVED: 2072 case ISL_AUX_STATE_AUX_INVALID: 2073 case ISL_AUX_STATE_COMPRESSED_CLEAR: 2074 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR: 2075 break; 2076 } 2077 2078 unreachable("Invalid aux state for CCS_D"); 2079 } 2080 2081 static enum blorp_fast_clear_op 2082 get_ccs_e_resolve_op(enum isl_aux_state aux_state, 2083 enum isl_aux_usage aux_usage, 2084 bool fast_clear_supported) 2085 { 2086 /* CCS_E surfaces can be accessed as CCS_D if we're careful. */ 2087 assert(aux_usage == ISL_AUX_USAGE_NONE || 2088 aux_usage == ISL_AUX_USAGE_CCS_D || 2089 aux_usage == ISL_AUX_USAGE_CCS_E); 2090 2091 if (aux_usage == ISL_AUX_USAGE_CCS_D) 2092 assert(fast_clear_supported); 2093 2094 switch (aux_state) { 2095 case ISL_AUX_STATE_CLEAR: 2096 case ISL_AUX_STATE_PARTIAL_CLEAR: 2097 if (fast_clear_supported) 2098 return BLORP_FAST_CLEAR_OP_NONE; 2099 else if (aux_usage == ISL_AUX_USAGE_CCS_E) 2100 return BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL; 2101 else 2102 return BLORP_FAST_CLEAR_OP_RESOLVE_FULL; 2103 2104 case ISL_AUX_STATE_COMPRESSED_CLEAR: 2105 if (aux_usage != ISL_AUX_USAGE_CCS_E) 2106 return BLORP_FAST_CLEAR_OP_RESOLVE_FULL; 2107 else if (!fast_clear_supported) 2108 return BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL; 2109 else 2110 return BLORP_FAST_CLEAR_OP_NONE; 2111 2112 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR: 2113 if (aux_usage != ISL_AUX_USAGE_CCS_E) 2114 return BLORP_FAST_CLEAR_OP_RESOLVE_FULL; 2115 else 2116 return BLORP_FAST_CLEAR_OP_NONE; 2117 2118 case ISL_AUX_STATE_PASS_THROUGH: 2119 return BLORP_FAST_CLEAR_OP_NONE; 2120 2121 case ISL_AUX_STATE_RESOLVED: 2122 case ISL_AUX_STATE_AUX_INVALID: 2123 break; 2124 } 2125 2126 unreachable("Invalid aux state for CCS_E"); 2127 } 2128 2129 static void 2130 intel_miptree_prepare_ccs_access(struct brw_context *brw, 2131 struct intel_mipmap_tree *mt, 2132 uint32_t level, uint32_t layer, 2133 enum isl_aux_usage aux_usage, 2134 bool fast_clear_supported) 2135 { 2136 enum isl_aux_state aux_state = intel_miptree_get_aux_state(mt, level, layer); 2137 2138 enum blorp_fast_clear_op resolve_op; 2139 if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) { 2140 resolve_op = get_ccs_e_resolve_op(aux_state, aux_usage, 2141 fast_clear_supported); 2142 } else { 2143 assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D); 2144 resolve_op = get_ccs_d_resolve_op(aux_state, aux_usage, 2145 
fast_clear_supported); 2146 } 2147 2148 if (resolve_op != BLORP_FAST_CLEAR_OP_NONE) { 2149 intel_miptree_check_color_resolve(brw, mt, level, layer); 2150 brw_blorp_resolve_color(brw, mt, level, layer, resolve_op); 2151 2152 switch (resolve_op) { 2153 case BLORP_FAST_CLEAR_OP_RESOLVE_FULL: 2154 /* The CCS full resolve operation destroys the CCS and sets it to the 2155 * pass-through state. (You can also think of this as being both a 2156 * resolve and an ambiguate in one operation.) 2157 */ 2158 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2159 ISL_AUX_STATE_PASS_THROUGH); 2160 break; 2161 2162 case BLORP_FAST_CLEAR_OP_RESOLVE_PARTIAL: 2163 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2164 ISL_AUX_STATE_COMPRESSED_NO_CLEAR); 2165 break; 2166 2167 default: 2168 unreachable("Invalid resolve op"); 2169 } 2170 } 2171 } 2172 2173 static void 2174 intel_miptree_finish_ccs_write(struct brw_context *brw, 2175 struct intel_mipmap_tree *mt, 2176 uint32_t level, uint32_t layer, 2177 enum isl_aux_usage aux_usage) 2178 { 2179 assert(aux_usage == ISL_AUX_USAGE_NONE || 2180 aux_usage == ISL_AUX_USAGE_CCS_D || 2181 aux_usage == ISL_AUX_USAGE_CCS_E); 2182 2183 enum isl_aux_state aux_state = intel_miptree_get_aux_state(mt, level, layer); 2184 2185 if (mt->aux_usage == ISL_AUX_USAGE_CCS_E) { 2186 switch (aux_state) { 2187 case ISL_AUX_STATE_CLEAR: 2188 case ISL_AUX_STATE_PARTIAL_CLEAR: 2189 assert(aux_usage == ISL_AUX_USAGE_CCS_E || 2190 aux_usage == ISL_AUX_USAGE_CCS_D); 2191 2192 if (aux_usage == ISL_AUX_USAGE_CCS_E) { 2193 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2194 ISL_AUX_STATE_COMPRESSED_CLEAR); 2195 } else if (aux_state != ISL_AUX_STATE_PARTIAL_CLEAR) { 2196 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2197 ISL_AUX_STATE_PARTIAL_CLEAR); 2198 } 2199 break; 2200 2201 case ISL_AUX_STATE_COMPRESSED_CLEAR: 2202 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR: 2203 assert(aux_usage == ISL_AUX_USAGE_CCS_E); 2204 break; /* Nothing to do */ 2205 2206 case ISL_AUX_STATE_PASS_THROUGH: 2207 if (aux_usage == ISL_AUX_USAGE_CCS_E) { 2208 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2209 ISL_AUX_STATE_COMPRESSED_NO_CLEAR); 2210 } else { 2211 /* Nothing to do */ 2212 } 2213 break; 2214 2215 case ISL_AUX_STATE_RESOLVED: 2216 case ISL_AUX_STATE_AUX_INVALID: 2217 unreachable("Invalid aux state for CCS_E"); 2218 } 2219 } else { 2220 assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D); 2221 /* CCS_D is a bit simpler */ 2222 switch (aux_state) { 2223 case ISL_AUX_STATE_CLEAR: 2224 assert(aux_usage == ISL_AUX_USAGE_CCS_D); 2225 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2226 ISL_AUX_STATE_PARTIAL_CLEAR); 2227 break; 2228 2229 case ISL_AUX_STATE_PARTIAL_CLEAR: 2230 assert(aux_usage == ISL_AUX_USAGE_CCS_D); 2231 break; /* Nothing to do */ 2232 2233 case ISL_AUX_STATE_PASS_THROUGH: 2234 /* Nothing to do */ 2235 break; 2236 2237 case ISL_AUX_STATE_COMPRESSED_CLEAR: 2238 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR: 2239 case ISL_AUX_STATE_RESOLVED: 2240 case ISL_AUX_STATE_AUX_INVALID: 2241 unreachable("Invalid aux state for CCS_D"); 2242 } 2243 } 2244 } 2245 2246 static void 2247 intel_miptree_prepare_mcs_access(struct brw_context *brw, 2248 struct intel_mipmap_tree *mt, 2249 uint32_t layer, 2250 enum isl_aux_usage aux_usage, 2251 bool fast_clear_supported) 2252 { 2253 assert(aux_usage == ISL_AUX_USAGE_MCS); 2254 2255 switch (intel_miptree_get_aux_state(mt, 0, layer)) { 2256 case ISL_AUX_STATE_CLEAR: 2257 case ISL_AUX_STATE_COMPRESSED_CLEAR: 2258 if (!fast_clear_supported) { 2259 
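         /* Sketch of intent (descriptive comment only): the MCS partial
          * resolve below writes the indirect clear color into any samples the
          * MCS still tags as "clear", so this slice no longer depends on
          * fast-clear support in the consumer.  That is why the aux state is
          * recorded as COMPRESSED_NO_CLEAR immediately afterwards.
          */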
brw_blorp_mcs_partial_resolve(brw, mt, layer, 1); 2260 intel_miptree_set_aux_state(brw, mt, 0, layer, 1, 2261 ISL_AUX_STATE_COMPRESSED_NO_CLEAR); 2262 } 2263 break; 2264 2265 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR: 2266 break; /* Nothing to do */ 2267 2268 case ISL_AUX_STATE_RESOLVED: 2269 case ISL_AUX_STATE_PASS_THROUGH: 2270 case ISL_AUX_STATE_AUX_INVALID: 2271 case ISL_AUX_STATE_PARTIAL_CLEAR: 2272 unreachable("Invalid aux state for MCS"); 2273 } 2274 } 2275 2276 static void 2277 intel_miptree_finish_mcs_write(struct brw_context *brw, 2278 struct intel_mipmap_tree *mt, 2279 uint32_t layer, 2280 enum isl_aux_usage aux_usage) 2281 { 2282 assert(aux_usage == ISL_AUX_USAGE_MCS); 2283 2284 switch (intel_miptree_get_aux_state(mt, 0, layer)) { 2285 case ISL_AUX_STATE_CLEAR: 2286 intel_miptree_set_aux_state(brw, mt, 0, layer, 1, 2287 ISL_AUX_STATE_COMPRESSED_CLEAR); 2288 break; 2289 2290 case ISL_AUX_STATE_COMPRESSED_CLEAR: 2291 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR: 2292 break; /* Nothing to do */ 2293 2294 case ISL_AUX_STATE_RESOLVED: 2295 case ISL_AUX_STATE_PASS_THROUGH: 2296 case ISL_AUX_STATE_AUX_INVALID: 2297 case ISL_AUX_STATE_PARTIAL_CLEAR: 2298 unreachable("Invalid aux state for MCS"); 2299 } 2300 } 2301 2302 static void 2303 intel_miptree_prepare_hiz_access(struct brw_context *brw, 2304 struct intel_mipmap_tree *mt, 2305 uint32_t level, uint32_t layer, 2306 enum isl_aux_usage aux_usage, 2307 bool fast_clear_supported) 2308 { 2309 assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ); 2310 2311 enum blorp_hiz_op hiz_op = BLORP_HIZ_OP_NONE; 2312 switch (intel_miptree_get_aux_state(mt, level, layer)) { 2313 case ISL_AUX_STATE_CLEAR: 2314 case ISL_AUX_STATE_COMPRESSED_CLEAR: 2315 if (aux_usage != ISL_AUX_USAGE_HIZ || !fast_clear_supported) 2316 hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE; 2317 break; 2318 2319 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR: 2320 if (aux_usage != ISL_AUX_USAGE_HIZ) 2321 hiz_op = BLORP_HIZ_OP_DEPTH_RESOLVE; 2322 break; 2323 2324 case ISL_AUX_STATE_PASS_THROUGH: 2325 case ISL_AUX_STATE_RESOLVED: 2326 break; 2327 2328 case ISL_AUX_STATE_AUX_INVALID: 2329 if (aux_usage == ISL_AUX_USAGE_HIZ) 2330 hiz_op = BLORP_HIZ_OP_HIZ_RESOLVE; 2331 break; 2332 2333 case ISL_AUX_STATE_PARTIAL_CLEAR: 2334 unreachable("Invalid HiZ state"); 2335 } 2336 2337 if (hiz_op != BLORP_HIZ_OP_NONE) { 2338 intel_hiz_exec(brw, mt, level, layer, 1, hiz_op); 2339 2340 switch (hiz_op) { 2341 case BLORP_HIZ_OP_DEPTH_RESOLVE: 2342 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2343 ISL_AUX_STATE_RESOLVED); 2344 break; 2345 2346 case BLORP_HIZ_OP_HIZ_RESOLVE: 2347 /* The HiZ resolve operation is actually an ambiguate */ 2348 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2349 ISL_AUX_STATE_PASS_THROUGH); 2350 break; 2351 2352 default: 2353 unreachable("Invalid HiZ op"); 2354 } 2355 } 2356 } 2357 2358 static void 2359 intel_miptree_finish_hiz_write(struct brw_context *brw, 2360 struct intel_mipmap_tree *mt, 2361 uint32_t level, uint32_t layer, 2362 enum isl_aux_usage aux_usage) 2363 { 2364 assert(aux_usage == ISL_AUX_USAGE_NONE || aux_usage == ISL_AUX_USAGE_HIZ); 2365 2366 switch (intel_miptree_get_aux_state(mt, level, layer)) { 2367 case ISL_AUX_STATE_CLEAR: 2368 assert(aux_usage == ISL_AUX_USAGE_HIZ); 2369 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2370 ISL_AUX_STATE_COMPRESSED_CLEAR); 2371 break; 2372 2373 case ISL_AUX_STATE_COMPRESSED_NO_CLEAR: 2374 case ISL_AUX_STATE_COMPRESSED_CLEAR: 2375 assert(aux_usage == ISL_AUX_USAGE_HIZ); 2376 break; /* Nothing to do */ 
2377 2378 case ISL_AUX_STATE_RESOLVED: 2379 if (aux_usage == ISL_AUX_USAGE_HIZ) { 2380 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2381 ISL_AUX_STATE_COMPRESSED_NO_CLEAR); 2382 } else { 2383 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2384 ISL_AUX_STATE_AUX_INVALID); 2385 } 2386 break; 2387 2388 case ISL_AUX_STATE_PASS_THROUGH: 2389 if (aux_usage == ISL_AUX_USAGE_HIZ) { 2390 intel_miptree_set_aux_state(brw, mt, level, layer, 1, 2391 ISL_AUX_STATE_COMPRESSED_NO_CLEAR); 2392 } 2393 break; 2394 2395 case ISL_AUX_STATE_AUX_INVALID: 2396 assert(aux_usage != ISL_AUX_USAGE_HIZ); 2397 break; 2398 2399 case ISL_AUX_STATE_PARTIAL_CLEAR: 2400 unreachable("Invalid HiZ state"); 2401 } 2402 } 2403 2404 void 2405 intel_miptree_prepare_access(struct brw_context *brw, 2406 struct intel_mipmap_tree *mt, 2407 uint32_t start_level, uint32_t num_levels, 2408 uint32_t start_layer, uint32_t num_layers, 2409 enum isl_aux_usage aux_usage, 2410 bool fast_clear_supported) 2411 { 2412 num_levels = miptree_level_range_length(mt, start_level, num_levels); 2413 2414 switch (mt->aux_usage) { 2415 case ISL_AUX_USAGE_NONE: 2416 /* Nothing to do */ 2417 break; 2418 2419 case ISL_AUX_USAGE_MCS: 2420 assert(mt->mcs_buf); 2421 assert(start_level == 0 && num_levels == 1); 2422 const uint32_t level_layers = 2423 miptree_layer_range_length(mt, 0, start_layer, num_layers); 2424 for (uint32_t a = 0; a < level_layers; a++) { 2425 intel_miptree_prepare_mcs_access(brw, mt, start_layer + a, 2426 aux_usage, fast_clear_supported); 2427 } 2428 break; 2429 2430 case ISL_AUX_USAGE_CCS_D: 2431 case ISL_AUX_USAGE_CCS_E: 2432 if (!mt->mcs_buf) 2433 return; 2434 2435 for (uint32_t l = 0; l < num_levels; l++) { 2436 const uint32_t level = start_level + l; 2437 const uint32_t level_layers = 2438 miptree_layer_range_length(mt, level, start_layer, num_layers); 2439 for (uint32_t a = 0; a < level_layers; a++) { 2440 intel_miptree_prepare_ccs_access(brw, mt, level, 2441 start_layer + a, 2442 aux_usage, fast_clear_supported); 2443 } 2444 } 2445 break; 2446 2447 case ISL_AUX_USAGE_HIZ: 2448 assert(mt->hiz_buf); 2449 for (uint32_t l = 0; l < num_levels; l++) { 2450 const uint32_t level = start_level + l; 2451 if (!intel_miptree_level_has_hiz(mt, level)) 2452 continue; 2453 2454 const uint32_t level_layers = 2455 miptree_layer_range_length(mt, level, start_layer, num_layers); 2456 for (uint32_t a = 0; a < level_layers; a++) { 2457 intel_miptree_prepare_hiz_access(brw, mt, level, start_layer + a, 2458 aux_usage, fast_clear_supported); 2459 } 2460 } 2461 break; 2462 2463 default: 2464 unreachable("Invalid aux usage"); 2465 } 2466 } 2467 2468 void 2469 intel_miptree_finish_write(struct brw_context *brw, 2470 struct intel_mipmap_tree *mt, uint32_t level, 2471 uint32_t start_layer, uint32_t num_layers, 2472 enum isl_aux_usage aux_usage) 2473 { 2474 num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers); 2475 2476 switch (mt->aux_usage) { 2477 case ISL_AUX_USAGE_NONE: 2478 /* Nothing to do */ 2479 break; 2480 2481 case ISL_AUX_USAGE_MCS: 2482 assert(mt->mcs_buf); 2483 for (uint32_t a = 0; a < num_layers; a++) { 2484 intel_miptree_finish_mcs_write(brw, mt, start_layer + a, 2485 aux_usage); 2486 } 2487 break; 2488 2489 case ISL_AUX_USAGE_CCS_D: 2490 case ISL_AUX_USAGE_CCS_E: 2491 if (!mt->mcs_buf) 2492 return; 2493 2494 for (uint32_t a = 0; a < num_layers; a++) { 2495 intel_miptree_finish_ccs_write(brw, mt, level, start_layer + a, 2496 aux_usage); 2497 } 2498 break; 2499 2500 case ISL_AUX_USAGE_HIZ: 2501 if 
(!intel_miptree_level_has_hiz(mt, level))
         return;

      for (uint32_t a = 0; a < num_layers; a++) {
         intel_miptree_finish_hiz_write(brw, mt, level, start_layer + a,
                                        aux_usage);
      }
      break;

   default:
      unreachable("Invalid aux usage");
   }
}

enum isl_aux_state
intel_miptree_get_aux_state(const struct intel_mipmap_tree *mt,
                            uint32_t level, uint32_t layer)
{
   intel_miptree_check_level_layer(mt, level, layer);

   if (_mesa_is_format_color_format(mt->format)) {
      assert(mt->mcs_buf != NULL);
      assert(mt->surf.samples == 1 ||
             mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   } else if (mt->format == MESA_FORMAT_S_UINT8) {
      unreachable("Cannot get aux state for stencil");
   } else {
      assert(intel_miptree_level_has_hiz(mt, level));
   }

   return mt->aux_state[level][layer];
}

void
intel_miptree_set_aux_state(struct brw_context *brw,
                            struct intel_mipmap_tree *mt, uint32_t level,
                            uint32_t start_layer, uint32_t num_layers,
                            enum isl_aux_state aux_state)
{
   num_layers = miptree_layer_range_length(mt, level, start_layer, num_layers);

   if (_mesa_is_format_color_format(mt->format)) {
      assert(mt->mcs_buf != NULL);
      assert(mt->surf.samples == 1 ||
             mt->surf.msaa_layout == ISL_MSAA_LAYOUT_ARRAY);
   } else if (mt->format == MESA_FORMAT_S_UINT8) {
      unreachable("Cannot set aux state for stencil");
   } else {
      assert(intel_miptree_level_has_hiz(mt, level));
   }

   for (unsigned a = 0; a < num_layers; a++) {
      if (mt->aux_state[level][start_layer + a] != aux_state) {
         mt->aux_state[level][start_layer + a] = aux_state;
         brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE;
      }
   }
}

/* On Gen9, color buffers may be compressed by the hardware (lossless
 * compression). There are, however, format restrictions, and care must be
 * taken that the sampler engine is capable of re-interpreting a buffer with a
 * format different from the one it was originally written with.
 *
 * For example, SRGB formats are not compressible and the sampler engine isn't
 * capable of treating RGBA_UNORM as SRGB_ALPHA. In such a case the underlying
 * color buffer needs to be resolved so that the sampling surface can be
 * sampled as non-compressed (i.e., without the auxiliary MCS buffer being
 * set).
2570 */ 2571 static bool 2572 can_texture_with_ccs(struct brw_context *brw, 2573 struct intel_mipmap_tree *mt, 2574 enum isl_format view_format) 2575 { 2576 if (mt->aux_usage != ISL_AUX_USAGE_CCS_E) 2577 return false; 2578 2579 if (!format_ccs_e_compat_with_miptree(&brw->screen->devinfo, 2580 mt, view_format)) { 2581 perf_debug("Incompatible sampling format (%s) for rbc (%s)\n", 2582 isl_format_get_layout(view_format)->name, 2583 _mesa_get_format_name(mt->format)); 2584 return false; 2585 } 2586 2587 return true; 2588 } 2589 2590 enum isl_aux_usage 2591 intel_miptree_texture_aux_usage(struct brw_context *brw, 2592 struct intel_mipmap_tree *mt, 2593 enum isl_format view_format) 2594 { 2595 switch (mt->aux_usage) { 2596 case ISL_AUX_USAGE_HIZ: 2597 if (intel_miptree_sample_with_hiz(brw, mt)) 2598 return ISL_AUX_USAGE_HIZ; 2599 break; 2600 2601 case ISL_AUX_USAGE_MCS: 2602 return ISL_AUX_USAGE_MCS; 2603 2604 case ISL_AUX_USAGE_CCS_D: 2605 case ISL_AUX_USAGE_CCS_E: 2606 if (!mt->mcs_buf) { 2607 assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D); 2608 return ISL_AUX_USAGE_NONE; 2609 } 2610 2611 /* If we don't have any unresolved color, report an aux usage of 2612 * ISL_AUX_USAGE_NONE. This way, texturing won't even look at the 2613 * aux surface and we can save some bandwidth. 2614 */ 2615 if (!intel_miptree_has_color_unresolved(mt, 0, INTEL_REMAINING_LEVELS, 2616 0, INTEL_REMAINING_LAYERS)) 2617 return ISL_AUX_USAGE_NONE; 2618 2619 if (can_texture_with_ccs(brw, mt, view_format)) 2620 return ISL_AUX_USAGE_CCS_E; 2621 break; 2622 2623 default: 2624 break; 2625 } 2626 2627 return ISL_AUX_USAGE_NONE; 2628 } 2629 2630 static bool 2631 isl_formats_are_fast_clear_compatible(enum isl_format a, enum isl_format b) 2632 { 2633 /* On gen8 and earlier, the hardware was only capable of handling 0/1 clear 2634 * values so sRGB curve application was a no-op for all fast-clearable 2635 * formats. 2636 * 2637 * On gen9+, the hardware supports arbitrary clear values. For sRGB clear 2638 * values, the hardware interprets the floats, not as what would be 2639 * returned from the sampler (or written by the shader), but as being 2640 * between format conversion and sRGB curve application. This means that 2641 * we can switch between sRGB and UNORM without having to whack the clear 2642 * color. 2643 */ 2644 return isl_format_srgb_to_linear(a) == isl_format_srgb_to_linear(b); 2645 } 2646 2647 void 2648 intel_miptree_prepare_texture(struct brw_context *brw, 2649 struct intel_mipmap_tree *mt, 2650 enum isl_format view_format, 2651 uint32_t start_level, uint32_t num_levels, 2652 uint32_t start_layer, uint32_t num_layers, 2653 bool disable_aux) 2654 { 2655 enum isl_aux_usage aux_usage = disable_aux ? ISL_AUX_USAGE_NONE : 2656 intel_miptree_texture_aux_usage(brw, mt, view_format); 2657 bool clear_supported = aux_usage != ISL_AUX_USAGE_NONE; 2658 2659 /* Clear color is specified as ints or floats and the conversion is done by 2660 * the sampler. If we have a texture view, we would have to perform the 2661 * clear color conversion manually. Just disable clear color. 
2662 */ 2663 if (!isl_formats_are_fast_clear_compatible(mt->surf.format, view_format)) 2664 clear_supported = false; 2665 2666 intel_miptree_prepare_access(brw, mt, start_level, num_levels, 2667 start_layer, num_layers, 2668 aux_usage, clear_supported); 2669 } 2670 2671 void 2672 intel_miptree_prepare_image(struct brw_context *brw, 2673 struct intel_mipmap_tree *mt) 2674 { 2675 /* The data port doesn't understand any compression */ 2676 intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS, 2677 0, INTEL_REMAINING_LAYERS, 2678 ISL_AUX_USAGE_NONE, false); 2679 } 2680 2681 enum isl_aux_usage 2682 intel_miptree_render_aux_usage(struct brw_context *brw, 2683 struct intel_mipmap_tree *mt, 2684 enum isl_format render_format, 2685 bool blend_enabled, 2686 bool draw_aux_disabled) 2687 { 2688 struct gen_device_info *devinfo = &brw->screen->devinfo; 2689 2690 if (draw_aux_disabled) 2691 return ISL_AUX_USAGE_NONE; 2692 2693 switch (mt->aux_usage) { 2694 case ISL_AUX_USAGE_MCS: 2695 assert(mt->mcs_buf); 2696 return ISL_AUX_USAGE_MCS; 2697 2698 case ISL_AUX_USAGE_CCS_D: 2699 case ISL_AUX_USAGE_CCS_E: 2700 if (!mt->mcs_buf) { 2701 assert(mt->aux_usage == ISL_AUX_USAGE_CCS_D); 2702 return ISL_AUX_USAGE_NONE; 2703 } 2704 2705 /* gen9 hardware technically supports non-0/1 clear colors with sRGB 2706 * formats. However, there are issues with blending where it doesn't 2707 * properly apply the sRGB curve to the clear color when blending. 2708 */ 2709 if (devinfo->gen == 9 && blend_enabled && 2710 isl_format_is_srgb(render_format) && 2711 !isl_color_value_is_zero_one(mt->fast_clear_color, render_format)) 2712 return ISL_AUX_USAGE_NONE; 2713 2714 if (mt->aux_usage == ISL_AUX_USAGE_CCS_E && 2715 format_ccs_e_compat_with_miptree(&brw->screen->devinfo, 2716 mt, render_format)) 2717 return ISL_AUX_USAGE_CCS_E; 2718 2719 /* Otherwise, we have to fall back to CCS_D */ 2720 return ISL_AUX_USAGE_CCS_D; 2721 2722 default: 2723 return ISL_AUX_USAGE_NONE; 2724 } 2725 } 2726 2727 void 2728 intel_miptree_prepare_render(struct brw_context *brw, 2729 struct intel_mipmap_tree *mt, uint32_t level, 2730 uint32_t start_layer, uint32_t layer_count, 2731 enum isl_aux_usage aux_usage) 2732 { 2733 intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count, 2734 aux_usage, aux_usage != ISL_AUX_USAGE_NONE); 2735 } 2736 2737 void 2738 intel_miptree_finish_render(struct brw_context *brw, 2739 struct intel_mipmap_tree *mt, uint32_t level, 2740 uint32_t start_layer, uint32_t layer_count, 2741 enum isl_aux_usage aux_usage) 2742 { 2743 assert(_mesa_is_format_color_format(mt->format)); 2744 2745 intel_miptree_finish_write(brw, mt, level, start_layer, layer_count, 2746 aux_usage); 2747 } 2748 2749 void 2750 intel_miptree_prepare_depth(struct brw_context *brw, 2751 struct intel_mipmap_tree *mt, uint32_t level, 2752 uint32_t start_layer, uint32_t layer_count) 2753 { 2754 intel_miptree_prepare_access(brw, mt, level, 1, start_layer, layer_count, 2755 mt->aux_usage, mt->hiz_buf != NULL); 2756 } 2757 2758 void 2759 intel_miptree_finish_depth(struct brw_context *brw, 2760 struct intel_mipmap_tree *mt, uint32_t level, 2761 uint32_t start_layer, uint32_t layer_count, 2762 bool depth_written) 2763 { 2764 if (depth_written) { 2765 intel_miptree_finish_write(brw, mt, level, start_layer, layer_count, 2766 mt->hiz_buf != NULL); 2767 } 2768 } 2769 2770 void 2771 intel_miptree_prepare_external(struct brw_context *brw, 2772 struct intel_mipmap_tree *mt) 2773 { 2774 enum isl_aux_usage aux_usage = ISL_AUX_USAGE_NONE; 2775 bool 
supports_fast_clear = false; 2776 2777 const struct isl_drm_modifier_info *mod_info = 2778 isl_drm_modifier_get_info(mt->drm_modifier); 2779 2780 if (mod_info && mod_info->aux_usage != ISL_AUX_USAGE_NONE) { 2781 /* CCS_E is the only supported aux for external images and it's only 2782 * supported on very simple images. 2783 */ 2784 assert(mod_info->aux_usage == ISL_AUX_USAGE_CCS_E); 2785 assert(_mesa_is_format_color_format(mt->format)); 2786 assert(mt->first_level == 0 && mt->last_level == 0); 2787 assert(mt->surf.logical_level0_px.depth == 1); 2788 assert(mt->surf.logical_level0_px.array_len == 1); 2789 assert(mt->surf.samples == 1); 2790 assert(mt->mcs_buf != NULL); 2791 2792 aux_usage = mod_info->aux_usage; 2793 supports_fast_clear = mod_info->supports_clear_color; 2794 } 2795 2796 intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS, 2797 0, INTEL_REMAINING_LAYERS, 2798 aux_usage, supports_fast_clear); 2799 } 2800 2801 /** 2802 * Make it possible to share the BO backing the given miptree with another 2803 * process or another miptree. 2804 * 2805 * Fast color clears are unsafe with shared buffers, so we need to resolve and 2806 * then discard the MCS buffer, if present. We also set the no_ccs flag to 2807 * ensure that no MCS buffer gets allocated in the future. 2808 * 2809 * HiZ is similarly unsafe with shared buffers. 2810 */ 2811 void 2812 intel_miptree_make_shareable(struct brw_context *brw, 2813 struct intel_mipmap_tree *mt) 2814 { 2815 /* MCS buffers are also used for multisample buffers, but we can't resolve 2816 * away a multisample MCS buffer because it's an integral part of how the 2817 * pixel data is stored. Fortunately this code path should never be 2818 * reached for multisample buffers. 2819 */ 2820 assert(mt->surf.msaa_layout == ISL_MSAA_LAYOUT_NONE || 2821 mt->surf.samples == 1); 2822 2823 intel_miptree_prepare_access(brw, mt, 0, INTEL_REMAINING_LEVELS, 2824 0, INTEL_REMAINING_LAYERS, 2825 ISL_AUX_USAGE_NONE, false); 2826 2827 if (mt->mcs_buf) { 2828 brw_bo_unreference(mt->mcs_buf->bo); 2829 free(mt->mcs_buf); 2830 mt->mcs_buf = NULL; 2831 2832 /* Any pending MCS/CCS operations are no longer needed. Trying to 2833 * execute any will likely crash due to the missing aux buffer. So let's 2834 * delete all pending ops. 2835 */ 2836 free(mt->aux_state); 2837 mt->aux_state = NULL; 2838 brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE; 2839 } 2840 2841 if (mt->hiz_buf) { 2842 intel_miptree_aux_buffer_free(mt->hiz_buf); 2843 mt->hiz_buf = NULL; 2844 2845 for (uint32_t l = mt->first_level; l <= mt->last_level; ++l) { 2846 mt->level[l].has_hiz = false; 2847 } 2848 2849 /* Any pending HiZ operations are no longer needed. Trying to execute 2850 * any will likely crash due to the missing aux buffer. So let's delete 2851 * all pending ops. 2852 */ 2853 free(mt->aux_state); 2854 mt->aux_state = NULL; 2855 brw->ctx.NewDriverState |= BRW_NEW_AUX_STATE; 2856 } 2857 2858 mt->aux_usage = ISL_AUX_USAGE_NONE; 2859 mt->supports_fast_clear = false; 2860 } 2861 2862 2863 /** 2864 * \brief Get pointer offset into stencil buffer. 2865 * 2866 * The stencil buffer is W tiled. Since the GTT is incapable of W fencing, we 2867 * must decode the tile's layout in software. 2868 * 2869 * See 2870 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.2.1 W-Major Tile 2871 * Format. 
2872 * - PRM, 2011 Sandy Bridge, Volume 1, Part 2, Section 4.5.3 Tiling Algorithm 2873 * 2874 * Even though the returned offset is always positive, the return type is 2875 * signed due to 2876 * commit e8b1c6d6f55f5be3bef25084fdd8b6127517e137 2877 * mesa: Fix return type of _mesa_get_format_bytes() (#37351) 2878 */ 2879 static intptr_t 2880 intel_offset_S8(uint32_t stride, uint32_t x, uint32_t y, bool swizzled) 2881 { 2882 uint32_t tile_size = 4096; 2883 uint32_t tile_width = 64; 2884 uint32_t tile_height = 64; 2885 uint32_t row_size = 64 * stride / 2; /* Two rows are interleaved. */ 2886 2887 uint32_t tile_x = x / tile_width; 2888 uint32_t tile_y = y / tile_height; 2889 2890 /* The byte's address relative to the tile's base addres. */ 2891 uint32_t byte_x = x % tile_width; 2892 uint32_t byte_y = y % tile_height; 2893 2894 uintptr_t u = tile_y * row_size 2895 + tile_x * tile_size 2896 + 512 * (byte_x / 8) 2897 + 64 * (byte_y / 8) 2898 + 32 * ((byte_y / 4) % 2) 2899 + 16 * ((byte_x / 4) % 2) 2900 + 8 * ((byte_y / 2) % 2) 2901 + 4 * ((byte_x / 2) % 2) 2902 + 2 * (byte_y % 2) 2903 + 1 * (byte_x % 2); 2904 2905 if (swizzled) { 2906 /* adjust for bit6 swizzling */ 2907 if (((byte_x / 8) % 2) == 1) { 2908 if (((byte_y / 8) % 2) == 0) { 2909 u += 64; 2910 } else { 2911 u -= 64; 2912 } 2913 } 2914 } 2915 2916 return u; 2917 } 2918 2919 void 2920 intel_miptree_updownsample(struct brw_context *brw, 2921 struct intel_mipmap_tree *src, 2922 struct intel_mipmap_tree *dst) 2923 { 2924 unsigned src_w = src->surf.logical_level0_px.width; 2925 unsigned src_h = src->surf.logical_level0_px.height; 2926 unsigned dst_w = dst->surf.logical_level0_px.width; 2927 unsigned dst_h = dst->surf.logical_level0_px.height; 2928 2929 brw_blorp_blit_miptrees(brw, 2930 src, 0 /* level */, 0 /* layer */, 2931 src->format, SWIZZLE_XYZW, 2932 dst, 0 /* level */, 0 /* layer */, dst->format, 2933 0, 0, src_w, src_h, 2934 0, 0, dst_w, dst_h, 2935 GL_NEAREST, false, false /*mirror x, y*/, 2936 false, false); 2937 2938 if (src->stencil_mt) { 2939 src_w = src->stencil_mt->surf.logical_level0_px.width; 2940 src_h = src->stencil_mt->surf.logical_level0_px.height; 2941 dst_w = dst->stencil_mt->surf.logical_level0_px.width; 2942 dst_h = dst->stencil_mt->surf.logical_level0_px.height; 2943 2944 brw_blorp_blit_miptrees(brw, 2945 src->stencil_mt, 0 /* level */, 0 /* layer */, 2946 src->stencil_mt->format, SWIZZLE_XYZW, 2947 dst->stencil_mt, 0 /* level */, 0 /* layer */, 2948 dst->stencil_mt->format, 2949 0, 0, src_w, src_h, 2950 0, 0, dst_w, dst_h, 2951 GL_NEAREST, false, false /*mirror x, y*/, 2952 false, false /* decode/encode srgb */); 2953 } 2954 } 2955 2956 void 2957 intel_update_r8stencil(struct brw_context *brw, 2958 struct intel_mipmap_tree *mt) 2959 { 2960 const struct gen_device_info *devinfo = &brw->screen->devinfo; 2961 2962 assert(devinfo->gen >= 7); 2963 struct intel_mipmap_tree *src = 2964 mt->format == MESA_FORMAT_S_UINT8 ? mt : mt->stencil_mt; 2965 if (!src || devinfo->gen >= 8 || !src->r8stencil_needs_update) 2966 return; 2967 2968 assert(src->surf.size > 0); 2969 2970 if (!mt->r8stencil_mt) { 2971 assert(devinfo->gen > 6); /* Handle MIPTREE_LAYOUT_GEN6_HIZ_STENCIL */ 2972 mt->r8stencil_mt = make_surface( 2973 brw, 2974 src->target, 2975 MESA_FORMAT_R_UINT8, 2976 src->first_level, src->last_level, 2977 src->surf.logical_level0_px.width, 2978 src->surf.logical_level0_px.height, 2979 src->surf.dim == ISL_SURF_DIM_3D ? 
2980 src->surf.logical_level0_px.depth : 2981 src->surf.logical_level0_px.array_len, 2982 src->surf.samples, 2983 ISL_TILING_Y0_BIT, 2984 ISL_SURF_USAGE_TEXTURE_BIT, 2985 BO_ALLOC_BUSY, 0, NULL); 2986 assert(mt->r8stencil_mt); 2987 } 2988 2989 struct intel_mipmap_tree *dst = mt->r8stencil_mt; 2990 2991 for (int level = src->first_level; level <= src->last_level; level++) { 2992 const unsigned depth = src->surf.dim == ISL_SURF_DIM_3D ? 2993 minify(src->surf.phys_level0_sa.depth, level) : 2994 src->surf.phys_level0_sa.array_len; 2995 2996 for (unsigned layer = 0; layer < depth; layer++) { 2997 brw_blorp_copy_miptrees(brw, 2998 src, level, layer, 2999 dst, level, layer, 3000 0, 0, 0, 0, 3001 minify(src->surf.logical_level0_px.width, 3002 level), 3003 minify(src->surf.logical_level0_px.height, 3004 level)); 3005 } 3006 } 3007 3008 brw_cache_flush_for_read(brw, dst->bo); 3009 src->r8stencil_needs_update = false; 3010 } 3011 3012 static void * 3013 intel_miptree_map_raw(struct brw_context *brw, 3014 struct intel_mipmap_tree *mt, 3015 GLbitfield mode) 3016 { 3017 struct brw_bo *bo = mt->bo; 3018 3019 if (brw_batch_references(&brw->batch, bo)) 3020 intel_batchbuffer_flush(brw); 3021 3022 return brw_bo_map(brw, bo, mode); 3023 } 3024 3025 static void 3026 intel_miptree_unmap_raw(struct intel_mipmap_tree *mt) 3027 { 3028 brw_bo_unmap(mt->bo); 3029 } 3030 3031 static void 3032 intel_miptree_map_gtt(struct brw_context *brw, 3033 struct intel_mipmap_tree *mt, 3034 struct intel_miptree_map *map, 3035 unsigned int level, unsigned int slice) 3036 { 3037 unsigned int bw, bh; 3038 void *base; 3039 unsigned int image_x, image_y; 3040 intptr_t x = map->x; 3041 intptr_t y = map->y; 3042 3043 /* For compressed formats, the stride is the number of bytes per 3044 * row of blocks. intel_miptree_get_image_offset() already does 3045 * the divide. 3046 */ 3047 _mesa_get_format_block_size(mt->format, &bw, &bh); 3048 assert(y % bh == 0); 3049 assert(x % bw == 0); 3050 y /= bh; 3051 x /= bw; 3052 3053 base = intel_miptree_map_raw(brw, mt, map->mode); 3054 3055 if (base == NULL) 3056 map->ptr = NULL; 3057 else { 3058 base += mt->offset; 3059 3060 /* Note that in the case of cube maps, the caller must have passed the 3061 * slice number referencing the face. 3062 */ 3063 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y); 3064 x += image_x; 3065 y += image_y; 3066 3067 map->stride = mt->surf.row_pitch; 3068 map->ptr = base + y * map->stride + x * mt->cpp; 3069 } 3070 3071 DBG("%s: %d,%d %dx%d from mt %p (%s) " 3072 "%"PRIiPTR",%"PRIiPTR" = %p/%d\n", __func__, 3073 map->x, map->y, map->w, map->h, 3074 mt, _mesa_get_format_name(mt->format), 3075 x, y, map->ptr, map->stride); 3076 } 3077 3078 static void 3079 intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt) 3080 { 3081 intel_miptree_unmap_raw(mt); 3082 } 3083 3084 static void 3085 intel_miptree_map_blit(struct brw_context *brw, 3086 struct intel_mipmap_tree *mt, 3087 struct intel_miptree_map *map, 3088 unsigned int level, unsigned int slice) 3089 { 3090 map->linear_mt = intel_miptree_create(brw, GL_TEXTURE_2D, mt->format, 3091 /* first_level */ 0, 3092 /* last_level */ 0, 3093 map->w, map->h, 1, 3094 /* samples */ 1, 3095 MIPTREE_CREATE_LINEAR); 3096 3097 if (!map->linear_mt) { 3098 fprintf(stderr, "Failed to allocate blit temporary\n"); 3099 goto fail; 3100 } 3101 map->stride = map->linear_mt->surf.row_pitch; 3102 3103 /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no 3104 * INVALIDATE_RANGE_BIT. 
WRITE_BIT needs the original values read in unless 3105 * invalidate is set, since we'll be writing the whole rectangle from our 3106 * temporary buffer back out. 3107 */ 3108 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) { 3109 if (!intel_miptree_copy(brw, 3110 mt, level, slice, map->x, map->y, 3111 map->linear_mt, 0, 0, 0, 0, 3112 map->w, map->h)) { 3113 fprintf(stderr, "Failed to blit\n"); 3114 goto fail; 3115 } 3116 } 3117 3118 map->ptr = intel_miptree_map_raw(brw, map->linear_mt, map->mode); 3119 3120 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__, 3121 map->x, map->y, map->w, map->h, 3122 mt, _mesa_get_format_name(mt->format), 3123 level, slice, map->ptr, map->stride); 3124 3125 return; 3126 3127 fail: 3128 intel_miptree_release(&map->linear_mt); 3129 map->ptr = NULL; 3130 map->stride = 0; 3131 } 3132 3133 static void 3134 intel_miptree_unmap_blit(struct brw_context *brw, 3135 struct intel_mipmap_tree *mt, 3136 struct intel_miptree_map *map, 3137 unsigned int level, 3138 unsigned int slice) 3139 { 3140 struct gl_context *ctx = &brw->ctx; 3141 3142 intel_miptree_unmap_raw(map->linear_mt); 3143 3144 if (map->mode & GL_MAP_WRITE_BIT) { 3145 bool ok = intel_miptree_copy(brw, 3146 map->linear_mt, 0, 0, 0, 0, 3147 mt, level, slice, map->x, map->y, 3148 map->w, map->h); 3149 WARN_ONCE(!ok, "Failed to blit from linear temporary mapping"); 3150 } 3151 3152 intel_miptree_release(&map->linear_mt); 3153 } 3154 3155 /** 3156 * "Map" a buffer by copying it to an untiled temporary using MOVNTDQA. 3157 */ 3158 #if defined(USE_SSE41) 3159 static void 3160 intel_miptree_map_movntdqa(struct brw_context *brw, 3161 struct intel_mipmap_tree *mt, 3162 struct intel_miptree_map *map, 3163 unsigned int level, unsigned int slice) 3164 { 3165 assert(map->mode & GL_MAP_READ_BIT); 3166 assert(!(map->mode & GL_MAP_WRITE_BIT)); 3167 3168 DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__, 3169 map->x, map->y, map->w, map->h, 3170 mt, _mesa_get_format_name(mt->format), 3171 level, slice, map->ptr, map->stride); 3172 3173 /* Map the original image */ 3174 uint32_t image_x; 3175 uint32_t image_y; 3176 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y); 3177 image_x += map->x; 3178 image_y += map->y; 3179 3180 void *src = intel_miptree_map_raw(brw, mt, map->mode); 3181 if (!src) 3182 return; 3183 3184 src += mt->offset; 3185 3186 src += image_y * mt->surf.row_pitch; 3187 src += image_x * mt->cpp; 3188 3189 /* Due to the pixel offsets for the particular image being mapped, our 3190 * src pointer may not be 16-byte aligned. However, if the pitch is 3191 * divisible by 16, then the amount by which it's misaligned will remain 3192 * consistent from row to row. 3193 */ 3194 assert((mt->surf.row_pitch % 16) == 0); 3195 const int misalignment = ((uintptr_t) src) & 15; 3196 3197 /* Create an untiled temporary buffer for the mapping. */ 3198 const unsigned width_bytes = _mesa_format_row_stride(mt->format, map->w); 3199 3200 map->stride = ALIGN(misalignment + width_bytes, 16); 3201 3202 map->buffer = _mesa_align_malloc(map->stride * map->h, 16); 3203 /* Offset the destination so it has the same misalignment as src. 
*/ 3204 map->ptr = map->buffer + misalignment; 3205 3206 assert((((uintptr_t) map->ptr) & 15) == misalignment); 3207 3208 for (uint32_t y = 0; y < map->h; y++) { 3209 void *dst_ptr = map->ptr + y * map->stride; 3210 void *src_ptr = src + y * mt->surf.row_pitch; 3211 3212 _mesa_streaming_load_memcpy(dst_ptr, src_ptr, width_bytes); 3213 } 3214 3215 intel_miptree_unmap_raw(mt); 3216 } 3217 3218 static void 3219 intel_miptree_unmap_movntdqa(struct brw_context *brw, 3220 struct intel_mipmap_tree *mt, 3221 struct intel_miptree_map *map, 3222 unsigned int level, 3223 unsigned int slice) 3224 { 3225 _mesa_align_free(map->buffer); 3226 map->buffer = NULL; 3227 map->ptr = NULL; 3228 } 3229 #endif 3230 3231 static void 3232 intel_miptree_map_s8(struct brw_context *brw, 3233 struct intel_mipmap_tree *mt, 3234 struct intel_miptree_map *map, 3235 unsigned int level, unsigned int slice) 3236 { 3237 map->stride = map->w; 3238 map->buffer = map->ptr = malloc(map->stride * map->h); 3239 if (!map->buffer) 3240 return; 3241 3242 /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no 3243 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless 3244 * invalidate is set, since we'll be writing the whole rectangle from our 3245 * temporary buffer back out. 3246 */ 3247 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) { 3248 uint8_t *untiled_s8_map = map->ptr; 3249 uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_READ_BIT); 3250 unsigned int image_x, image_y; 3251 3252 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y); 3253 3254 for (uint32_t y = 0; y < map->h; y++) { 3255 for (uint32_t x = 0; x < map->w; x++) { 3256 ptrdiff_t offset = intel_offset_S8(mt->surf.row_pitch, 3257 x + image_x + map->x, 3258 y + image_y + map->y, 3259 brw->has_swizzling); 3260 untiled_s8_map[y * map->w + x] = tiled_s8_map[offset]; 3261 } 3262 } 3263 3264 intel_miptree_unmap_raw(mt); 3265 3266 DBG("%s: %d,%d %dx%d from mt %p %d,%d = %p/%d\n", __func__, 3267 map->x, map->y, map->w, map->h, 3268 mt, map->x + image_x, map->y + image_y, map->ptr, map->stride); 3269 } else { 3270 DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__, 3271 map->x, map->y, map->w, map->h, 3272 mt, map->ptr, map->stride); 3273 } 3274 } 3275 3276 static void 3277 intel_miptree_unmap_s8(struct brw_context *brw, 3278 struct intel_mipmap_tree *mt, 3279 struct intel_miptree_map *map, 3280 unsigned int level, 3281 unsigned int slice) 3282 { 3283 if (map->mode & GL_MAP_WRITE_BIT) { 3284 unsigned int image_x, image_y; 3285 uint8_t *untiled_s8_map = map->ptr; 3286 uint8_t *tiled_s8_map = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT); 3287 3288 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y); 3289 3290 for (uint32_t y = 0; y < map->h; y++) { 3291 for (uint32_t x = 0; x < map->w; x++) { 3292 ptrdiff_t offset = intel_offset_S8(mt->surf.row_pitch, 3293 image_x + x + map->x, 3294 image_y + y + map->y, 3295 brw->has_swizzling); 3296 tiled_s8_map[offset] = untiled_s8_map[y * map->w + x]; 3297 } 3298 } 3299 3300 intel_miptree_unmap_raw(mt); 3301 } 3302 3303 free(map->buffer); 3304 } 3305 3306 static void 3307 intel_miptree_map_etc(struct brw_context *brw, 3308 struct intel_mipmap_tree *mt, 3309 struct intel_miptree_map *map, 3310 unsigned int level, 3311 unsigned int slice) 3312 { 3313 assert(mt->etc_format != MESA_FORMAT_NONE); 3314 if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) { 3315 assert(mt->format == MESA_FORMAT_R8G8B8X8_UNORM); 3316 } 3317 3318 assert(map->mode & GL_MAP_WRITE_BIT); 
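   /* Descriptive note: the miptree itself stores an uncompressed equivalent
    * of the ETC data (see the MESA_FORMAT_R8G8B8X8_UNORM assert above for
    * ETC1), and the application's compressed payload is transcoded into it at
    * unmap time by intel_miptree_unmap_etc().  Reading the original ETC bits
    * back is therefore not supported, hence the asserts that the mapping is
    * write-only and invalidating.
    */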
3319 assert(map->mode & GL_MAP_INVALIDATE_RANGE_BIT); 3320 3321 map->stride = _mesa_format_row_stride(mt->etc_format, map->w); 3322 map->buffer = malloc(_mesa_format_image_size(mt->etc_format, 3323 map->w, map->h, 1)); 3324 map->ptr = map->buffer; 3325 } 3326 3327 static void 3328 intel_miptree_unmap_etc(struct brw_context *brw, 3329 struct intel_mipmap_tree *mt, 3330 struct intel_miptree_map *map, 3331 unsigned int level, 3332 unsigned int slice) 3333 { 3334 uint32_t image_x; 3335 uint32_t image_y; 3336 intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y); 3337 3338 image_x += map->x; 3339 image_y += map->y; 3340 3341 uint8_t *dst = intel_miptree_map_raw(brw, mt, GL_MAP_WRITE_BIT) 3342 + image_y * mt->surf.row_pitch 3343 + image_x * mt->cpp; 3344 3345 if (mt->etc_format == MESA_FORMAT_ETC1_RGB8) 3346 _mesa_etc1_unpack_rgba8888(dst, mt->surf.row_pitch, 3347 map->ptr, map->stride, 3348 map->w, map->h); 3349 else 3350 _mesa_unpack_etc2_format(dst, mt->surf.row_pitch, 3351 map->ptr, map->stride, 3352 map->w, map->h, mt->etc_format); 3353 3354 intel_miptree_unmap_raw(mt); 3355 free(map->buffer); 3356 } 3357 3358 /** 3359 * Mapping function for packed depth/stencil miptrees backed by real separate 3360 * miptrees for depth and stencil. 3361 * 3362 * On gen7, and to support HiZ pre-gen7, we have to have the stencil buffer 3363 * separate from the depth buffer. Yet at the GL API level, we have to expose 3364 * packed depth/stencil textures and FBO attachments, and Mesa core expects to 3365 * be able to map that memory for texture storage and glReadPixels-type 3366 * operations. We give Mesa core that access by mallocing a temporary and 3367 * copying the data between the actual backing store and the temporary. 3368 */ 3369 static void 3370 intel_miptree_map_depthstencil(struct brw_context *brw, 3371 struct intel_mipmap_tree *mt, 3372 struct intel_miptree_map *map, 3373 unsigned int level, unsigned int slice) 3374 { 3375 struct intel_mipmap_tree *z_mt = mt; 3376 struct intel_mipmap_tree *s_mt = mt->stencil_mt; 3377 bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32; 3378 int packed_bpp = map_z32f_x24s8 ? 8 : 4; 3379 3380 map->stride = map->w * packed_bpp; 3381 map->buffer = map->ptr = malloc(map->stride * map->h); 3382 if (!map->buffer) 3383 return; 3384 3385 /* One of either READ_BIT or WRITE_BIT or both is set. READ_BIT implies no 3386 * INVALIDATE_RANGE_BIT. WRITE_BIT needs the original values read in unless 3387 * invalidate is set, since we'll be writing the whole rectangle from our 3388 * temporary buffer back out. 
3389 */ 3390 if (!(map->mode & GL_MAP_INVALIDATE_RANGE_BIT)) { 3391 uint32_t *packed_map = map->ptr; 3392 uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_READ_BIT); 3393 uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_READ_BIT); 3394 unsigned int s_image_x, s_image_y; 3395 unsigned int z_image_x, z_image_y; 3396 3397 intel_miptree_get_image_offset(s_mt, level, slice, 3398 &s_image_x, &s_image_y); 3399 intel_miptree_get_image_offset(z_mt, level, slice, 3400 &z_image_x, &z_image_y); 3401 3402 for (uint32_t y = 0; y < map->h; y++) { 3403 for (uint32_t x = 0; x < map->w; x++) { 3404 int map_x = map->x + x, map_y = map->y + y; 3405 ptrdiff_t s_offset = intel_offset_S8(s_mt->surf.row_pitch, 3406 map_x + s_image_x, 3407 map_y + s_image_y, 3408 brw->has_swizzling); 3409 ptrdiff_t z_offset = ((map_y + z_image_y) * 3410 (z_mt->surf.row_pitch / 4) + 3411 (map_x + z_image_x)); 3412 uint8_t s = s_map[s_offset]; 3413 uint32_t z = z_map[z_offset]; 3414 3415 if (map_z32f_x24s8) { 3416 packed_map[(y * map->w + x) * 2 + 0] = z; 3417 packed_map[(y * map->w + x) * 2 + 1] = s; 3418 } else { 3419 packed_map[y * map->w + x] = (s << 24) | (z & 0x00ffffff); 3420 } 3421 } 3422 } 3423 3424 intel_miptree_unmap_raw(s_mt); 3425 intel_miptree_unmap_raw(z_mt); 3426 3427 DBG("%s: %d,%d %dx%d from z mt %p %d,%d, s mt %p %d,%d = %p/%d\n", 3428 __func__, 3429 map->x, map->y, map->w, map->h, 3430 z_mt, map->x + z_image_x, map->y + z_image_y, 3431 s_mt, map->x + s_image_x, map->y + s_image_y, 3432 map->ptr, map->stride); 3433 } else { 3434 DBG("%s: %d,%d %dx%d from mt %p = %p/%d\n", __func__, 3435 map->x, map->y, map->w, map->h, 3436 mt, map->ptr, map->stride); 3437 } 3438 } 3439 3440 static void 3441 intel_miptree_unmap_depthstencil(struct brw_context *brw, 3442 struct intel_mipmap_tree *mt, 3443 struct intel_miptree_map *map, 3444 unsigned int level, 3445 unsigned int slice) 3446 { 3447 struct intel_mipmap_tree *z_mt = mt; 3448 struct intel_mipmap_tree *s_mt = mt->stencil_mt; 3449 bool map_z32f_x24s8 = mt->format == MESA_FORMAT_Z_FLOAT32; 3450 3451 if (map->mode & GL_MAP_WRITE_BIT) { 3452 uint32_t *packed_map = map->ptr; 3453 uint8_t *s_map = intel_miptree_map_raw(brw, s_mt, GL_MAP_WRITE_BIT); 3454 uint32_t *z_map = intel_miptree_map_raw(brw, z_mt, GL_MAP_WRITE_BIT); 3455 unsigned int s_image_x, s_image_y; 3456 unsigned int z_image_x, z_image_y; 3457 3458 intel_miptree_get_image_offset(s_mt, level, slice, 3459 &s_image_x, &s_image_y); 3460 intel_miptree_get_image_offset(z_mt, level, slice, 3461 &z_image_x, &z_image_y); 3462 3463 for (uint32_t y = 0; y < map->h; y++) { 3464 for (uint32_t x = 0; x < map->w; x++) { 3465 ptrdiff_t s_offset = intel_offset_S8(s_mt->surf.row_pitch, 3466 x + s_image_x + map->x, 3467 y + s_image_y + map->y, 3468 brw->has_swizzling); 3469 ptrdiff_t z_offset = ((y + z_image_y + map->y) * 3470 (z_mt->surf.row_pitch / 4) + 3471 (x + z_image_x + map->x)); 3472 3473 if (map_z32f_x24s8) { 3474 z_map[z_offset] = packed_map[(y * map->w + x) * 2 + 0]; 3475 s_map[s_offset] = packed_map[(y * map->w + x) * 2 + 1]; 3476 } else { 3477 uint32_t packed = packed_map[y * map->w + x]; 3478 s_map[s_offset] = packed >> 24; 3479 z_map[z_offset] = packed; 3480 } 3481 } 3482 } 3483 3484 intel_miptree_unmap_raw(s_mt); 3485 intel_miptree_unmap_raw(z_mt); 3486 3487 DBG("%s: %d,%d %dx%d from z mt %p (%s) %d,%d, s mt %p %d,%d = %p/%d\n", 3488 __func__, 3489 map->x, map->y, map->w, map->h, 3490 z_mt, _mesa_get_format_name(z_mt->format), 3491 map->x + z_image_x, map->y + z_image_y, 3492 s_mt, map->x + s_image_x, 
map->y + s_image_y, 3493 map->ptr, map->stride); 3494 } 3495 3496 free(map->buffer); 3497 } 3498 3499 /** 3500 * Create and attach a map to the miptree at (level, slice). Return the 3501 * attached map. 3502 */ 3503 static struct intel_miptree_map* 3504 intel_miptree_attach_map(struct intel_mipmap_tree *mt, 3505 unsigned int level, 3506 unsigned int slice, 3507 unsigned int x, 3508 unsigned int y, 3509 unsigned int w, 3510 unsigned int h, 3511 GLbitfield mode) 3512 { 3513 struct intel_miptree_map *map = calloc(1, sizeof(*map)); 3514 3515 if (!map) 3516 return NULL; 3517 3518 assert(mt->level[level].slice[slice].map == NULL); 3519 mt->level[level].slice[slice].map = map; 3520 3521 map->mode = mode; 3522 map->x = x; 3523 map->y = y; 3524 map->w = w; 3525 map->h = h; 3526 3527 return map; 3528 } 3529 3530 /** 3531 * Release the map at (level, slice). 3532 */ 3533 static void 3534 intel_miptree_release_map(struct intel_mipmap_tree *mt, 3535 unsigned int level, 3536 unsigned int slice) 3537 { 3538 struct intel_miptree_map **map; 3539 3540 map = &mt->level[level].slice[slice].map; 3541 free(*map); 3542 *map = NULL; 3543 } 3544 3545 static bool 3546 can_blit_slice(struct intel_mipmap_tree *mt, 3547 unsigned int level, unsigned int slice) 3548 { 3549 /* See intel_miptree_blit() for details on the 32k pitch limit. */ 3550 if (mt->surf.row_pitch >= 32768) 3551 return false; 3552 3553 return true; 3554 } 3555 3556 static bool 3557 use_intel_mipree_map_blit(struct brw_context *brw, 3558 struct intel_mipmap_tree *mt, 3559 GLbitfield mode, 3560 unsigned int level, 3561 unsigned int slice) 3562 { 3563 const struct gen_device_info *devinfo = &brw->screen->devinfo; 3564 3565 if (devinfo->has_llc && 3566 /* It's probably not worth swapping to the blit ring because of 3567 * all the overhead involved. 3568 */ 3569 !(mode & GL_MAP_WRITE_BIT) && 3570 !mt->compressed && 3571 (mt->surf.tiling == ISL_TILING_X || 3572 /* Prior to Sandybridge, the blitter can't handle Y tiling */ 3573 (devinfo->gen >= 6 && mt->surf.tiling == ISL_TILING_Y0) || 3574 /* Fast copy blit on skl+ supports all tiling formats. */ 3575 devinfo->gen >= 9) && 3576 can_blit_slice(mt, level, slice)) 3577 return true; 3578 3579 if (mt->surf.tiling != ISL_TILING_LINEAR && 3580 mt->bo->size >= brw->max_gtt_map_object_size) { 3581 assert(can_blit_slice(mt, level, slice)); 3582 return true; 3583 } 3584 3585 return false; 3586 } 3587 3588 /** 3589 * Parameter \a out_stride has type ptrdiff_t not because the buffer stride may 3590 * exceed 32 bits but to diminish the likelihood subtle bugs in pointer 3591 * arithmetic overflow. 3592 * 3593 * If you call this function and use \a out_stride, then you're doing pointer 3594 * arithmetic on \a out_ptr. The type of \a out_stride doesn't prevent all 3595 * bugs. The caller must still take care to avoid 32-bit overflow errors in 3596 * all arithmetic expressions that contain buffer offsets and pixel sizes, 3597 * which usually have type uint32_t or GLuint. 
3598 */ 3599 void 3600 intel_miptree_map(struct brw_context *brw, 3601 struct intel_mipmap_tree *mt, 3602 unsigned int level, 3603 unsigned int slice, 3604 unsigned int x, 3605 unsigned int y, 3606 unsigned int w, 3607 unsigned int h, 3608 GLbitfield mode, 3609 void **out_ptr, 3610 ptrdiff_t *out_stride) 3611 { 3612 struct intel_miptree_map *map; 3613 3614 assert(mt->surf.samples == 1); 3615 3616 map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode); 3617 if (!map){ 3618 *out_ptr = NULL; 3619 *out_stride = 0; 3620 return; 3621 } 3622 3623 intel_miptree_access_raw(brw, mt, level, slice, 3624 map->mode & GL_MAP_WRITE_BIT); 3625 3626 if (mt->format == MESA_FORMAT_S_UINT8) { 3627 intel_miptree_map_s8(brw, mt, map, level, slice); 3628 } else if (mt->etc_format != MESA_FORMAT_NONE && 3629 !(mode & BRW_MAP_DIRECT_BIT)) { 3630 intel_miptree_map_etc(brw, mt, map, level, slice); 3631 } else if (mt->stencil_mt && !(mode & BRW_MAP_DIRECT_BIT)) { 3632 intel_miptree_map_depthstencil(brw, mt, map, level, slice); 3633 } else if (use_intel_mipree_map_blit(brw, mt, mode, level, slice)) { 3634 intel_miptree_map_blit(brw, mt, map, level, slice); 3635 #if defined(USE_SSE41) 3636 } else if (!(mode & GL_MAP_WRITE_BIT) && 3637 !mt->compressed && cpu_has_sse4_1 && 3638 (mt->surf.row_pitch % 16 == 0)) { 3639 intel_miptree_map_movntdqa(brw, mt, map, level, slice); 3640 #endif 3641 } else { 3642 intel_miptree_map_gtt(brw, mt, map, level, slice); 3643 } 3644 3645 *out_ptr = map->ptr; 3646 *out_stride = map->stride; 3647 3648 if (map->ptr == NULL) 3649 intel_miptree_release_map(mt, level, slice); 3650 } 3651 3652 void 3653 intel_miptree_unmap(struct brw_context *brw, 3654 struct intel_mipmap_tree *mt, 3655 unsigned int level, 3656 unsigned int slice) 3657 { 3658 struct intel_miptree_map *map = mt->level[level].slice[slice].map; 3659 3660 assert(mt->surf.samples == 1); 3661 3662 if (!map) 3663 return; 3664 3665 DBG("%s: mt %p (%s) level %d slice %d\n", __func__, 3666 mt, _mesa_get_format_name(mt->format), level, slice); 3667 3668 if (mt->format == MESA_FORMAT_S_UINT8) { 3669 intel_miptree_unmap_s8(brw, mt, map, level, slice); 3670 } else if (mt->etc_format != MESA_FORMAT_NONE && 3671 !(map->mode & BRW_MAP_DIRECT_BIT)) { 3672 intel_miptree_unmap_etc(brw, mt, map, level, slice); 3673 } else if (mt->stencil_mt && !(map->mode & BRW_MAP_DIRECT_BIT)) { 3674 intel_miptree_unmap_depthstencil(brw, mt, map, level, slice); 3675 } else if (map->linear_mt) { 3676 intel_miptree_unmap_blit(brw, mt, map, level, slice); 3677 #if defined(USE_SSE41) 3678 } else if (map->buffer && cpu_has_sse4_1) { 3679 intel_miptree_unmap_movntdqa(brw, mt, map, level, slice); 3680 #endif 3681 } else { 3682 intel_miptree_unmap_gtt(mt); 3683 } 3684 3685 intel_miptree_release_map(mt, level, slice); 3686 } 3687 3688 enum isl_surf_dim 3689 get_isl_surf_dim(GLenum target) 3690 { 3691 switch (target) { 3692 case GL_TEXTURE_1D: 3693 case GL_TEXTURE_1D_ARRAY: 3694 return ISL_SURF_DIM_1D; 3695 3696 case GL_TEXTURE_2D: 3697 case GL_TEXTURE_2D_ARRAY: 3698 case GL_TEXTURE_RECTANGLE: 3699 case GL_TEXTURE_CUBE_MAP: 3700 case GL_TEXTURE_CUBE_MAP_ARRAY: 3701 case GL_TEXTURE_2D_MULTISAMPLE: 3702 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY: 3703 case GL_TEXTURE_EXTERNAL_OES: 3704 return ISL_SURF_DIM_2D; 3705 3706 case GL_TEXTURE_3D: 3707 return ISL_SURF_DIM_3D; 3708 } 3709 3710 unreachable("Invalid texture target"); 3711 } 3712 3713 enum isl_dim_layout 3714 get_isl_dim_layout(const struct gen_device_info *devinfo, 3715 enum isl_tiling tiling, GLenum target) 3716 { 
3717 switch (target) { 3718 case GL_TEXTURE_1D: 3719 case GL_TEXTURE_1D_ARRAY: 3720 return (devinfo->gen >= 9 && tiling == ISL_TILING_LINEAR ? 3721 ISL_DIM_LAYOUT_GEN9_1D : ISL_DIM_LAYOUT_GEN4_2D); 3722 3723 case GL_TEXTURE_2D: 3724 case GL_TEXTURE_2D_ARRAY: 3725 case GL_TEXTURE_RECTANGLE: 3726 case GL_TEXTURE_2D_MULTISAMPLE: 3727 case GL_TEXTURE_2D_MULTISAMPLE_ARRAY: 3728 case GL_TEXTURE_EXTERNAL_OES: 3729 return ISL_DIM_LAYOUT_GEN4_2D; 3730 3731 case GL_TEXTURE_CUBE_MAP: 3732 case GL_TEXTURE_CUBE_MAP_ARRAY: 3733 return (devinfo->gen == 4 ? ISL_DIM_LAYOUT_GEN4_3D : 3734 ISL_DIM_LAYOUT_GEN4_2D); 3735 3736 case GL_TEXTURE_3D: 3737 return (devinfo->gen >= 9 ? 3738 ISL_DIM_LAYOUT_GEN4_2D : ISL_DIM_LAYOUT_GEN4_3D); 3739 } 3740 3741 unreachable("Invalid texture target"); 3742 } 3743 3744 enum isl_aux_usage 3745 intel_miptree_get_aux_isl_usage(const struct brw_context *brw, 3746 const struct intel_mipmap_tree *mt) 3747 { 3748 if (mt->hiz_buf) 3749 return ISL_AUX_USAGE_HIZ; 3750 3751 if (!mt->mcs_buf) 3752 return ISL_AUX_USAGE_NONE; 3753 3754 return mt->aux_usage; 3755 } 3756
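
/* Usage note (illustrative sketch only, not new driver code): callers bracket
 * GPU access with the prepare/finish helpers defined above.  For example, a
 * renderer drawing to one level/layer of a color miptree would typically do
 * something like the following, where brw, mt, level, layer, render_format
 * and blend_enabled come from the caller:
 *
 *    enum isl_aux_usage usage =
 *       intel_miptree_render_aux_usage(brw, mt, render_format,
 *                                      blend_enabled, false);
 *    intel_miptree_prepare_render(brw, mt, level, layer, 1, usage);
 *    ... emit draw calls ...
 *    intel_miptree_finish_render(brw, mt, level, layer, 1, usage);
 *
 * Texturing is analogous via intel_miptree_prepare_texture(), and images
 * shared with other processes go through intel_miptree_prepare_external()
 * before hand-off.
 */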