/*
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"
#include "main/condrender.h"
#include "util/hash_table.h"
#include "util/set.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_image.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "brw_context.h"
#include "brw_defines.h"

#define FILE_DEBUG_FLAG DEBUG_FBO

/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(irb);

   intel_miptree_release(&irb->mt);
   intel_miptree_release(&irb->singlesample_mt);

   _mesa_delete_renderbuffer(ctx, rb);
}

/**
 * \brief Downsample a winsys renderbuffer from mt to singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_renderbuffer_downsample(struct brw_context *brw,
                              struct intel_renderbuffer *irb)
{
   if (!irb->need_downsample)
      return;
   intel_miptree_updownsample(brw, irb->mt, irb->singlesample_mt);
   irb->need_downsample = false;
}
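/* Example (illustrative, not a call made by this function itself): a
 * glReadPixels() from a multisampled window-system back buffer maps the
 * renderbuffer, and intel_map_renderbuffer() below performs this downsample
 * so the CPU sees single-sampled data:
 *
 *    glReadBuffer(GL_BACK);
 *    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, pixels);
 */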
/**
 * \brief Upsample a winsys renderbuffer from singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_renderbuffer_upsample(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   assert(!irb->need_downsample);

   intel_miptree_updownsample(brw, irb->singlesample_mt, irb->mt);
}

/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *) rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;
   void *map;
   ptrdiff_t stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   intel_prepare_render(brw);

   /* The MapRenderbuffer API should always return a single-sampled mapping.
    * The only case where we are asked to map a multisampled RB is
    * glReadPixels() (or a swrast path such as glCopyTexImage()) from a
    * window-system MSAA buffer, and GL expects an automatic resolve to
    * happen.
    *
    * If it's a color miptree, there is a ->singlesample_mt which wraps the
    * actual window system renderbuffer (which we may resolve to at any
    * time), while the miptree itself is our driver-private allocation.  If
    * it's a depth or stencil miptree, we have a private MSAA buffer and no
    * shared singlesample buffer, and since we don't expect anybody to ever
    * actually resolve it, we just make a temporary singlesample buffer now
    * when we have to.
    */
   if (rb->NumSamples > 1) {
      if (!irb->singlesample_mt) {
         irb->singlesample_mt =
            intel_miptree_create_for_renderbuffer(brw, irb->mt->format,
                                                  rb->Width, rb->Height,
                                                  1 /*num_samples*/);
         if (!irb->singlesample_mt)
            goto fail;
         irb->singlesample_mt_is_tmp = true;
         irb->need_downsample = true;
      }

      intel_renderbuffer_downsample(brw, irb);
      mt = irb->singlesample_mt;

      irb->need_map_upsample = mode & GL_MAP_WRITE_BIT;
   } else {
      mt = irb->mt;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we
    * receive upside-down.  So we need to ask for a rectangle that is flipped
    * vertically, and we then return a pointer to the bottom of it with a
    * negative stride.
    */
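   /* Worked example of the flip (illustrative numbers): mapping rows [0, 8)
    * of a 64-pixel-tall winsys buffer requests miptree rows [56, 64), then
    * rebases the pointer to the last mapped row and negates the stride, so
    * out_map row 0 is the GL bottom row and each successive row steps
    * upward in memory.
    */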
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(brw, mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%"PRIdPTR"\n",
       __func__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
   return;

fail:
   *out_map = NULL;
   *out_stride = 0;
}

/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *) rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;

   DBG("%s: rb %d (%s)\n", __func__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      /* nothing to do */
      return;
   }

   if (rb->NumSamples > 1) {
      mt = irb->singlesample_mt;
   } else {
      mt = irb->mt;
   }

   intel_miptree_unmap(brw, mt, irb->mt_level, irb->mt_layer);

   if (irb->need_map_upsample) {
      intel_renderbuffer_upsample(brw, irb);
      irb->need_map_upsample = false;
   }

   if (irb->singlesample_mt_is_tmp)
      intel_miptree_release(&irb->singlesample_mt);
}


/**
 * Round up the requested multisample count to the next supported sample
 * size.
 */
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
   const int *msaa_modes = intel_supported_msaa_modes(intel);
   int quantized_samples = 0;

   for (int i = 0; msaa_modes[i] != -1; ++i) {
      if (msaa_modes[i] >= num_samples)
         quantized_samples = msaa_modes[i];
      else
         break;
   }

   return quantized_samples;
}
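/* Worked example (hypothetical mode list): if intel_supported_msaa_modes()
 * returns { 8, 4, 0, -1 }, a request for 6 samples scans 8 (>= 6), stops at
 * 4, and returns 8; a request for 3 returns 4.  The list is sorted in
 * decreasing order, so the result is the smallest supported count that is
 * >= the request.  A request larger than every supported mode falls out of
 * the loop immediately and returns 0.
 */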
static mesa_format
intel_renderbuffer_format(struct gl_context *ctx, GLenum internalFormat)
{
   struct brw_context *brw = brw_context(ctx);
   MAYBE_UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      return ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
                                             internalFormat,
                                             GL_NONE, GL_NONE);
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (brw->has_separate_stencil) {
         return MESA_FORMAT_S_UINT8;
      } else {
         assert(!devinfo->must_use_separate_stencil);
         return MESA_FORMAT_Z24_UNORM_S8_UINT;
      }
   }
}

static GLboolean
intel_alloc_private_renderbuffer_storage(struct gl_context *ctx,
                                         struct gl_renderbuffer *rb,
                                         GLenum internalFormat,
                                         GLuint width, GLuint height)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_screen *screen = brw->screen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(rb->Format != MESA_FORMAT_NONE);

   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __func__,
       _mesa_enum_to_string(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
                                                   width, height,
                                                   MAX2(rb->NumSamples, 1));
   if (!irb->mt)
      return false;

   irb->layer_count = 1;

   return true;
}

/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context *ctx,
                                 struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   rb->Format = intel_renderbuffer_format(ctx, internalFormat);
   return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat,
                                                   width, height);
}
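/* Illustrative GL sequence that reaches intel_alloc_renderbuffer_storage()
 * above; the sample count is quantized by intel_quantize_num_samples(), so
 * a request for 6 samples may end up as 8 depending on the hardware:
 *
 *    GLuint rb;
 *    glGenRenderbuffers(1, &rb);
 *    glBindRenderbuffer(GL_RENDERBUFFER, rb);
 *    glRenderbufferStorageMultisample(GL_RENDERBUFFER, 6, GL_RGBA8,
 *                                     256, 256);
 */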
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   __DRIimage *image;

   image = dri_screen->dri2.image->lookupEGLImage(dri_screen, image_handle,
                                                  dri_screen->loaderPrivate);
   if (image == NULL)
      return;

   if (image->planar_format && image->planar_format->nplanes > 1) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(planar buffers are "
                  "not supported as render targets.)");
      return;
   }

   /* __DRIimage is opaque to the core so it has to be checked here */
   if (!brw->mesa_format_supports_render[image->format]) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(unsupported image "
                  "format)");
      return;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);

   /* Disable creation of the miptree's aux buffers because the driver
    * exposes no EGL API to manage them.  That is, there is no API for
    * resolving the aux buffer's content to the main buffer nor for
    * invalidating the aux buffer's content.
    */
   irb->mt = intel_miptree_create_for_dri_image(brw, image, GL_TEXTURE_2D,
                                                image->format, false);
   if (!irb->mt)
      return;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->width;
   rb->Height = image->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_get_format_base_format(image->format);
   rb->NeedsFinishRenderTexture = true;
   irb->layer_count = 1;
}

/**
 * Called by _mesa_resize_framebuffer() for each hardware renderbuffer when a
 * window system framebuffer is resized.
 *
 * Any actual buffer reallocations for hardware renderbuffers (which would
 * have triggered _mesa_resize_framebuffer()) were done by
 * intel_process_dri2_buffer().
 */
static GLboolean
intel_alloc_window_storage(struct gl_context *ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   (void) ctx;
   assert(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}

/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context *ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   (void) rb;
   (void) internalFormat;
   (void) width;
   (void) height;
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}
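/* Illustrative EGL/GL sequence that lands in
 * intel_image_target_renderbuffer_storage() above (dpy, share_ctx, tex and
 * rb are application-provided names):
 *
 *    EGLImageKHR img =
 *       eglCreateImageKHR(dpy, share_ctx, EGL_GL_TEXTURE_2D_KHR,
 *                         (EGLClientBuffer) (uintptr_t) tex, NULL);
 *    glBindRenderbuffer(GL_RENDERBUFFER, rb);
 *    glEGLImageTargetRenderbufferStorageOES(GL_RENDERBUFFER, img);
 */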
/**
 * Create an intel_renderbuffer for a __DRIdrawable.  This function is
 * unrelated to GL renderbuffers (that is, those created by
 * glGenRenderbuffers).
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_winsys_renderbuffer(struct intel_screen *screen,
                                 mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb)
      return NULL;

   struct gl_renderbuffer *rb = &irb->Base.Base;
   irb->layer_count = 1;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->NumSamples = num_samples;

   /* The base format and internal format must be derived from the
    * user-visible format (that is, the gl_config's format), even if we
    * internally choose a different format for the renderbuffer.  Otherwise,
    * rendering may use incorrect channel write masks.
    */
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->InternalFormat = rb->_BaseFormat;

   rb->Format = format;
   if (!screen->mesa_format_supports_render[rb->Format]) {
      /* The glRenderbufferStorage paths in core Mesa detect if the driver
       * does not support the user-requested format, and then search for a
       * fallback format.  The DRI code bypasses core Mesa, though, so we do
       * the fallbacks here.
       *
       * We must support MESA_FORMAT_R8G8B8X8 on Android because the Android
       * framework requires HAL_PIXEL_FORMAT_RGBX8888 winsys surfaces.
       */
      rb->Format = _mesa_format_fallback_rgbx_to_rgba(rb->Format);
      assert(screen->mesa_format_supports_render[rb->Format]);
   }

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}

/**
 * Private window-system buffers (as opposed to ones shared with the display
 * server, which are created with intel_create_winsys_renderbuffer()) are
 * most similar in their handling to user-created renderbuffers, but they
 * have a resize handler that may be called at intel_update_renderbuffers()
 * time.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(struct intel_screen *screen,
                                  mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_winsys_renderbuffer(screen, format, num_samples);
   if (irb)
      irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;

   return irb;
}

/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context *ctx, GLuint name)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}

static bool
intel_renderbuffer_update_wrapper(struct brw_context *brw,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer,
                                  bool layered)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->AllocStorage = intel_nop_alloc_storage;

   /* adjust for texture view parameters */
   layer += image->TexObject->MinLayer;
   level += image->TexObject->MinLevel;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;
   irb->mt_layer = layer;

   if (!layered) {
      irb->layer_count = 1;
   } else if (mt->target != GL_TEXTURE_3D && image->TexObject->NumLayers > 0) {
      irb->layer_count = image->TexObject->NumLayers;
   } else {
      irb->layer_count = mt->surf.dim == ISL_SURF_DIM_3D ?
         minify(mt->surf.logical_level0_px.depth, level) :
         mt->surf.logical_level0_px.array_len;
   }

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   return true;
}

void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}
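/* Illustrative path into intel_render_texture() below: attaching a cube
 * face makes Mesa record the face in att->CubeMapFace, which becomes the
 * miptree layer here (fbo and tex are application-provided names):
 *
 *    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
 *    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
 *                           GL_TEXTURE_CUBE_MAP_NEGATIVE_Y, tex, 2);
 *    // att->CubeMapFace == 3, att->TextureLevel == 2 -> layer = 3
 */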
/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc.
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context *ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = att->Renderbuffer;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_texture_image *image = rb->TexImage;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fallback on drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

   if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer,
                                          att->Layered)) {
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height, image->Depth,
       rb->RefCount);
}


#define fbo_incomplete(fb, ...) do {                                          \
      static GLuint msg_id = 0;                                               \
      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) {    \
         _mesa_gl_debug(ctx, &msg_id,                                         \
                        MESA_DEBUG_SOURCE_API,                                \
                        MESA_DEBUG_TYPE_OTHER,                                \
                        MESA_DEBUG_SEVERITY_MEDIUM,                           \
                        __VA_ARGS__);                                         \
      }                                                                       \
      DBG(__VA_ARGS__);                                                       \
      fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;                               \
   } while (0)
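/* Note: fbo_incomplete() is deliberately non-hygienic -- it expects a local
 * `ctx` to be in scope at the expansion site and marks the framebuffer
 * GL_FRAMEBUFFER_UNSUPPORTED as a side effect, so it is only usable inside
 * validation code such as intel_validate_framebuffer() below.
 */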
/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   unsigned i;

   DBG("%s() on fb %p (%s)\n", __func__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (devinfo->gen >= 6) {
         const unsigned d_width = depth_mt->surf.phys_level0_sa.width;
         const unsigned d_height = depth_mt->surf.phys_level0_sa.height;
         const unsigned d_depth = depth_mt->surf.dim == ISL_SURF_DIM_3D ?
            depth_mt->surf.phys_level0_sa.depth :
            depth_mt->surf.phys_level0_sa.array_len;

         const unsigned s_width = stencil_mt->surf.phys_level0_sa.width;
         const unsigned s_height = stencil_mt->surf.phys_level0_sa.height;
         const unsigned s_depth = stencil_mt->surf.dim == ISL_SURF_DIM_3D ?
            stencil_mt->surf.phys_level0_sa.depth :
            stencil_mt->surf.phys_level0_sa.array_len;

         /* For gen >= 6, we are using the lod/minimum-array-element fields
          * and supporting layered rendering.  This means that we must
          * restrict the depth & stencil attachments to match in various more
          * restrictive ways (width, height, depth, LOD and layer).
          */
         if (d_width != s_width ||
             d_height != s_height ||
             d_depth != s_depth ||
             depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth and stencil must match in "
                           "width, height, depth, LOD and layer\n");
         }
      }
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on
          * prefers-separate-stencil hardware) we need to be sure they're the
          * same level/layer, since we'll be emitting a single packet
          * describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth image level/layer %d/%d != "
                           "stencil image %d/%d\n",
                           depthRb->mt_level,
                           depthRb->mt_layer,
                           stencilRb->mt_level,
                           stencilRb->mt_layer);
         }
      } else {
         if (!brw->has_separate_stencil) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "unsupported\n");
         }
         if (stencil_mt->format != MESA_FORMAT_S_UINT8) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil is %s "
                           "instead of S8\n",
                           _mesa_get_format_name(stencil_mt->format));
         }
         if (devinfo->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled.  From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             *     [DevSNB]: This field must be set to the same value
             *     (enabled or disabled) as Hierarchical Depth Buffer Enable.
             */
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "without HiZ\n");
         }
      }
   }

   for (i = 0; i < ARRAY_SIZE(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: attachment without "
                        "renderbuffer\n");
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         if (rb->TexImage->Border) {
            fbo_incomplete(fb, "FBO incomplete: texture with border\n");
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: software rendering "
                        "renderbuffer\n");
         continue;
      }

      if (!brw_render_target_supported(brw, rb)) {
         fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
                        "texture/renderbuffer format attached: %s\n",
                        _mesa_get_format_name(intel_rb_format(irb)));
      }
   }
}
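/* Illustrative glBlitFramebuffer() call that satisfies the 1:1, unmirrored,
 * unscissored conditions checked in intel_blit_framebuffer_with_blitter()
 * below (both framebuffers assumed to be at least 256x256):
 *
 *    glBlitFramebuffer(0, 0, 256, 256,    // source rectangle
 *                      0, 0, 256, 256,    // same-size destination rectangle
 *                      GL_COLOR_BUFFER_BIT, GL_NEAREST);
 */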
/**
 * Try to do a glBlitFramebuffer using the hardware blitter.
 * We can do this when there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    const struct gl_framebuffer *readFb,
                                    const struct gl_framebuffer *drawFb,
                                    GLint srcX0, GLint srcY0,
                                    GLint srcX1, GLint srcY1,
                                    GLint dstX0, GLint dstY0,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking for the buffers.
    */
   intel_prepare_render(brw);

   if (mask & GL_COLOR_BUFFER_BIT) {
      unsigned i;
      struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
      struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);

      if (!src_irb) {
         perf_debug("glBlitFramebuffer(): missing src renderbuffer.  "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the size of the texture and there is no
       * scissor, then we can probably use the blit engine.
       */
      if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
            srcY0 - srcY1 == dstY0 - dstY1 &&
            srcX1 >= srcX0 &&
            srcY1 >= srcY0 &&
            srcX0 >= 0 && srcX1 <= readFb->Width &&
            srcY0 >= 0 && srcY1 <= readFb->Height &&
            dstX0 >= 0 && dstX1 <= drawFb->Width &&
            dstY0 >= 0 && dstY1 <= drawFb->Height &&
            !(ctx->Scissor.EnableFlags))) {
         perf_debug("glBlitFramebuffer(): non-1:1 blit.  "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* Blit to all active draw buffers.  We don't do any pre-checking,
       * because we assume that copying to MRTs is rare, and failure midway
       * through copying is even more rare.  Even if it was to occur, it's
       * safe to let meta start the copy over from scratch, because
       * glBlitFramebuffer completely overwrites the destination pixels, and
       * results are undefined if any destination pixels have a dependency on
       * source pixels.
       */
      for (i = 0; i < drawFb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *dst_rb = drawFb->_ColorDrawBuffers[i];
         struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);

         if (!dst_irb) {
            perf_debug("glBlitFramebuffer(): missing dst renderbuffer.  "
                       "Falling back to software rendering.\n");
            return mask;
         }

         if (ctx->Color.sRGBEnabled &&
             _mesa_get_format_color_encoding(src_irb->mt->format) !=
             _mesa_get_format_color_encoding(dst_irb->mt->format)) {
            perf_debug("glBlitFramebuffer() with sRGB conversion cannot be "
                       "handled by BLT path.\n");
            return mask;
         }

         /* The (rb->Name == 0) arguments below flag window-system buffers,
          * which are stored upside-down relative to user FBOs, so
          * intel_miptree_blit() can flip the blit accordingly.
          */
         if (!intel_miptree_blit(brw,
                                 src_irb->mt,
                                 src_irb->mt_level, src_irb->mt_layer,
                                 srcX0, srcY0, src_rb->Name == 0,
                                 dst_irb->mt,
                                 dst_irb->mt_level, dst_irb->mt_layer,
                                 dstX0, dstY0, dst_rb->Name == 0,
                                 dstX1 - dstX0, dstY1 - dstY0, GL_COPY)) {
            perf_debug("glBlitFramebuffer(): unknown blit failure.  "
                       "Falling back to software rendering.\n");
            return mask;
         }
      }

      mask &= ~GL_COLOR_BUFFER_BIT;
   }

   return mask;
}

static void
intel_blit_framebuffer(struct gl_context *ctx,
                       struct gl_framebuffer *readFb,
                       struct gl_framebuffer *drawFb,
                       GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
                       GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
                       GLbitfield mask, GLenum filter)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Page 679 of the OpenGL 4.4 spec says:
    *    "Added BlitFramebuffer to commands affected by conditional rendering
    *     in section 10.10 (Bug 9562)."
    */
   if (!_mesa_check_conditional_render(ctx))
      return;

   if (devinfo->gen < 6) {
      /* On gen4-5, try BLT first.
       *
       * Gen4-5 have a single ring for both 3D and BLT operations, so there's
       * no inter-ring synchronization issues like on Gen6+.  It is
       * apparently faster than using the 3D pipeline.  Original Gen4 also
       * has to rebase and copy miptree slices in order to render to
       * unaligned locations.
       */
      mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
                                                 srcX0, srcY0, srcX1, srcY1,
                                                 dstX0, dstY0, dstX1, dstY1,
                                                 mask);
      if (mask == 0x0)
         return;
   }

   mask = brw_blorp_framebuffer(brw, readFb, drawFb,
                                srcX0, srcY0, srcX1, srcY1,
                                dstX0, dstY0, dstX1, dstY1,
                                mask, filter);
   if (mask == 0x0)
      return;

   mask = _mesa_meta_BlitFramebuffer(ctx, readFb, drawFb,
                                     srcX0, srcY0, srcX1, srcY1,
                                     dstX0, dstY0, dstX1, dstY1,
                                     mask, filter);
   if (mask == 0x0)
      return;

   if (devinfo->gen >= 8 && (mask & GL_STENCIL_BUFFER_BIT)) {
      assert(!"Invalid blit");
   }

   /* Try using the BLT engine. */
   mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
                                              srcX0, srcY0, srcX1, srcY1,
                                              dstX0, dstY0, dstX1, dstY1,
                                              mask);
   if (mask == 0x0)
      return;

   _swrast_BlitFramebuffer(ctx, readFb, drawFb,
                           srcX0, srcY0, srcX1, srcY1,
                           dstX0, dstY0, dstX1, dstY1,
                           mask, filter);
}
/**
 * Does the renderbuffer have HiZ enabled?
 */
bool
intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
{
   return intel_miptree_level_has_hiz(irb->mt, irb->mt_level);
}

void
intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   intel_get_image_dims(rb->TexImage, &width, &height, &depth);

   assert(irb->align_wa_mt == NULL);
   new_mt = intel_miptree_create(brw, GL_TEXTURE_2D,
                                 intel_image->base.Base.TexFormat,
                                 0, 0,
                                 width, height, 1,
                                 irb->mt->surf.samples,
                                 MIPTREE_CREATE_BUSY);

   if (!invalidate)
      intel_miptree_copy_slice(brw, intel_image->mt,
                               intel_image->base.Base.Level, irb->mt_layer,
                               new_mt, 0, 0);

   intel_miptree_reference(&irb->align_wa_mt, new_mt);
   intel_miptree_release(&new_mt);

   irb->draw_x = 0;
   irb->draw_y = 0;
}

void
brw_cache_sets_clear(struct brw_context *brw)
{
   struct hash_entry *render_entry;
   hash_table_foreach(brw->render_cache, render_entry)
      _mesa_hash_table_remove(brw->render_cache, render_entry);

   struct set_entry *depth_entry;
   set_foreach(brw->depth_cache, depth_entry)
      _mesa_set_remove(brw->depth_cache, depth_entry);
}

/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches.  Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do.  When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
static void
flush_depth_and_render_caches(struct brw_context *brw, struct brw_bo *bo)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE);
   } else {
      brw_emit_mi_flush(brw);
   }

   brw_cache_sets_clear(brw);
}

void
brw_cache_flush_for_read(struct brw_context *brw, struct brw_bo *bo)
{
   if (_mesa_hash_table_search(brw->render_cache, bo) ||
       _mesa_set_search(brw->depth_cache, bo))
      flush_depth_and_render_caches(brw, bo);
}

/* Pack an (isl_format, isl_aux_usage) pair into a pointer-sized value so it
 * can be stored as the data of a render-cache hash-table entry.
 */
static void *
format_aux_tuple(enum isl_format format, enum isl_aux_usage aux_usage)
{
   return (void *)(uintptr_t)((uint32_t)format << 8 | aux_usage);
}
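/* Worked example of the packing above (illustrative values): with
 * format == ISL_FORMAT_B8G8R8A8_UNORM and aux_usage == ISL_AUX_USAGE_CCS_E,
 * the tuple is ((uint32_t)format << 8) | aux_usage, so two cache entries
 * compare equal only when both fields match.  This relies on every
 * enum isl_aux_usage value fitting in the low 8 bits.
 */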
void
brw_cache_flush_for_render(struct brw_context *brw, struct brw_bo *bo,
                           enum isl_format format,
                           enum isl_aux_usage aux_usage)
{
   if (_mesa_set_search(brw->depth_cache, bo))
      flush_depth_and_render_caches(brw, bo);

   /* Check to see if this bo has been used by a previous rendering operation
    * but with a different format or aux usage.  If it has, flush the render
    * cache so we ensure that it's only in there with one format or aux usage
    * at a time.
    *
    * Even though it's not obvious, this can easily happen in practice.
    * Suppose a client is blending on a surface with sRGB encode enabled on
    * gen9.  This implies that you get AUX_USAGE_CCS_D at best.  If the
    * client then disables sRGB decode and continues blending we will flip on
    * AUX_USAGE_CCS_E without doing any sort of resolve in-between (this is
    * perfectly valid since any surface state that is legal under CCS_D is
    * also legal under CCS_E).  However, this means that we have fragments
    * in-flight which are rendering with UNORM+CCS_E and other fragments
    * in-flight with SRGB+CCS_D on the same surface at the same time and the
    * pixel scoreboard and color blender are trying to sort it all out.
    * This ends badly (i.e. GPU hangs).
    *
    * To date, we have never observed GPU hangs or even corruption to be
    * associated with switching the format, only the aux usage.  However,
    * there are comments in various docs which indicate that the render
    * cache isn't 100% resilient to format changes.  We may as well be
    * conservative and flush on format changes too.  We can always relax
    * this later if we find it to be a performance problem.
    */
   struct hash_entry *entry = _mesa_hash_table_search(brw->render_cache, bo);
   if (entry && entry->data != format_aux_tuple(format, aux_usage))
      flush_depth_and_render_caches(brw, bo);
}

void
brw_render_cache_add_bo(struct brw_context *brw, struct brw_bo *bo,
                        enum isl_format format,
                        enum isl_aux_usage aux_usage)
{
#ifndef NDEBUG
   struct hash_entry *entry = _mesa_hash_table_search(brw->render_cache, bo);
   if (entry) {
      /* Otherwise, someone didn't do a flush_for_render and that would be
       * very bad indeed.
       */
      assert(entry->data == format_aux_tuple(format, aux_usage));
   }
#endif

   _mesa_hash_table_insert(brw->render_cache, bo,
                           format_aux_tuple(format, aux_usage));
}

void
brw_cache_flush_for_depth(struct brw_context *brw, struct brw_bo *bo)
{
   if (_mesa_hash_table_search(brw->render_cache, bo))
      flush_depth_and_render_caches(brw, bo);
}

void
brw_depth_cache_add_bo(struct brw_context *brw, struct brw_bo *bo)
{
   _mesa_set_add(brw->depth_cache, bo);
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct brw_context *brw)
{
   struct dd_function_table *dd = &brw->ctx.Driver;
   dd->NewRenderbuffer = intel_new_renderbuffer;
   dd->MapRenderbuffer = intel_map_renderbuffer;
   dd->UnmapRenderbuffer = intel_unmap_renderbuffer;
   dd->RenderTexture = intel_render_texture;
   dd->ValidateFramebuffer = intel_validate_framebuffer;
   dd->BlitFramebuffer = intel_blit_framebuffer;
   dd->EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;

   brw->render_cache = _mesa_hash_table_create(brw, _mesa_hash_pointer,
                                               _mesa_key_pointer_equal);
   brw->depth_cache = _mesa_set_create(brw, _mesa_hash_pointer,
                                       _mesa_key_pointer_equal);
}
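/* Typical intra-batch coherency flow using the hooks above (illustrative,
 * with a hypothetical bo):
 *
 *    brw_render_cache_add_bo(brw, bo, format, aux_usage);  // after a draw
 *    ...
 *    brw_cache_flush_for_read(brw, bo);  // before sampling from the same bo
 *    // -> flush_depth_and_render_caches() emits the PIPE_CONTROL flushes
 *    //    and clears the tracking sets if the bo was found.
 */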