/*
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "main/renderbuffer.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"
#include "main/condrender.h"
#include "util/hash_table.h"
#include "util/set.h"

#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_image.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "brw_context.h"
#include "brw_defines.h"

#define FILE_DEBUG_FLAG DEBUG_FBO

/** Called by gl_renderbuffer::Delete() */
static void
intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(irb);

   intel_miptree_release(&irb->mt);
   intel_miptree_release(&irb->singlesample_mt);

   _mesa_delete_renderbuffer(ctx, rb);
}

/**
 * \brief Downsample a winsys renderbuffer from mt to singlesample_mt.
 *
 * If the miptree needs no downsample, then skip.
 */
void
intel_renderbuffer_downsample(struct brw_context *brw,
                              struct intel_renderbuffer *irb)
{
   if (!irb->need_downsample)
      return;
   intel_miptree_updownsample(brw, irb->mt, irb->singlesample_mt);
   irb->need_downsample = false;
}

/**
 * \brief Upsample a winsys renderbuffer from singlesample_mt to mt.
 *
 * The upsample is done unconditionally.
 */
void
intel_renderbuffer_upsample(struct brw_context *brw,
                            struct intel_renderbuffer *irb)
{
   assert(!irb->need_downsample);

   intel_miptree_updownsample(brw, irb->singlesample_mt, irb->mt);
}

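/* Illustrative sketch (not driver code): how the downsample/upsample pair
 * above is typically exercised by a CPU map of a multisampled winsys
 * buffer.  The renderbuffer "rb" and the access window are hypothetical;
 * the real call sites are intel_map_renderbuffer() and
 * intel_unmap_renderbuffer() below.
 *
 *    GLubyte *map;
 *    GLint stride;
 *
 *    // Mapping resolves mt into singlesample_mt first ...
 *    ctx->Driver.MapRenderbuffer(ctx, rb, 0, 0, rb->Width, rb->Height,
 *                                GL_MAP_WRITE_BIT, &map, &stride);
 *    // ... CPU writes land in the singlesample miptree ...
 *    // Unmapping upsamples singlesample_mt back into mt, because the
 *    // mapping was writable.
 *    ctx->Driver.UnmapRenderbuffer(ctx, rb);
 */
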
/**
 * \see dd_function_table::MapRenderbuffer
 */
static void
intel_map_renderbuffer(struct gl_context *ctx,
                       struct gl_renderbuffer *rb,
                       GLuint x, GLuint y, GLuint w, GLuint h,
                       GLbitfield mode,
                       GLubyte **out_map,
                       GLint *out_stride)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;
   void *map;
   ptrdiff_t stride;

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer), not an irb */
      GLint bpp = _mesa_get_format_bytes(rb->Format);
      GLint rowStride = srb->RowStride;
      *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
      *out_stride = rowStride;
      return;
   }

   intel_prepare_render(brw);

   /* The MapRenderbuffer API should always return a single-sampled mapping.
    * The case where we are asked to map multisampled RBs is in
    * glReadPixels() (or swrast paths like glCopyTexImage()) from a
    * window-system MSAA buffer, and GL expects an automatic resolve to
    * happen.
    *
    * If it's a color miptree, there is a ->singlesample_mt which wraps the
    * actual window system renderbuffer (which we may resolve to at any
    * time), while the miptree itself is our driver-private allocation.  If
    * it's a depth or stencil miptree, we have a private MSAA buffer and no
    * shared singlesample buffer, and since we don't expect anybody to ever
    * actually resolve it, we just make a temporary singlesample buffer now
    * when we have to.
    */
   if (rb->NumSamples > 1) {
      if (!irb->singlesample_mt) {
         irb->singlesample_mt =
            intel_miptree_create_for_renderbuffer(brw, irb->mt->format,
                                                  rb->Width, rb->Height,
                                                  0 /*num_samples*/);
         if (!irb->singlesample_mt)
            goto fail;
         irb->singlesample_mt_is_tmp = true;
         irb->need_downsample = true;
      }

      intel_renderbuffer_downsample(brw, irb);
      mt = irb->singlesample_mt;

      irb->need_map_upsample = mode & GL_MAP_WRITE_BIT;
   } else {
      mt = irb->mt;
   }

   /* For a window-system renderbuffer, we need to flip the mapping we
    * receive upside-down.  So we need to ask for a rectangle flipped
    * vertically, and we then return a pointer to the bottom of it with a
    * negative stride.
    */
   if (rb->Name == 0) {
      y = rb->Height - y - h;
   }

   intel_miptree_map(brw, mt, irb->mt_level, irb->mt_layer,
                     x, y, w, h, mode, &map, &stride);

   if (rb->Name == 0) {
      map += (h - 1) * stride;
      stride = -stride;
   }

   DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%"PRIdPTR"\n",
       __func__, rb->Name, _mesa_get_format_name(rb->Format),
       x, y, w, h, map, stride);

   *out_map = map;
   *out_stride = stride;
   return;

fail:
   *out_map = NULL;
   *out_stride = 0;
}

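/* Worked example (hypothetical numbers) of the window-system flip above:
 * for a winsys buffer with rb->Height == 100, mapping the GL rectangle
 * y == 10, h == 20 asks the miptree for y == 100 - 10 - 20 == 70.  If the
 * miptree map comes back with a stride of 400 bytes, the returned pointer
 * is advanced by (20 - 1) * 400 bytes to the last row and the stride is
 * negated, so callers walking "up" in GL coordinates walk "down" in
 * memory:
 *
 *    map += (20 - 1) * 400;
 *    stride = -400;
 */
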
/**
 * \see dd_function_table::UnmapRenderbuffer
 */
static void
intel_unmap_renderbuffer(struct gl_context *ctx,
                         struct gl_renderbuffer *rb)
{
   struct brw_context *brw = brw_context(ctx);
   struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct intel_mipmap_tree *mt;

   DBG("%s: rb %d (%s)\n", __func__,
       rb->Name, _mesa_get_format_name(rb->Format));

   if (srb->Buffer) {
      /* this is a malloc'd renderbuffer (accum buffer) */
      /* nothing to do */
      return;
   }

   if (rb->NumSamples > 1) {
      mt = irb->singlesample_mt;
   } else {
      mt = irb->mt;
   }

   intel_miptree_unmap(brw, mt, irb->mt_level, irb->mt_layer);

   if (irb->need_map_upsample) {
      intel_renderbuffer_upsample(brw, irb);
      irb->need_map_upsample = false;
   }

   if (irb->singlesample_mt_is_tmp)
      intel_miptree_release(&irb->singlesample_mt);
}


/**
 * Round up the requested multisample count to the next supported sample
 * size.  Returns 0 if the requested count is larger than the largest
 * supported MSAA mode.
 */
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
   const int *msaa_modes = intel_supported_msaa_modes(intel);
   int quantized_samples = 0;

   for (int i = 0; msaa_modes[i] != -1; ++i) {
      if (msaa_modes[i] >= num_samples)
         quantized_samples = msaa_modes[i];
      else
         break;
   }

   return quantized_samples;
}

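/* Worked example (with a hypothetical mode list): if
 * intel_supported_msaa_modes() returned the descending list {8, 4, 0, -1},
 * the loop above would quantize as follows:
 *
 *    intel_quantize_num_samples(screen, 0)  == 0   // non-MSAA stays non-MSAA
 *    intel_quantize_num_samples(screen, 3)  == 4   // rounded up
 *    intel_quantize_num_samples(screen, 6)  == 8   // rounded up
 *    intel_quantize_num_samples(screen, 16) == 0   // above the maximum
 */
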
static mesa_format
intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
   struct brw_context *brw = brw_context(ctx);

   switch (internalFormat) {
   default:
      /* Use the same format-choice logic as for textures.
       * Renderbuffers aren't any different from textures for us,
       * except they're less useful because you can't texture with
       * them.
       */
      return ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
                                             internalFormat,
                                             GL_NONE, GL_NONE);
      break;
   case GL_STENCIL_INDEX:
   case GL_STENCIL_INDEX1_EXT:
   case GL_STENCIL_INDEX4_EXT:
   case GL_STENCIL_INDEX8_EXT:
   case GL_STENCIL_INDEX16_EXT:
      /* These aren't actual texture formats, so force them here. */
      if (brw->has_separate_stencil) {
         return MESA_FORMAT_S_UINT8;
      } else {
         assert(!brw->must_use_separate_stencil);
         return MESA_FORMAT_Z24_UNORM_S8_UINT;
      }
   }
}

static GLboolean
intel_alloc_private_renderbuffer_storage(struct gl_context * ctx,
                                         struct gl_renderbuffer *rb,
                                         GLenum internalFormat,
                                         GLuint width, GLuint height)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_screen *screen = brw->screen;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);

   assert(rb->Format != MESA_FORMAT_NONE);

   rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
   rb->Width = width;
   rb->Height = height;
   rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);

   intel_miptree_release(&irb->mt);

   DBG("%s: %s: %s (%dx%d)\n", __func__,
       _mesa_enum_to_string(internalFormat),
       _mesa_get_format_name(rb->Format), width, height);

   if (width == 0 || height == 0)
      return true;

   irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
                                                   width, height,
                                                   rb->NumSamples);
   if (!irb->mt)
      return false;

   irb->layer_count = 1;

   return true;
}

/**
 * Called via glRenderbufferStorageEXT() to set the format and allocate
 * storage for a user-created renderbuffer.
 */
static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx,
                                 struct gl_renderbuffer *rb,
                                 GLenum internalFormat,
                                 GLuint width, GLuint height)
{
   rb->Format = intel_renderbuffer_format(ctx, internalFormat);
   return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat,
                                                   width, height);
}

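/* Illustrative GL-side sketch (application code, not driver code): the
 * sequence below is what lands in intel_alloc_renderbuffer_storage() via
 * gl_renderbuffer::AllocStorage.  The object name and sizes are
 * hypothetical.
 *
 *    GLuint rbo;
 *    glGenRenderbuffers(1, &rbo);
 *    glBindRenderbuffer(GL_RENDERBUFFER, rbo);
 *    // Chooses the mesa_format and allocates the backing miptree:
 *    glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, 256, 256);
 *    // The multisample variant sets rb->NumSamples, which gets quantized:
 *    glRenderbufferStorageMultisample(GL_RENDERBUFFER, 3, GL_RGBA8,
 *                                     256, 256);  // 3 -> e.g. 4 samples
 */
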
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
                                        struct gl_renderbuffer *rb,
                                        void *image_handle)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *irb;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   __DRIimage *image;

   image = dri_screen->dri2.image->lookupEGLImage(dri_screen, image_handle,
                                                  dri_screen->loaderPrivate);
   if (image == NULL)
      return;

   if (image->planar_format && image->planar_format->nplanes > 1) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(planar buffers are "
                  "not supported as render targets.)");
      return;
   }

   /* __DRIimage is opaque to the core so it has to be checked here */
   if (!brw->format_supported_as_render_target[image->format]) {
      _mesa_error(ctx, GL_INVALID_OPERATION,
                  "glEGLImageTargetRenderbufferStorage(unsupported image "
                  "format)");
      return;
   }

   irb = intel_renderbuffer(rb);
   intel_miptree_release(&irb->mt);

   /* Disable creation of the miptree's aux buffers because the driver
    * exposes no EGL API to manage them.  That is, there is no API for
    * resolving the aux buffer's content to the main buffer nor for
    * invalidating the aux buffer's content.
    */
   irb->mt = intel_miptree_create_for_bo(brw,
                                         image->bo,
                                         image->format,
                                         image->offset,
                                         image->width,
                                         image->height,
                                         1,
                                         image->pitch,
                                         MIPTREE_LAYOUT_DISABLE_AUX);
   if (!irb->mt)
      return;

   /* Adjust the miptree's upper-left coordinate.
    *
    * FIXME: Adjusting the miptree's layout outside of
    * intel_miptree_create_layout() is fragile. Plumb the adjustment through
    * intel_miptree_create_layout() and brw_tex_layout().
    */
   irb->mt->level[0].level_x = image->tile_x;
   irb->mt->level[0].level_y = image->tile_y;
   irb->mt->level[0].slice[0].x_offset = image->tile_x;
   irb->mt->level[0].slice[0].y_offset = image->tile_y;
   irb->mt->total_width += image->tile_x;
   irb->mt->total_height += image->tile_y;

   rb->InternalFormat = image->internal_format;
   rb->Width = image->width;
   rb->Height = image->height;
   rb->Format = image->format;
   rb->_BaseFormat = _mesa_get_format_base_format(image->format);
   rb->NeedsFinishRenderTexture = true;
   irb->layer_count = 1;
}

/**
 * Called by _mesa_resize_framebuffer() for each hardware renderbuffer when
 * a window system framebuffer is resized.
 *
 * Any actual buffer reallocations for hardware renderbuffers (which would
 * have triggered _mesa_resize_framebuffer()) were done by
 * intel_process_dri2_buffer().
 */
static GLboolean
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                           GLenum internalFormat, GLuint width, GLuint height)
{
   (void) ctx;
   assert(rb->Name == 0);
   rb->Width = width;
   rb->Height = height;
   rb->InternalFormat = internalFormat;

   return true;
}

/** Dummy function for gl_renderbuffer::AllocStorage() */
static GLboolean
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
                        GLenum internalFormat, GLuint width, GLuint height)
{
   (void) rb;
   (void) internalFormat;
   (void) width;
   (void) height;
   _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
   return false;
}

/**
 * Create a new intel_renderbuffer which corresponds to an on-screen window,
 * not a user-created renderbuffer.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_renderbuffer(mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   GET_CURRENT_CONTEXT(ctx);

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;
   irb->layer_count = 1;

   _mesa_init_renderbuffer(rb, 0);
   rb->ClassID = INTEL_RB_CLASS;
   rb->_BaseFormat = _mesa_get_format_base_format(format);
   rb->Format = format;
   rb->InternalFormat = rb->_BaseFormat;
   rb->NumSamples = num_samples;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_window_storage;

   return irb;
}

/**
 * Private window-system buffers (as opposed to ones shared with the display
 * server created with intel_create_renderbuffer()) are most similar in
 * their handling to user-created renderbuffers, but they have a resize
 * handler that may be called at intel_update_renderbuffers() time.
 *
 * \param num_samples must be quantized.
 */
struct intel_renderbuffer *
intel_create_private_renderbuffer(mesa_format format, unsigned num_samples)
{
   struct intel_renderbuffer *irb;

   irb = intel_create_renderbuffer(format, num_samples);
   irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;

   return irb;
}

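/* Illustrative sketch (not driver code): both constructors above expect an
 * already-quantized sample count, so callers pair them with
 * intel_quantize_num_samples().  The format and requested count here are
 * hypothetical.
 *
 *    unsigned samples = intel_quantize_num_samples(screen, 4);
 *    struct intel_renderbuffer *irb =
 *       intel_create_renderbuffer(MESA_FORMAT_B8G8R8A8_UNORM, samples);
 */
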
/**
 * Create a new renderbuffer object.
 * Typically called via glBindRenderbufferEXT().
 */
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
   struct intel_renderbuffer *irb;
   struct gl_renderbuffer *rb;

   irb = CALLOC_STRUCT(intel_renderbuffer);
   if (!irb) {
      _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
      return NULL;
   }

   rb = &irb->Base.Base;

   _mesa_init_renderbuffer(rb, name);
   rb->ClassID = INTEL_RB_CLASS;

   /* intel-specific methods */
   rb->Delete = intel_delete_renderbuffer;
   rb->AllocStorage = intel_alloc_renderbuffer_storage;
   /* span routines set in alloc_storage function */

   return rb;
}

static bool
intel_renderbuffer_update_wrapper(struct brw_context *brw,
                                  struct intel_renderbuffer *irb,
                                  struct gl_texture_image *image,
                                  uint32_t layer,
                                  bool layered)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int level = image->Level;

   rb->AllocStorage = intel_nop_alloc_storage;

   /* adjust for texture view parameters */
   layer += image->TexObject->MinLayer;
   level += image->TexObject->MinLevel;

   intel_miptree_check_level_layer(mt, level, layer);
   irb->mt_level = level;

   int layer_multiplier;
   switch (mt->msaa_layout) {
   case INTEL_MSAA_LAYOUT_UMS:
   case INTEL_MSAA_LAYOUT_CMS:
      layer_multiplier = MAX2(mt->num_samples, 1);
      break;

   default:
      layer_multiplier = 1;
   }

   irb->mt_layer = layer_multiplier * layer;

   if (!layered) {
      irb->layer_count = 1;
   } else if (mt->target != GL_TEXTURE_3D && image->TexObject->NumLayers > 0) {
      irb->layer_count = image->TexObject->NumLayers;
   } else {
      irb->layer_count = mt->level[level].depth / layer_multiplier;
   }

   intel_miptree_reference(&irb->mt, mt);

   intel_renderbuffer_set_draw_offset(irb);

   if (intel_miptree_wants_hiz_buffer(brw, mt)) {
      intel_miptree_alloc_hiz(brw, mt);
      if (!mt->hiz_buf)
         return false;
   }

   return true;
}

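/* Worked example (hypothetical miptree) for the layer bookkeeping above:
 * with a 4x MSAA array texture in UMS or CMS layout, each logical layer
 * occupies num_samples physical slices, so layer_multiplier == 4.
 * Attaching logical layer 2 non-layered gives:
 *
 *    irb->mt_layer    == 4 * 2 == 8;   // physical slice index
 *    irb->layer_count == 1;
 *
 * A layered attachment of the same miptree with a level depth of 12 gives
 * layer_count == 12 / 4 == 3 logical layers.
 */
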
void
intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
{
   unsigned int dst_x, dst_y;

   /* compute offset of the particular 2D image within the texture region */
   intel_miptree_get_image_offset(irb->mt,
                                  irb->mt_level,
                                  irb->mt_layer,
                                  &dst_x, &dst_y);

   irb->draw_x = dst_x;
   irb->draw_y = dst_y;
}

/**
 * Called by glFramebufferTexture[123]DEXT() (and other places) to
 * prepare for rendering into texture memory.  This might be called
 * many times to choose different texture levels, cube faces, etc
 * before intel_finish_render_texture() is ever called.
 */
static void
intel_render_texture(struct gl_context * ctx,
                     struct gl_framebuffer *fb,
                     struct gl_renderbuffer_attachment *att)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_renderbuffer *rb = att->Renderbuffer;
   struct intel_renderbuffer *irb = intel_renderbuffer(rb);
   struct gl_texture_image *image = rb->TexImage;
   struct intel_texture_image *intel_image = intel_texture_image(image);
   struct intel_mipmap_tree *mt = intel_image->mt;
   int layer;

   (void) fb;

   if (att->CubeMapFace > 0) {
      assert(att->Zoffset == 0);
      layer = att->CubeMapFace;
   } else {
      layer = att->Zoffset;
   }

   if (!intel_image->mt) {
      /* Fall back to drawing to a texture that doesn't have a miptree
       * (has a border, width/height 0, etc.)
       */
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   intel_miptree_check_level_layer(mt, att->TextureLevel, layer);

   if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer,
                                          att->Layered)) {
      _swrast_render_texture(ctx, fb, att);
      return;
   }

   DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
       _mesa_get_format_name(image->TexFormat),
       att->Texture->Name, image->Width, image->Height, image->Depth,
       rb->RefCount);
}


#define fbo_incomplete(fb, ...) do {                                          \
      static GLuint msg_id = 0;                                               \
      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) {    \
         _mesa_gl_debug(ctx, &msg_id,                                         \
                        MESA_DEBUG_SOURCE_API,                                \
                        MESA_DEBUG_TYPE_OTHER,                                \
                        MESA_DEBUG_SEVERITY_MEDIUM,                           \
                        __VA_ARGS__);                                         \
      }                                                                       \
      DBG(__VA_ARGS__);                                                       \
      fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;                               \
   } while (0)

/**
 * Do additional "completeness" testing of a framebuffer object.
 */
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_renderbuffer *depthRb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencilRb =
      intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
   unsigned i;

   DBG("%s() on fb %p (%s)\n", __func__,
       fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
            (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));

   if (depthRb)
      depth_mt = depthRb->mt;
   if (stencilRb) {
      stencil_mt = stencilRb->mt;
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;
   }

   if (depth_mt && stencil_mt) {
      if (brw->gen >= 6) {
         /* For gen >= 6, we are using the lod/minimum-array-element fields
          * and supporting layered rendering.  This means that we must
          * restrict the depth & stencil attachments to match in various
          * more restrictive ways (width, height, depth, LOD and layer).
          */
         if (depth_mt->physical_width0 != stencil_mt->physical_width0 ||
             depth_mt->physical_height0 != stencil_mt->physical_height0 ||
             depth_mt->physical_depth0 != stencil_mt->physical_depth0 ||
             depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth and stencil must match in "
                           "width, height, depth, LOD and layer\n");
         }
      }
      if (depth_mt == stencil_mt) {
         /* For true packed depth/stencil (not faked on
          * prefers-separate-stencil hardware) we need to be sure they're the
          * same level/layer, since we'll be emitting a single packet
          * describing the packed setup.
          */
         if (depthRb->mt_level != stencilRb->mt_level ||
             depthRb->mt_layer != stencilRb->mt_layer) {
            fbo_incomplete(fb,
                           "FBO incomplete: depth image level/layer %d/%d != "
                           "stencil image %d/%d\n",
                           depthRb->mt_level,
                           depthRb->mt_layer,
                           stencilRb->mt_level,
                           stencilRb->mt_layer);
         }
      } else {
         if (!brw->has_separate_stencil) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "unsupported\n");
         }
         if (stencil_mt->format != MESA_FORMAT_S_UINT8) {
            fbo_incomplete(fb, "FBO incomplete: separate stencil is %s "
                           "instead of S8\n",
                           _mesa_get_format_name(stencil_mt->format));
         }
         if (brw->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
            /* Before Gen7, separate depth and stencil buffers can be used
             * only if HiZ is enabled.  From the Sandybridge PRM, Volume 2,
             * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
             *     [DevSNB]: This field must be set to the same value
             *     (enabled or disabled) as Hierarchical Depth Buffer Enable.
             */
            fbo_incomplete(fb, "FBO incomplete: separate stencil "
                           "without HiZ\n");
         }
      }
   }

   for (i = 0; i < ARRAY_SIZE(fb->Attachment); i++) {
      struct gl_renderbuffer *rb;
      struct intel_renderbuffer *irb;

      if (fb->Attachment[i].Type == GL_NONE)
         continue;

      /* A supported attachment will have a Renderbuffer set either
       * from being a Renderbuffer or being a texture that got the
       * intel_wrap_texture() treatment.
       */
      rb = fb->Attachment[i].Renderbuffer;
      if (rb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: attachment without "
                        "renderbuffer\n");
         continue;
      }

      if (fb->Attachment[i].Type == GL_TEXTURE) {
         if (rb->TexImage->Border) {
            fbo_incomplete(fb, "FBO incomplete: texture with border\n");
            continue;
         }
      }

      irb = intel_renderbuffer(rb);
      if (irb == NULL) {
         fbo_incomplete(fb, "FBO incomplete: software rendering "
                        "renderbuffer\n");
         continue;
      }

      if (!brw_render_target_supported(brw, rb)) {
         fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
                        "texture/renderbuffer format attached: %s\n",
                        _mesa_get_format_name(intel_rb_format(irb)));
      }
   }
}

/**
 * Try to do a glBlitFramebuffer using glCopyTexSubImage2D.
 * We can do this when the dst renderbuffer is actually a texture and
 * there is no scaling, mirroring or scissoring.
 *
 * \return new buffer mask indicating the buffers left to blit using the
 *         normal path.
 */
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
                                    const struct gl_framebuffer *readFb,
                                    const struct gl_framebuffer *drawFb,
                                    GLint srcX0, GLint srcY0,
                                    GLint srcX1, GLint srcY1,
                                    GLint dstX0, GLint dstY0,
                                    GLint dstX1, GLint dstY1,
                                    GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);

   /* Sync up the state of window system buffers.  We need to do this before
    * we go looking for the buffers.
    */
   intel_prepare_render(brw);

   if (mask & GL_COLOR_BUFFER_BIT) {
      unsigned i;
      struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
      struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);

      if (!src_irb) {
         perf_debug("glBlitFramebuffer(): missing src renderbuffer. "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* If the source and destination are the same size with no mirroring,
       * the rectangles are within the size of the texture and there is no
       * scissor, then we can probably use the blit engine.
       */
      if (!(srcX0 - srcX1 == dstX0 - dstX1 &&
            srcY0 - srcY1 == dstY0 - dstY1 &&
            srcX1 >= srcX0 &&
            srcY1 >= srcY0 &&
            srcX0 >= 0 && srcX1 <= readFb->Width &&
            srcY0 >= 0 && srcY1 <= readFb->Height &&
            dstX0 >= 0 && dstX1 <= drawFb->Width &&
            dstY0 >= 0 && dstY1 <= drawFb->Height &&
            !(ctx->Scissor.EnableFlags))) {
         perf_debug("glBlitFramebuffer(): non-1:1 blit. "
                    "Falling back to software rendering.\n");
         return mask;
      }

      /* Blit to all active draw buffers.  We don't do any pre-checking,
       * because we assume that copying to MRTs is rare, and failure midway
       * through copying is even more rare.  Even if it was to occur, it's
       * safe to let meta start the copy over from scratch, because
       * glBlitFramebuffer completely overwrites the destination pixels, and
       * results are undefined if any destination pixels have a dependency
       * on source pixels.
       */
      for (i = 0; i < drawFb->_NumColorDrawBuffers; i++) {
         struct gl_renderbuffer *dst_rb = drawFb->_ColorDrawBuffers[i];
         struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);

         if (!dst_irb) {
            perf_debug("glBlitFramebuffer(): missing dst renderbuffer. "
                       "Falling back to software rendering.\n");
            return mask;
         }

         if (ctx->Color.sRGBEnabled &&
             _mesa_get_format_color_encoding(src_irb->mt->format) !=
             _mesa_get_format_color_encoding(dst_irb->mt->format)) {
            perf_debug("glBlitFramebuffer() with sRGB conversion cannot be "
                       "handled by BLT path.\n");
            return mask;
         }

         if (!intel_miptree_blit(brw,
                                 src_irb->mt,
                                 src_irb->mt_level, src_irb->mt_layer,
                                 srcX0, srcY0, src_rb->Name == 0,
                                 dst_irb->mt,
                                 dst_irb->mt_level, dst_irb->mt_layer,
                                 dstX0, dstY0, dst_rb->Name == 0,
                                 dstX1 - dstX0, dstY1 - dstY0, GL_COPY)) {
            perf_debug("glBlitFramebuffer(): unknown blit failure. "
                       "Falling back to software rendering.\n");
            return mask;
         }
      }

      mask &= ~GL_COLOR_BUFFER_BIT;
   }

   return mask;
}

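/* Worked example (hypothetical coordinates) for the 1:1 check above: a
 * copy from src (0, 0)-(64, 64) to dst (16, 16)-(80, 80) passes, because
 * srcX0 - srcX1 == dstX0 - dstX1 == -64 and likewise for Y (equal sizes,
 * no mirroring).  A scaled blit such as src (0, 0)-(64, 64) to
 * dst (0, 0)-(128, 128) fails the width test (-64 != -128), and a mirrored
 * blit like src (64, 0)-(0, 64) fails srcX1 >= srcX0; both leave
 * GL_COLOR_BUFFER_BIT set in the returned mask for a later path.
 */
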
" 861 "Falling back to software rendering.\n"); 862 return mask; 863 } 864 } 865 866 mask &= ~GL_COLOR_BUFFER_BIT; 867 } 868 869 return mask; 870 } 871 872 static void 873 intel_blit_framebuffer(struct gl_context *ctx, 874 struct gl_framebuffer *readFb, 875 struct gl_framebuffer *drawFb, 876 GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, 877 GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, 878 GLbitfield mask, GLenum filter) 879 { 880 struct brw_context *brw = brw_context(ctx); 881 882 /* Page 679 of OpenGL 4.4 spec says: 883 * "Added BlitFramebuffer to commands affected by conditional rendering in 884 * section 10.10 (Bug 9562)." 885 */ 886 if (!_mesa_check_conditional_render(ctx)) 887 return; 888 889 mask = brw_blorp_framebuffer(brw, readFb, drawFb, 890 srcX0, srcY0, srcX1, srcY1, 891 dstX0, dstY0, dstX1, dstY1, 892 mask, filter); 893 if (mask == 0x0) 894 return; 895 896 mask = _mesa_meta_BlitFramebuffer(ctx, readFb, drawFb, 897 srcX0, srcY0, srcX1, srcY1, 898 dstX0, dstY0, dstX1, dstY1, 899 mask, filter); 900 if (mask == 0x0) 901 return; 902 903 if (brw->gen >= 8 && (mask & GL_STENCIL_BUFFER_BIT)) { 904 assert(!"Invalid blit"); 905 } 906 907 /* Try using the BLT engine. */ 908 mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb, 909 srcX0, srcY0, srcX1, srcY1, 910 dstX0, dstY0, dstX1, dstY1, 911 mask); 912 if (mask == 0x0) 913 return; 914 915 _swrast_BlitFramebuffer(ctx, readFb, drawFb, 916 srcX0, srcY0, srcX1, srcY1, 917 dstX0, dstY0, dstX1, dstY1, 918 mask, filter); 919 } 920 921 /** 922 * Gen4-5 implementation of glBlitFrameBuffer(). 923 * 924 * Tries BLT, Meta, then swrast. 925 * 926 * Gen4-5 have a single ring for both 3D and BLT operations, so there's no 927 * inter-ring synchronization issues like on Gen6+. It is apparently faster 928 * than using the 3D pipeline. Original Gen4 also has to rebase and copy 929 * miptree slices in order to render to unaligned locations. 930 */ 931 static void 932 gen4_blit_framebuffer(struct gl_context *ctx, 933 struct gl_framebuffer *readFb, 934 struct gl_framebuffer *drawFb, 935 GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1, 936 GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1, 937 GLbitfield mask, GLenum filter) 938 { 939 /* Page 679 of OpenGL 4.4 spec says: 940 * "Added BlitFramebuffer to commands affected by conditional rendering in 941 * section 10.10 (Bug 9562)." 942 */ 943 if (!_mesa_check_conditional_render(ctx)) 944 return; 945 946 mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb, 947 srcX0, srcY0, srcX1, srcY1, 948 dstX0, dstY0, dstX1, dstY1, 949 mask); 950 if (mask == 0x0) 951 return; 952 953 mask = _mesa_meta_BlitFramebuffer(ctx, readFb, drawFb, 954 srcX0, srcY0, srcX1, srcY1, 955 dstX0, dstY0, dstX1, dstY1, 956 mask, filter); 957 if (mask == 0x0) 958 return; 959 960 _swrast_BlitFramebuffer(ctx, readFb, drawFb, 961 srcX0, srcY0, srcX1, srcY1, 962 dstX0, dstY0, dstX1, dstY1, 963 mask, filter); 964 } 965 966 /** 967 * Does the renderbuffer have hiz enabled? 
/**
 * Does the renderbuffer have HiZ enabled?
 */
bool
intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
{
   return intel_miptree_level_has_hiz(irb->mt, irb->mt_level);
}

bool
intel_renderbuffer_resolve_hiz(struct brw_context *brw,
                               struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_hiz(brw,
                                             irb->mt,
                                             irb->mt_level,
                                             irb->mt_layer);

   return false;
}

void
intel_renderbuffer_att_set_needs_depth_resolve(struct gl_renderbuffer_attachment *att)
{
   struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
   if (irb->mt) {
      if (att->Layered) {
         intel_miptree_set_all_slices_need_depth_resolve(irb->mt,
                                                         irb->mt_level);
      } else {
         intel_miptree_slice_set_needs_depth_resolve(irb->mt,
                                                     irb->mt_level,
                                                     irb->mt_layer);
      }
   }
}

bool
intel_renderbuffer_resolve_depth(struct brw_context *brw,
                                 struct intel_renderbuffer *irb)
{
   if (irb->mt)
      return intel_miptree_slice_resolve_depth(brw,
                                               irb->mt,
                                               irb->mt_level,
                                               irb->mt_layer);

   return false;
}

void
intel_renderbuffer_move_to_temp(struct brw_context *brw,
                                struct intel_renderbuffer *irb,
                                bool invalidate)
{
   struct gl_renderbuffer *rb = &irb->Base.Base;
   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
   struct intel_mipmap_tree *new_mt;
   int width, height, depth;

   uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
                           MIPTREE_LAYOUT_TILING_ANY;

   intel_get_image_dims(rb->TexImage, &width, &height, &depth);

   new_mt = intel_miptree_create(brw, rb->TexImage->TexObject->Target,
                                 intel_image->base.Base.TexFormat,
                                 intel_image->base.Base.Level,
                                 intel_image->base.Base.Level,
                                 width, height, depth,
                                 irb->mt->num_samples,
                                 layout_flags);

   if (intel_miptree_wants_hiz_buffer(brw, new_mt)) {
      intel_miptree_alloc_hiz(brw, new_mt);
   }

   intel_miptree_copy_teximage(brw, intel_image, new_mt, invalidate);

   intel_miptree_reference(&irb->mt, intel_image->mt);
   intel_renderbuffer_set_draw_offset(irb);
   intel_miptree_release(&new_mt);
}

void
brw_render_cache_set_clear(struct brw_context *brw)
{
   struct set_entry *entry;

   set_foreach(brw->render_cache, entry) {
      _mesa_set_remove(brw->render_cache, entry);
   }
}

void
brw_render_cache_set_add_bo(struct brw_context *brw, drm_intel_bo *bo)
{
   _mesa_set_add(brw->render_cache, bo);
}

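/* Illustrative usage sketch (not driver code) for the render-cache set:
 * after drawing to a BO it is registered, and a later read of that BO
 * within the same batch triggers the flush in
 * brw_render_cache_set_check_flush() below.  "bo" is hypothetical.
 *
 *    brw_render_cache_set_add_bo(brw, bo);       // after rendering to bo
 *    ...
 *    brw_render_cache_set_check_flush(brw, bo);  // before sampling from bo
 */
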
/**
 * Emits an appropriate flush for a BO if it has been rendered to within the
 * same batchbuffer as a read that's about to be emitted.
 *
 * The GPU has separate, incoherent caches for the render cache and the
 * sampler cache, along with other caches.  Usually data in the different
 * caches don't interact (e.g. we don't render to our driver-generated
 * immediate constant data), but for render-to-texture in FBOs we definitely
 * do.  When a batchbuffer is flushed, the kernel will ensure that everything
 * necessary is flushed before another use of that BO, but for reuse from
 * different caches within a batchbuffer, it's all our responsibility.
 */
void
brw_render_cache_set_check_flush(struct brw_context *brw, drm_intel_bo *bo)
{
   if (!_mesa_set_search(brw->render_cache, bo))
      return;

   if (brw->gen >= 6) {
      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                  PIPE_CONTROL_CS_STALL);

      brw_emit_pipe_control_flush(brw,
                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE);
   } else {
      brw_emit_mi_flush(brw);
   }

   brw_render_cache_set_clear(brw);
}

/**
 * Do one-time context initializations related to GL_EXT_framebuffer_object.
 * Hook in device driver functions.
 */
void
intel_fbo_init(struct brw_context *brw)
{
   struct dd_function_table *dd = &brw->ctx.Driver;
   dd->NewRenderbuffer = intel_new_renderbuffer;
   dd->MapRenderbuffer = intel_map_renderbuffer;
   dd->UnmapRenderbuffer = intel_unmap_renderbuffer;
   dd->RenderTexture = intel_render_texture;
   dd->ValidateFramebuffer = intel_validate_framebuffer;
   if (brw->gen >= 6)
      dd->BlitFramebuffer = intel_blit_framebuffer;
   else
      dd->BlitFramebuffer = gen4_blit_framebuffer;
   dd->EGLImageTargetRenderbufferStorage =
      intel_image_target_renderbuffer_storage;

   brw->render_cache = _mesa_set_create(brw, _mesa_hash_pointer,
                                        _mesa_key_pointer_equal);
}