      1 /**************************************************************************
      2  *
      3  * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
      4  * All Rights Reserved.
      5  *
      6  * Permission is hereby granted, free of charge, to any person obtaining a
      7  * copy of this software and associated documentation files (the
      8  * "Software"), to deal in the Software without restriction, including
      9  * without limitation the rights to use, copy, modify, merge, publish,
     10  * distribute, sub license, and/or sell copies of the Software, and to
     11  * permit persons to whom the Software is furnished to do so, subject to
     12  * the following conditions:
     13  *
     14  * The above copyright notice and this permission notice (including the
     15  * next paragraph) shall be included in all copies or substantial portions
     16  * of the Software.
     17  *
     18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
     19  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     20  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
     21  * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
     22  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
     23  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
     24  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     25  *
     26  **************************************************************************/
     27 
     28 
     29 #include "main/enums.h"
     30 #include "main/imports.h"
     31 #include "main/macros.h"
     32 #include "main/mfeatures.h"
     33 #include "main/mtypes.h"
     34 #include "main/fbobject.h"
     35 #include "main/framebuffer.h"
     36 #include "main/renderbuffer.h"
     37 #include "main/context.h"
     38 #include "main/teximage.h"
     39 #include "main/image.h"
     40 
     41 #include "swrast/swrast.h"
     42 #include "drivers/common/meta.h"
     43 
     44 #include "intel_context.h"
     45 #include "intel_batchbuffer.h"
     46 #include "intel_buffers.h"
     47 #include "intel_blit.h"
     48 #include "intel_fbo.h"
     49 #include "intel_mipmap_tree.h"
     50 #include "intel_regions.h"
     51 #include "intel_tex.h"
     52 #include "intel_span.h"
     53 #ifndef I915
     54 #include "brw_context.h"
     55 #endif
     56 
     57 #define FILE_DEBUG_FLAG DEBUG_FBO
     58 
     59 static struct gl_renderbuffer *
     60 intel_new_renderbuffer(struct gl_context * ctx, GLuint name);
     61 
     62 struct intel_region*
     63 intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
     64 {
     65    struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);
     66    if (irb && irb->mt) {
     67       if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
     68 	 return irb->mt->stencil_mt->region;
     69       else
     70 	 return irb->mt->region;
     71    } else
     72       return NULL;
     73 }
     74 
     75 /**
     76  * Create a new framebuffer object.
     77  */
     78 static struct gl_framebuffer *
     79 intel_new_framebuffer(struct gl_context * ctx, GLuint name)
     80 {
      81    /* There is only drawable state in intel_framebuffer at this time, so just
      82     * use Mesa's class.
      83     */
     84    return _mesa_new_framebuffer(ctx, name);
     85 }
     86 
     87 
     88 /** Called by gl_renderbuffer::Delete() */
     89 static void
     90 intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
     91 {
     92    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
     93 
     94    ASSERT(irb);
     95 
     96    intel_miptree_release(&irb->mt);
     97 
     98    _mesa_delete_renderbuffer(ctx, rb);
     99 }
    100 
    101 /**
    102  * \see dd_function_table::MapRenderbuffer
    103  */
    104 static void
    105 intel_map_renderbuffer(struct gl_context *ctx,
    106 		       struct gl_renderbuffer *rb,
    107 		       GLuint x, GLuint y, GLuint w, GLuint h,
    108 		       GLbitfield mode,
    109 		       GLubyte **out_map,
    110 		       GLint *out_stride)
    111 {
    112    struct intel_context *intel = intel_context(ctx);
    113    struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
    114    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
    115    void *map;
    116    int stride;
    117 
    118    if (srb->Buffer) {
    119       /* this is a malloc'd renderbuffer (accum buffer), not an irb */
    120       GLint bpp = _mesa_get_format_bytes(rb->Format);
    121       GLint rowStride = srb->RowStride;
    122       *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
    123       *out_stride = rowStride;
    124       return;
    125    }
    126 
     127    /* intel_span.c sometimes calls us on a renderbuffer that has no miptree. */
    128    if (!irb->mt) {
    129       *out_map = NULL;
    130       *out_stride = 0;
    131       return;
    132    }
    133 
     134    /* For a window-system renderbuffer, we need to flip the mapping we receive
     135     * upside-down.  So we ask for a rectangle that is flipped vertically, and
     136     * then return a pointer to its bottom row with a negative stride.
     137     */
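            /* Worked example (illustrative numbers): mapping an 8-row strip at
             * y == 0 of a 100-pixel-tall window buffer asks the miptree for
             * rows 92..99 and then hands back a pointer to the last mapped row
             * with stride == -pitch, so the caller still walks it top-down.
             */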
    138    if (rb->Name == 0) {
    139       y = rb->Height - y - h;
    140    }
    141 
    142    intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
    143 		     x, y, w, h, mode, &map, &stride);
    144 
    145    if (rb->Name == 0) {
    146       map += (h - 1) * stride;
    147       stride = -stride;
    148    }
    149 
    150    DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
    151        __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
    152        x, y, w, h, map, stride);
    153 
    154    *out_map = map;
    155    *out_stride = stride;
    156 }
    157 
    158 /**
    159  * \see dd_function_table::UnmapRenderbuffer
    160  */
    161 static void
    162 intel_unmap_renderbuffer(struct gl_context *ctx,
    163 			 struct gl_renderbuffer *rb)
    164 {
    165    struct intel_context *intel = intel_context(ctx);
    166    struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
    167    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
    168 
    169    DBG("%s: rb %d (%s)\n", __FUNCTION__,
    170        rb->Name, _mesa_get_format_name(rb->Format));
    171 
    172    if (srb->Buffer) {
    173       /* this is a malloc'd renderbuffer (accum buffer) */
    174       /* nothing to do */
    175       return;
    176    }
    177 
    178    intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
    179 }
    180 
    181 
    182 /**
    183  * Round up the requested multisample count to the next supported sample size.
    184  */
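         /*
          * For example (derived from the cases below): a request for 6 samples
          * is rounded to 8 on Gen7, to 4 on Gen6, and to 1 on earlier parts,
          * which do not support real MSAA.
          */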
    185 unsigned
    186 intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
    187 {
    188    switch (intel->gen) {
    189    case 6:
    190       /* Gen6 supports only 4x multisampling. */
    191       if (num_samples > 0)
    192          return 4;
    193       else
    194          return 0;
    195    case 7:
    196       /* Gen7 supports 4x and 8x multisampling. */
    197       if (num_samples > 4)
    198          return 8;
    199       else if (num_samples > 0)
    200          return 4;
    201       else
    202          return 0;
    204    default:
    205       /* MSAA unsupported.  However, a careful reading of
    206        * EXT_framebuffer_multisample reveals that we need to permit
    207        * num_samples to be 1 (since num_samples is permitted to be as high as
    208        * GL_MAX_SAMPLES, and GL_MAX_SAMPLES must be at least 1).  Since
    209        * platforms before Gen6 don't support MSAA, this is safe, because
    210        * multisampling won't happen anyhow.
    211        */
    212       if (num_samples > 0)
    213          return 1;
    214       return 0;
    215    }
    216 }
    217 
    218 
    219 /**
    220  * Called via glRenderbufferStorageEXT() to set the format and allocate
    221  * storage for a user-created renderbuffer.
    222  */
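         /*
          * Illustrative example: a call such as
          *    glRenderbufferStorageMultisample(GL_RENDERBUFFER, 6, GL_RGBA8, 256, 256)
          * reaches this function with rb->NumSamples == 6, which the quantization
          * below rounds to a sample count the hardware actually supports.
          */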
    223 GLboolean
    224 intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
    225                                  GLenum internalFormat,
    226                                  GLuint width, GLuint height)
    227 {
    228    struct intel_context *intel = intel_context(ctx);
    229    struct intel_screen *screen = intel->intelScreen;
    230    struct intel_renderbuffer *irb = intel_renderbuffer(rb);
    231    rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
    232 
    233    switch (internalFormat) {
    234    default:
    235       /* Use the same format-choice logic as for textures.
    236        * Renderbuffers aren't any different from textures for us,
    237        * except they're less useful because you can't texture with
    238        * them.
    239        */
    240       rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
    241 							 internalFormat,
    242 							 GL_NONE, GL_NONE);
    243       break;
    244    case GL_STENCIL_INDEX:
    245    case GL_STENCIL_INDEX1_EXT:
    246    case GL_STENCIL_INDEX4_EXT:
    247    case GL_STENCIL_INDEX8_EXT:
    248    case GL_STENCIL_INDEX16_EXT:
    249       /* These aren't actual texture formats, so force them here. */
    250       if (intel->has_separate_stencil) {
    251 	 rb->Format = MESA_FORMAT_S8;
    252       } else {
    253 	 assert(!intel->must_use_separate_stencil);
    254 	 rb->Format = MESA_FORMAT_S8_Z24;
    255       }
    256       break;
    257    }
    258 
    259    rb->Width = width;
    260    rb->Height = height;
    261    rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
    262 
    263    intel_miptree_release(&irb->mt);
    264 
    265    DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
    266        _mesa_lookup_enum_by_nr(internalFormat),
    267        _mesa_get_format_name(rb->Format), width, height);
    268 
    269    if (width == 0 || height == 0)
    270       return true;
    271 
    272    irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
    273 						   width, height,
    274                                                    rb->NumSamples);
    275    if (!irb->mt)
    276       return false;
    277 
    278    return true;
    279 }
    280 
    281 
    282 #if FEATURE_OES_EGL_image
    283 static void
    284 intel_image_target_renderbuffer_storage(struct gl_context *ctx,
    285 					struct gl_renderbuffer *rb,
    286 					void *image_handle)
    287 {
    288    struct intel_context *intel = intel_context(ctx);
    289    struct intel_renderbuffer *irb;
    290    __DRIscreen *screen;
    291    __DRIimage *image;
    292 
    293    screen = intel->intelScreen->driScrnPriv;
    294    image = screen->dri2.image->lookupEGLImage(screen, image_handle,
    295 					      screen->loaderPrivate);
    296    if (image == NULL)
    297       return;
    298 
    299    /* __DRIimage is opaque to the core so it has to be checked here */
    300    switch (image->format) {
    301    case MESA_FORMAT_RGBA8888_REV:
    302       _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
     303             "glEGLImageTargetRenderbufferStorage(unsupported image format)");
     304       return;
    306    default:
    307       break;
    308    }
    309 
    310    irb = intel_renderbuffer(rb);
    311    intel_miptree_release(&irb->mt);
    312    irb->mt = intel_miptree_create_for_region(intel,
    313                                              GL_TEXTURE_2D,
    314                                              image->format,
    315                                              image->region);
    316    if (!irb->mt)
    317       return;
    318 
    319    rb->InternalFormat = image->internal_format;
    320    rb->Width = image->region->width;
    321    rb->Height = image->region->height;
    322    rb->Format = image->format;
    323    rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
    324 					   image->internal_format);
    325 }
    326 #endif
    327 
    328 /**
    329  * Called for each hardware renderbuffer when a _window_ is resized.
    330  * Just update fields.
    331  * Not used for user-created renderbuffers!
    332  */
    333 static GLboolean
    334 intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
    335                            GLenum internalFormat, GLuint width, GLuint height)
    336 {
    337    ASSERT(rb->Name == 0);
    338    rb->Width = width;
    339    rb->Height = height;
    340    rb->InternalFormat = internalFormat;
    341 
    342    return true;
    343 }
    344 
    345 
    346 static void
    347 intel_resize_buffers(struct gl_context *ctx, struct gl_framebuffer *fb,
    348 		     GLuint width, GLuint height)
    349 {
    350    int i;
    351 
    352    _mesa_resize_framebuffer(ctx, fb, width, height);
    353 
    354    fb->Initialized = true; /* XXX remove someday */
    355 
    356    if (_mesa_is_user_fbo(fb)) {
    357       return;
    358    }
    359 
    360 
    361    /* Make sure all window system renderbuffers are up to date */
    362    for (i = BUFFER_FRONT_LEFT; i <= BUFFER_BACK_RIGHT; i++) {
    363       struct gl_renderbuffer *rb = fb->Attachment[i].Renderbuffer;
    364 
    365       /* only resize if size is changing */
    366       if (rb && (rb->Width != width || rb->Height != height)) {
    367 	 rb->AllocStorage(ctx, rb, rb->InternalFormat, width, height);
    368       }
    369    }
    370 }
    371 
    372 
    373 /** Dummy function for gl_renderbuffer::AllocStorage() */
    374 static GLboolean
    375 intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
    376                         GLenum internalFormat, GLuint width, GLuint height)
    377 {
     378    _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
    379    return false;
    380 }
    381 
    382 /**
    383  * Create a new intel_renderbuffer which corresponds to an on-screen window,
    384  * not a user-created renderbuffer.
    385  *
    386  * \param num_samples must be quantized.
    387  */
    388 struct intel_renderbuffer *
    389 intel_create_renderbuffer(gl_format format, unsigned num_samples)
    390 {
    391    struct intel_renderbuffer *irb;
    392    struct gl_renderbuffer *rb;
    393 
    394    GET_CURRENT_CONTEXT(ctx);
    395 
    396    irb = CALLOC_STRUCT(intel_renderbuffer);
    397    if (!irb) {
    398       _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
    399       return NULL;
    400    }
    401 
    402    rb = &irb->Base.Base;
    403 
    404    _mesa_init_renderbuffer(rb, 0);
    405    rb->ClassID = INTEL_RB_CLASS;
    406    rb->_BaseFormat = _mesa_get_format_base_format(format);
    407    rb->Format = format;
    408    rb->InternalFormat = rb->_BaseFormat;
    409    rb->NumSamples = num_samples;
    410 
    411    /* intel-specific methods */
    412    rb->Delete = intel_delete_renderbuffer;
    413    rb->AllocStorage = intel_alloc_window_storage;
    414 
    415    return irb;
    416 }
    417 
    418 /**
    419  * Private window-system buffers (as opposed to ones shared with the display
    420  * server created with intel_create_renderbuffer()) are most similar in their
    421  * handling to user-created renderbuffers, but they have a resize handler that
    422  * may be called at intel_update_renderbuffers() time.
    423  *
    424  * \param num_samples must be quantized.
    425  */
    426 struct intel_renderbuffer *
    427 intel_create_private_renderbuffer(gl_format format, unsigned num_samples)
    428 {
    429    struct intel_renderbuffer *irb;
    430 
    431    irb = intel_create_renderbuffer(format, num_samples);
    432    irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;
    433 
    434    return irb;
    435 }
    436 
    437 /**
    438  * Create a new renderbuffer object.
    439  * Typically called via glBindRenderbufferEXT().
    440  */
    441 static struct gl_renderbuffer *
    442 intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
    443 {
    444    /*struct intel_context *intel = intel_context(ctx); */
    445    struct intel_renderbuffer *irb;
    446    struct gl_renderbuffer *rb;
    447 
    448    irb = CALLOC_STRUCT(intel_renderbuffer);
    449    if (!irb) {
    450       _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
    451       return NULL;
    452    }
    453 
    454    rb = &irb->Base.Base;
    455 
    456    _mesa_init_renderbuffer(rb, name);
    457    rb->ClassID = INTEL_RB_CLASS;
    458 
    459    /* intel-specific methods */
    460    rb->Delete = intel_delete_renderbuffer;
    461    rb->AllocStorage = intel_alloc_renderbuffer_storage;
    462    /* span routines set in alloc_storage function */
    463 
    464    return rb;
    465 }
    466 
    467 
    468 /**
    469  * Called via glBindFramebufferEXT().
    470  */
    471 static void
    472 intel_bind_framebuffer(struct gl_context * ctx, GLenum target,
    473                        struct gl_framebuffer *fb, struct gl_framebuffer *fbread)
    474 {
    475    if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
    476       intel_draw_buffer(ctx);
    477    }
    478    else {
    479       /* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
    480    }
    481 }
    482 
    483 
    484 /**
    485  * Called via glFramebufferRenderbufferEXT().
    486  */
    487 static void
    488 intel_framebuffer_renderbuffer(struct gl_context * ctx,
    489                                struct gl_framebuffer *fb,
    490                                GLenum attachment, struct gl_renderbuffer *rb)
    491 {
    492    DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
    493 
    494    _mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
    495    intel_draw_buffer(ctx);
    496 }
    497 
    498 /**
    499  * \par Special case for separate stencil
    500  *
    501  *     When wrapping a depthstencil texture that uses separate stencil, this
    502  *     function is recursively called twice: once to create \c
    503  *     irb->wrapped_depth and again to create \c irb->wrapped_stencil.  On the
    504  *     call to create \c irb->wrapped_depth, the \c format and \c
    505  *     internal_format parameters do not match \c mt->format. In that case, \c
    506  *     mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
    507  *     MESA_FORMAT_X8_Z24.
    508  *
    509  * @return true on success
    510  */
    511 
    512 static bool
    513 intel_renderbuffer_update_wrapper(struct intel_context *intel,
    514                                   struct intel_renderbuffer *irb,
    515 				  struct gl_texture_image *image,
    516                                   uint32_t layer)
    517 {
    518    struct gl_renderbuffer *rb = &irb->Base.Base;
    519    struct intel_texture_image *intel_image = intel_texture_image(image);
    520    struct intel_mipmap_tree *mt = intel_image->mt;
    521    int level = image->Level;
    522 
    523    rb->Format = image->TexFormat;
    524    rb->InternalFormat = image->InternalFormat;
    525    rb->_BaseFormat = image->_BaseFormat;
    526    rb->Width = mt->level[level].width;
    527    rb->Height = mt->level[level].height;
    528 
    529    rb->Delete = intel_delete_renderbuffer;
    530    rb->AllocStorage = intel_nop_alloc_storage;
    531 
    532    intel_miptree_check_level_layer(mt, level, layer);
    533    irb->mt_level = level;
    534    irb->mt_layer = layer;
    535 
    536    intel_miptree_reference(&irb->mt, mt);
    537 
    538    intel_renderbuffer_set_draw_offset(irb);
    539 
    540    if (mt->hiz_mt == NULL &&
    541        intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
    542       intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */);
    543       if (!mt->hiz_mt)
    544 	 return false;
    545    }
    546 
    547    return true;
    548 }
    549 
    550 void
    551 intel_renderbuffer_set_draw_offset(struct intel_renderbuffer *irb)
    552 {
    553    unsigned int dst_x, dst_y;
    554 
    555    /* compute offset of the particular 2D image within the texture region */
    556    intel_miptree_get_image_offset(irb->mt,
    557 				  irb->mt_level,
    558 				  0, /* face, which we ignore */
    559 				  irb->mt_layer,
    560 				  &dst_x, &dst_y);
    561 
    562    irb->draw_x = dst_x;
    563    irb->draw_y = dst_y;
    564 }
    565 
    566 /**
    567  * Rendering to tiled buffers requires that the base address of the
    568  * buffer be aligned to a page boundary.  We generally render to
    569  * textures by pointing the surface at the mipmap image level, which
    570  * may not be aligned to a tile boundary.
    571  *
    572  * This function returns an appropriately-aligned base offset
    573  * according to the tiling restrictions, plus any required x/y offset
    574  * from there.
    575  */
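         /*
          * Illustrative example (assuming X-tiling, where one tile covers 128x8
          * pixels of a 32bpp buffer): draw_x/draw_y of (200, 10) yields
          * tile_x/tile_y of (72, 2) plus a base offset corresponding to the
          * tile-aligned position (128, 8).
          */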
    576 uint32_t
    577 intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
    578 				uint32_t *tile_x,
    579 				uint32_t *tile_y)
    580 {
    581    struct intel_region *region = irb->mt->region;
    582    uint32_t mask_x, mask_y;
    583 
    584    intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
    585 
    586    *tile_x = irb->draw_x & mask_x;
    587    *tile_y = irb->draw_y & mask_y;
    588    return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x,
    589                                           irb->draw_y & ~mask_y, false);
    590 }
    591 
    592 /**
    593  * Called by glFramebufferTexture[123]DEXT() (and other places) to
    594  * prepare for rendering into texture memory.  This might be called
     595  * many times to choose different texture levels, cube faces, etc.
    596  * before intel_finish_render_texture() is ever called.
    597  */
    598 static void
    599 intel_render_texture(struct gl_context * ctx,
    600                      struct gl_framebuffer *fb,
    601                      struct gl_renderbuffer_attachment *att)
    602 {
    603    struct intel_context *intel = intel_context(ctx);
    604    struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
    605    struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
    606    struct intel_texture_image *intel_image = intel_texture_image(image);
    607    struct intel_mipmap_tree *mt = intel_image->mt;
    608    int layer;
    609 
    610    (void) fb;
    611 
    612    if (att->CubeMapFace > 0) {
    613       assert(att->Zoffset == 0);
    614       layer = att->CubeMapFace;
    615    } else {
    616       layer = att->Zoffset;
    617    }
    618 
    619    if (!intel_image->mt) {
    620       /* Fallback on drawing to a texture that doesn't have a miptree
    621        * (has a border, width/height 0, etc.)
    622        */
    623       _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
    624       _swrast_render_texture(ctx, fb, att);
    625       return;
    626    }
    627    else if (!irb) {
    628       intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
    629 
    630       irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0);
    631 
    632       if (irb) {
    633          /* bind the wrapper to the attachment point */
    634          _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base);
    635       }
    636       else {
    637          /* fallback to software rendering */
    638          _swrast_render_texture(ctx, fb, att);
    639          return;
    640       }
    641    }
    642 
    643    if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
    644        _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
    645        _swrast_render_texture(ctx, fb, att);
    646        return;
    647    }
    648 
    649    irb->tex_image = image;
    650 
    651    DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
    652        _mesa_get_format_name(image->TexFormat),
    653        att->Texture->Name, image->Width, image->Height,
    654        irb->Base.Base.RefCount);
    655 
    656    /* update drawing region, etc */
    657    intel_draw_buffer(ctx);
    658 }
    659 
    660 
    661 /**
    662  * Called by Mesa when rendering to a texture is done.
    663  */
    664 static void
    665 intel_finish_render_texture(struct gl_context * ctx,
    666                             struct gl_renderbuffer_attachment *att)
    667 {
    668    struct intel_context *intel = intel_context(ctx);
    669    struct gl_texture_object *tex_obj = att->Texture;
    670    struct gl_texture_image *image =
    671       tex_obj->Image[att->CubeMapFace][att->TextureLevel];
    672    struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
    673 
    674    DBG("Finish render %s texture tex=%u\n",
    675        _mesa_get_format_name(image->TexFormat), att->Texture->Name);
    676 
    677    if (irb)
    678       irb->tex_image = NULL;
    679 
    680    /* Since we've (probably) rendered to the texture and will (likely) use
    681     * it in the texture domain later on in this batchbuffer, flush the
    682     * batch.  Once again, we wish for a domain tracker in libdrm to cover
    683     * usage inside of a batchbuffer like GEM does in the kernel.
    684     */
    685    intel_batchbuffer_emit_mi_flush(intel);
    686 }
    687 
    688 /**
    689  * Do additional "completeness" testing of a framebuffer object.
    690  */
    691 static void
    692 intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
    693 {
    694    struct intel_context *intel = intel_context(ctx);
    695    const struct intel_renderbuffer *depthRb =
    696       intel_get_renderbuffer(fb, BUFFER_DEPTH);
    697    const struct intel_renderbuffer *stencilRb =
    698       intel_get_renderbuffer(fb, BUFFER_STENCIL);
    699    struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
    700    int i;
    701 
    702    DBG("%s() on fb %p (%s)\n", __FUNCTION__,
    703        fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
    704 	    (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));
    705 
    706    if (depthRb)
    707       depth_mt = depthRb->mt;
    708    if (stencilRb) {
    709       stencil_mt = stencilRb->mt;
    710       if (stencil_mt->stencil_mt)
    711 	 stencil_mt = stencil_mt->stencil_mt;
    712    }
    713 
    714    if (depth_mt && stencil_mt) {
    715       if (depth_mt == stencil_mt) {
    716 	 /* For true packed depth/stencil (not faked on prefers-separate-stencil
    717 	  * hardware) we need to be sure they're the same level/layer, since
    718 	  * we'll be emitting a single packet describing the packed setup.
    719 	  */
    720 	 if (depthRb->mt_level != stencilRb->mt_level ||
    721 	     depthRb->mt_layer != stencilRb->mt_layer) {
    722 	    DBG("depth image level/layer %d/%d != stencil image %d/%d\n",
    723 		depthRb->mt_level,
    724 		depthRb->mt_layer,
    725 		stencilRb->mt_level,
    726 		stencilRb->mt_layer);
    727 	    fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
    728 	 }
    729       } else {
    730 	 if (!intel->has_separate_stencil) {
    731 	    DBG("separate stencil unsupported\n");
    732 	    fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
    733 	 }
    734 	 if (stencil_mt->format != MESA_FORMAT_S8) {
    735 	    DBG("separate stencil is %s instead of S8\n",
    736 		_mesa_get_format_name(stencil_mt->format));
    737 	    fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
    738 	 }
    739 	 if (intel->gen < 7 && depth_mt->hiz_mt == NULL) {
    740 	    /* Before Gen7, separate depth and stencil buffers can be used
    741 	     * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
    742 	     * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
    743 	     *     [DevSNB]: This field must be set to the same value (enabled
    744 	     *     or disabled) as Hierarchical Depth Buffer Enable.
    745 	     */
    746 	    DBG("separate stencil without HiZ\n");
    747 	    fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
    748 	 }
    749       }
    750    }
    751 
    752    for (i = 0; i < Elements(fb->Attachment); i++) {
    753       struct gl_renderbuffer *rb;
    754       struct intel_renderbuffer *irb;
    755 
    756       if (fb->Attachment[i].Type == GL_NONE)
    757 	 continue;
    758 
     759       /* A supported attachment will have a Renderbuffer set either
     760        * from being a Renderbuffer or from being a texture that got
     761        * wrapped by intel_render_texture().
     762        */
    763       rb = fb->Attachment[i].Renderbuffer;
    764       if (rb == NULL) {
    765 	 DBG("attachment without renderbuffer\n");
    766 	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
    767 	 continue;
    768       }
    769 
    770       if (fb->Attachment[i].Type == GL_TEXTURE) {
    771 	 const struct gl_texture_image *img =
    772 	    _mesa_get_attachment_teximage_const(&fb->Attachment[i]);
    773 
    774 	 if (img->Border) {
    775 	    DBG("texture with border\n");
    776 	    fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
    777 	    continue;
    778 	 }
    779       }
    780 
    781       irb = intel_renderbuffer(rb);
    782       if (irb == NULL) {
    783 	 DBG("software rendering renderbuffer\n");
    784 	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
    785 	 continue;
    786       }
    787 
    788       if (!intel->vtbl.render_target_supported(intel, rb)) {
    789 	 DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
    790 	     _mesa_get_format_name(intel_rb_format(irb)));
    791 	 fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
    792       }
    793    }
    794 }
    795 
    796 /**
    797  * Try to do a glBlitFramebuffer using glCopyTexSubImage2D
    798  * We can do this when the dst renderbuffer is actually a texture and
    799  * there is no scaling, mirroring or scissoring.
    800  *
    801  * \return new buffer mask indicating the buffers left to blit using the
    802  *         normal path.
    803  */
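         /*
          * e.g. (illustrative call) a 1:1, unscissored copy such as
          *    glBlitFramebuffer(0, 0, 256, 256, 0, 0, 256, 256,
          *                      GL_COLOR_BUFFER_BIT, GL_NEAREST)
          * into a texture-backed color attachment can be satisfied here; scaled,
          * mirrored, or scissored blits are left in the returned mask.
          */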
    804 static GLbitfield
    805 intel_blit_framebuffer_copy_tex_sub_image(struct gl_context *ctx,
    806                                           GLint srcX0, GLint srcY0,
    807                                           GLint srcX1, GLint srcY1,
    808                                           GLint dstX0, GLint dstY0,
    809                                           GLint dstX1, GLint dstY1,
    810                                           GLbitfield mask, GLenum filter)
    811 {
    812    if (mask & GL_COLOR_BUFFER_BIT) {
    813       const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
    814       const struct gl_framebuffer *readFb = ctx->ReadBuffer;
    815       const struct gl_renderbuffer_attachment *drawAtt =
    816          &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
    817       struct intel_renderbuffer *srcRb =
    818          intel_renderbuffer(readFb->_ColorReadBuffer);
    819 
     820       /* If the source and destination are the same size, there is no
     821          mirroring, the rectangles lie within the bounds of the read and
     822          draw buffers, and there is no scissor, then we can use
     823          glCopyTexSubImage2D to implement the blit. This will end
     824          up as a fast hardware blit on some drivers. */
    825       if (srcRb && drawAtt && drawAtt->Texture &&
    826           srcX0 - srcX1 == dstX0 - dstX1 &&
    827           srcY0 - srcY1 == dstY0 - dstY1 &&
    828           srcX1 >= srcX0 &&
    829           srcY1 >= srcY0 &&
    830           srcX0 >= 0 && srcX1 <= readFb->Width &&
    831           srcY0 >= 0 && srcY1 <= readFb->Height &&
    832           dstX0 >= 0 && dstX1 <= drawFb->Width &&
    833           dstY0 >= 0 && dstY1 <= drawFb->Height &&
    834           !ctx->Scissor.Enabled) {
    835          const struct gl_texture_object *texObj = drawAtt->Texture;
    836          const GLuint dstLevel = drawAtt->TextureLevel;
    837          const GLenum target = texObj->Target;
    838 
    839          struct gl_texture_image *texImage =
    840             _mesa_select_tex_image(ctx, texObj, target, dstLevel);
    841 
    842          if (intel_copy_texsubimage(intel_context(ctx),
    843                                     intel_texture_image(texImage),
    844                                     dstX0, dstY0,
    845                                     srcRb,
    846                                     srcX0, srcY0,
    847                                     srcX1 - srcX0, /* width */
    848                                     srcY1 - srcY0))
    849             mask &= ~GL_COLOR_BUFFER_BIT;
    850       }
    851    }
    852 
    853    return mask;
    854 }
    855 
    856 static void
    857 intel_blit_framebuffer(struct gl_context *ctx,
    858                        GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
    859                        GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
    860                        GLbitfield mask, GLenum filter)
    861 {
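            /* Strategy, as implemented below: try the BLT-based CopyTexSubImage
             * path first, then (when built for i965-class hardware) the BLORP
             * path, and finally fall back to the meta implementation for any
             * buffers still left in the mask.
             */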
     862    /* Try the faster glCopyTexSubImage2D approach first, which uses the BLT. */
    863    mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
    864                                                     srcX0, srcY0, srcX1, srcY1,
    865                                                     dstX0, dstY0, dstX1, dstY1,
    866                                                     mask, filter);
    867    if (mask == 0x0)
    868       return;
    869 
    870 #ifndef I915
    871    mask = brw_blorp_framebuffer(intel_context(ctx),
    872                                 srcX0, srcY0, srcX1, srcY1,
    873                                 dstX0, dstY0, dstX1, dstY1,
    874                                 mask, filter);
    875    if (mask == 0x0)
    876       return;
    877 #endif
    878 
    879    _mesa_meta_BlitFramebuffer(ctx,
    880                               srcX0, srcY0, srcX1, srcY1,
    881                               dstX0, dstY0, dstX1, dstY1,
    882                               mask, filter);
    883 }
    884 
    885 /**
    886  * This is a no-op except on multisample buffers shared with DRI2.
    887  */
    888 void
    889 intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer *irb)
    890 {
    891    if (irb->mt && irb->mt->singlesample_mt)
    892       irb->mt->need_downsample = true;
    893 }
    894 
    895 void
    896 intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
    897 {
    898    if (irb->mt) {
    899       intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
    900                                                 irb->mt_level,
    901                                                 irb->mt_layer);
    902    }
    903 }
    904 
    905 void
    906 intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
    907 {
    908    if (irb->mt) {
    909       intel_miptree_slice_set_needs_depth_resolve(irb->mt,
    910                                                   irb->mt_level,
    911                                                   irb->mt_layer);
    912    }
    913 }
    914 
    915 bool
    916 intel_renderbuffer_resolve_hiz(struct intel_context *intel,
    917 			       struct intel_renderbuffer *irb)
    918 {
    919    if (irb->mt)
    920       return intel_miptree_slice_resolve_hiz(intel,
    921                                              irb->mt,
    922                                              irb->mt_level,
    923                                              irb->mt_layer);
    924 
    925    return false;
    926 }
    927 
    928 bool
    929 intel_renderbuffer_resolve_depth(struct intel_context *intel,
    930 				 struct intel_renderbuffer *irb)
    931 {
    932    if (irb->mt)
    933       return intel_miptree_slice_resolve_depth(intel,
    934                                                irb->mt,
    935                                                irb->mt_level,
    936                                                irb->mt_layer);
    937 
    938    return false;
    939 }
    940 
    941 /**
    942  * Do one-time context initializations related to GL_EXT_framebuffer_object.
    943  * Hook in device driver functions.
    944  */
    945 void
    946 intel_fbo_init(struct intel_context *intel)
    947 {
    948    intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
    949    intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
    950    intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
    951    intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
    952    intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
    953    intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
    954    intel->ctx.Driver.RenderTexture = intel_render_texture;
    955    intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
    956    intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
    957    intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
    958    intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
    959 
    960 #if FEATURE_OES_EGL_image
    961    intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
    962       intel_image_target_renderbuffer_storage;
    963 #endif
    964 }
    965