/**************************************************************************
 *
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <GL/gl.h>
#include <GL/internal/dri_interface.h>

#include "intel_batchbuffer.h"
#include "intel_chipset.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex_layout.h"
#include "intel_tex.h"
#include "intel_blit.h"

#include "main/enums.h"
#include "main/formats.h"
#include "main/glformats.h"
#include "main/teximage.h"

#define FILE_DEBUG_FLAG DEBUG_MIPTREE

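/* Collapse the per-face cube map targets down to the cube map target that
 * owns the miptree; all other targets map to themselves.
 */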
static GLenum
target_to_target(GLenum target)
{
   switch (target) {
   case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
   case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
   case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
      return GL_TEXTURE_CUBE_MAP_ARB;
   default:
      return target;
   }
}

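/**
 * Allocates an intel_mipmap_tree and computes its layout (level sizes and
 * offsets) for the given target, format, and dimensions, without allocating
 * a backing region.
 */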
struct intel_mipmap_tree *
intel_miptree_create_layout(struct intel_context *intel,
                            GLenum target,
                            mesa_format format,
                            GLuint first_level,
                            GLuint last_level,
                            GLuint width0,
                            GLuint height0,
                            GLuint depth0)
{
   struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
   if (!mt)
      return NULL;

   DBG("%s target %s format %s level %d..%d <-- %p\n", __func__,
       _mesa_enum_to_string(target),
       _mesa_get_format_name(format),
       first_level, last_level, mt);

   mt->target = target_to_target(target);
   mt->format = format;
   mt->first_level = first_level;
   mt->last_level = last_level;
   mt->logical_width0 = width0;
   mt->logical_height0 = height0;
   mt->logical_depth0 = depth0;

   /* The cpp is bytes per (1, blockheight)-sized block for compressed
    * textures.  This is why you'll see divides by blockheight all over.
    */
   unsigned bw, bh;
   _mesa_get_format_block_size(format, &bw, &bh);
   assert(_mesa_get_format_bytes(mt->format) % bw == 0);
   mt->cpp = _mesa_get_format_bytes(mt->format) / bw;

   mt->compressed = _mesa_is_format_compressed(format);
   mt->refcount = 1;

   if (target == GL_TEXTURE_CUBE_MAP) {
      assert(depth0 == 1);
      depth0 = 6;
   }

   mt->physical_width0 = width0;
   mt->physical_height0 = height0;
   mt->physical_depth0 = depth0;

   intel_get_texture_alignment_unit(intel, mt->format,
                                    &mt->align_w, &mt->align_h);

   if (intel->is_945)
      i945_miptree_layout(mt);
   else
      i915_miptree_layout(mt);

   return mt;
}

/**
 * \brief Helper function for intel_miptree_create().
 */
static uint32_t
intel_miptree_choose_tiling(struct intel_context *intel,
                            mesa_format format,
                            uint32_t width0,
                            enum intel_miptree_tiling_mode requested,
                            struct intel_mipmap_tree *mt)
{
   /* Some usages may want only one type of tiling, like depth miptrees (Y
    * tiled), or temporary BOs for uploading data once (linear).
    */
   switch (requested) {
   case INTEL_MIPTREE_TILING_ANY:
      break;
   case INTEL_MIPTREE_TILING_Y:
      return I915_TILING_Y;
   case INTEL_MIPTREE_TILING_NONE:
      return I915_TILING_NONE;
   }

   int minimum_pitch = mt->total_width * mt->cpp;

   /* If the width is much smaller than a tile, don't bother tiling. */
   if (minimum_pitch < 64)
      return I915_TILING_NONE;

   if (ALIGN(minimum_pitch, 512) >= 32768) {
      perf_debug("%dx%d miptree too large to blit, falling back to untiled",
                 mt->total_width, mt->total_height);
      return I915_TILING_NONE;
   }

   /* We don't have BLORP to handle Y-tiled blits, so use X-tiling. */
   return I915_TILING_X;
}

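/**
 * Creates a miptree and allocates a backing region for it.
 *
 * The tiling mode is chosen by intel_miptree_choose_tiling() unless the
 * caller requests a specific mode.  Returns NULL on allocation failure or
 * for zero-sized (null) textures.
 */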
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
                     GLenum target,
                     mesa_format format,
                     GLuint first_level,
                     GLuint last_level,
                     GLuint width0,
                     GLuint height0,
                     GLuint depth0,
                     bool expect_accelerated_upload,
                     enum intel_miptree_tiling_mode requested_tiling)
{
   struct intel_mipmap_tree *mt;
   GLuint total_width, total_height;

   mt = intel_miptree_create_layout(intel, target, format,
                                    first_level, last_level, width0,
                                    height0, depth0);
   /*
    * A total width or height of zero indicates the null texture.
    */
   if (!mt || !mt->total_width || !mt->total_height) {
      intel_miptree_release(&mt);
      return NULL;
   }

   total_width = mt->total_width;
   total_height = mt->total_height;

   uint32_t tiling = intel_miptree_choose_tiling(intel, format, width0,
                                                 requested_tiling,
                                                 mt);
   bool y_or_x = tiling == (I915_TILING_Y | I915_TILING_X);

   mt->region = intel_region_alloc(intel->intelScreen,
                                   y_or_x ? I915_TILING_Y : tiling,
                                   mt->cpp,
                                   total_width,
                                   total_height,
                                   expect_accelerated_upload);

   /* If the region is too large to fit in the aperture, we need to use the
    * BLT engine to support it.  The BLT paths can't currently handle Y-tiling,
    * so we need to fall back to X.
    */
   if (y_or_x && mt->region->bo->size >= intel->max_gtt_map_object_size) {
      perf_debug("%dx%d miptree larger than aperture; falling back to X-tiled\n",
                 mt->total_width, mt->total_height);
      intel_region_release(&mt->region);

      mt->region = intel_region_alloc(intel->intelScreen,
                                      I915_TILING_X,
                                      mt->cpp,
                                      total_width,
                                      total_height,
                                      expect_accelerated_upload);
   }

   mt->offset = 0;

   if (!mt->region) {
      intel_miptree_release(&mt);
      return NULL;
   }

   return mt;
}

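/**
 * Wraps an existing buffer object in a single-level 2D miptree, without
 * allocating any new storage.  The BO is referenced, not copied.
 */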
struct intel_mipmap_tree *
intel_miptree_create_for_bo(struct intel_context *intel,
                            drm_intel_bo *bo,
                            mesa_format format,
                            uint32_t offset,
                            uint32_t width,
                            uint32_t height,
                            int pitch,
                            uint32_t tiling)
{
   struct intel_mipmap_tree *mt;

   struct intel_region *region = calloc(1, sizeof(*region));
   if (!region)
      return NULL;

   /* Nothing will be able to use this miptree with the BO if the offset isn't
    * aligned.
    */
   if (tiling != I915_TILING_NONE)
      assert(offset % 4096 == 0);

   /* miptrees can't handle negative pitch.  If you need flipping of images,
    * that's outside of the scope of the mt.
    */
   assert(pitch >= 0);

   mt = intel_miptree_create_layout(intel, GL_TEXTURE_2D, format,
                                    0, 0,
                                    width, height, 1);
   if (!mt) {
      free(region);
      return NULL;
   }

   region->cpp = mt->cpp;
   region->width = width;
   region->height = height;
   region->pitch = pitch;
   region->refcount = 1;
   drm_intel_bo_reference(bo);
   region->bo = bo;
   region->tiling = tiling;

   mt->region = region;
   mt->offset = offset;

   return mt;
}


/**
 * Wraps the given DRI2 buffer region with a miptree.
 *
 * This driver does not support multisampled DRI2 buffers, so the region is
 * wrapped directly; no multisample resolve tree is involved.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_dri2_buffer(struct intel_context *intel,
                                     unsigned dri_attachment,
                                     mesa_format format,
                                     struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are shared
    * through DRI2.
    */
   assert(dri_attachment == __DRI_BUFFER_BACK_LEFT ||
          dri_attachment == __DRI_BUFFER_FRONT_LEFT ||
          dri_attachment == __DRI_BUFFER_FAKE_FRONT_LEFT);
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   if (!mt)
      return NULL;
   mt->region->name = region->name;

   return mt;
}

/**
 * Wraps the given image-loader buffer region with a miptree.
 *
 * This driver does not support multisampled image buffers, so the region is
 * wrapped directly and num_samples is ignored.
 */
struct intel_mipmap_tree*
intel_miptree_create_for_image_buffer(struct intel_context *intel,
                                      enum __DRIimageBufferMask buffer_type,
                                      mesa_format format,
                                      uint32_t num_samples,
                                      struct intel_region *region)
{
   struct intel_mipmap_tree *mt = NULL;

   /* Only the front and back buffers, which are color buffers, are allocated
    * through the image loader.
    */
   assert(_mesa_get_format_base_format(format) == GL_RGB ||
          _mesa_get_format_base_format(format) == GL_RGBA);

   mt = intel_miptree_create_for_bo(intel,
                                    region->bo,
                                    format,
                                    0,
                                    region->width,
                                    region->height,
                                    region->pitch,
                                    region->tiling);
   return mt;
}

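/**
 * Creates a single-level 2D miptree of the given format and size for use as
 * a renderbuffer.
 */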
struct intel_mipmap_tree*
intel_miptree_create_for_renderbuffer(struct intel_context *intel,
                                      mesa_format format,
                                      uint32_t width,
                                      uint32_t height)
{
   uint32_t depth = 1;

   return intel_miptree_create(intel, GL_TEXTURE_2D, format, 0, 0,
                               width, height, depth, true,
                               INTEL_MIPTREE_TILING_ANY);
}

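/**
 * Makes *dst point at src: releases whatever *dst previously referenced and
 * takes a new reference on src (which may be NULL).
 */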
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
                        struct intel_mipmap_tree *src)
{
   if (*dst == src)
      return;

   intel_miptree_release(dst);

   if (src) {
      src->refcount++;
      DBG("%s %p refcount now %d\n", __func__, src, src->refcount);
   }

   *dst = src;
}


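/**
 * Drops a reference to *mt, freeing the miptree, its region reference, and
 * its per-level slice arrays when the reference count reaches zero, and
 * sets *mt to NULL.
 */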
void
intel_miptree_release(struct intel_mipmap_tree **mt)
{
   if (!*mt)
      return;

   DBG("%s %p refcount will be %d\n", __func__, *mt, (*mt)->refcount - 1);
   if (--(*mt)->refcount <= 0) {
      GLuint i;

      DBG("%s deleting %p\n", __func__, *mt);

      intel_region_release(&((*mt)->region));

      for (i = 0; i < MAX_TEXTURE_LEVELS; i++) {
         free((*mt)->level[i].slice);
      }

      free(*mt);
   }
   *mt = NULL;
}

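/**
 * Returns the width/height/depth of a texture image as the miptree stores
 * them.  For 1D array textures the GL "height" is really the array length,
 * so it is reported as depth with a height of 1.
 */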
void
intel_miptree_get_dimensions_for_image(struct gl_texture_image *image,
                                       int *width, int *height, int *depth)
{
   switch (image->TexObject->Target) {
   case GL_TEXTURE_1D_ARRAY:
      *width = image->Width;
      *height = 1;
      *depth = image->Height;
      break;
   default:
      *width = image->Width;
      *height = image->Height;
      *depth = image->Depth;
      break;
   }
}

/**
 * Can the image be pulled into a unified mipmap tree?  This mirrors
 * the completeness test in a lot of ways.
 *
 * Not sure whether I want to pass gl_texture_image here.
 */
bool
intel_miptree_match_image(struct intel_mipmap_tree *mt,
                          struct gl_texture_image *image)
{
   struct intel_texture_image *intelImage = intel_texture_image(image);
   GLuint level = intelImage->base.Base.Level;
   int width, height, depth;

   /* glTexImage* chooses the texture object based on the target passed in,
    * and objects can't change targets over their lifetimes, so this should
    * be true.
    */
   assert(target_to_target(image->TexObject->Target) == mt->target);

   mesa_format mt_format = mt->format;

   if (image->TexFormat != mt_format)
      return false;

   intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);

   if (mt->target == GL_TEXTURE_CUBE_MAP)
      depth = 6;

   /* Test image dimensions against the base level image adjusted for
    * minification.  This will also catch images not present in the
    * tree, changed targets, etc.
    */
   if (mt->target == GL_TEXTURE_2D_MULTISAMPLE ||
       mt->target == GL_TEXTURE_2D_MULTISAMPLE_ARRAY) {
      /* A nonzero level here is always bogus. */
      assert(level == 0);

      if (width != mt->logical_width0 ||
          height != mt->logical_height0 ||
          depth != mt->logical_depth0) {
         return false;
      }
   } else {
      /* All normal textures, renderbuffers, etc. */
      if (width != mt->level[level].width ||
          height != mt->level[level].height ||
          depth != mt->level[level].depth) {
         return false;
      }
   }

   return true;
}


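/**
 * Records the size and x/y position of a mipmap level within the miptree and
 * allocates its per-slice offset array (slice 0 starts at the level's
 * offset).
 */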
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
                             GLuint level,
                             GLuint x, GLuint y,
                             GLuint w, GLuint h, GLuint d)
{
   mt->level[level].width = w;
   mt->level[level].height = h;
   mt->level[level].depth = d;
   mt->level[level].level_x = x;
   mt->level[level].level_y = y;

   DBG("%s level %d size: %d,%d,%d offset %d,%d\n", __func__,
       level, w, h, d, x, y);

   assert(mt->level[level].slice == NULL);

   mt->level[level].slice = calloc(d, sizeof(*mt->level[0].slice));
   mt->level[level].slice[0].x_offset = mt->level[level].level_x;
   mt->level[level].slice[0].y_offset = mt->level[level].level_y;
}


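/**
 * Sets the offset of image \c img (a depth slice or cube face) within level
 * \c level, relative to the level's own origin.
 */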
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint img,
                               GLuint x, GLuint y)
{
   if (img == 0 && level == 0)
      assert(x == 0 && y == 0);

   assert(img < mt->level[level].depth);

   mt->level[level].slice[img].x_offset = mt->level[level].level_x + x;
   mt->level[level].slice[img].y_offset = mt->level[level].level_y + y;

   DBG("%s level %d img %d pos %d,%d\n",
       __func__, level, img,
       mt->level[level].slice[img].x_offset,
       mt->level[level].slice[img].y_offset);
}

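/**
 * Returns the x/y offset, in pixels, of the given slice of the given level
 * within the miptree's 2D layout.
 */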
void
intel_miptree_get_image_offset(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               GLuint *x, GLuint *y)
{
   assert(slice < mt->level[level].depth);

   *x = mt->level[level].slice[slice].x_offset;
   *y = mt->level[level].slice[slice].y_offset;
}

/**
 * Rendering with tiled buffers requires that the base address of the buffer
 * be aligned to a page boundary.  For renderbuffers, and sometimes with
 * textures, we may want the surface to point at a texture image level that
 * isn't at a page boundary.
 *
 * This function returns an appropriately-aligned base offset
 * according to the tiling restrictions, plus any required x/y offset
 * from there.
 */
uint32_t
intel_miptree_get_tile_offsets(struct intel_mipmap_tree *mt,
                               GLuint level, GLuint slice,
                               uint32_t *tile_x,
                               uint32_t *tile_y)
{
   struct intel_region *region = mt->region;
   uint32_t x, y;
   uint32_t mask_x, mask_y;

   intel_region_get_tile_masks(region, &mask_x, &mask_y, false);
   intel_miptree_get_image_offset(mt, level, slice, &x, &y);

   *tile_x = x & mask_x;
   *tile_y = y & mask_y;

   return intel_region_get_aligned_offset(region, x & ~mask_x, y & ~mask_y,
                                          false);
}

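/**
 * CPU fallback for copying one slice of one level between two miptrees:
 * maps both slices and copies the data row by row, or with a single memcpy
 * when both strides match the row size.
 */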
static void
intel_miptree_copy_slice_sw(struct intel_context *intel,
                            struct intel_mipmap_tree *dst_mt,
                            struct intel_mipmap_tree *src_mt,
                            int level,
                            int slice,
                            int width,
                            int height)
{
   void *src, *dst;
   int src_stride, dst_stride;
   int cpp = dst_mt->cpp;

   intel_miptree_map(intel, src_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_READ_BIT,
                     &src, &src_stride);

   intel_miptree_map(intel, dst_mt,
                     level, slice,
                     0, 0,
                     width, height,
                     GL_MAP_WRITE_BIT | GL_MAP_INVALIDATE_RANGE_BIT,
                     &dst, &dst_stride);

   DBG("sw blit %s mt %p %p/%d -> %s mt %p %p/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src, src_stride,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst, dst_stride,
       width, height);

   int row_size = cpp * width;
   if (src_stride == row_size &&
       dst_stride == row_size) {
      memcpy(dst, src, row_size * height);
   } else {
      for (int i = 0; i < height; i++) {
         memcpy(dst, src, row_size);
         dst += dst_stride;
         src += src_stride;
      }
   }

   intel_miptree_unmap(intel, dst_mt, level, slice);
   intel_miptree_unmap(intel, src_mt, level, slice);
}

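/**
 * Copies a single slice of one mip level from src_mt to dst_mt, preferring
 * the blitter and falling back to the mapped software copy if the blit
 * fails.
 */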
static void
intel_miptree_copy_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *dst_mt,
                         struct intel_mipmap_tree *src_mt,
                         int level,
                         int face,
                         int depth)
{
   mesa_format format = src_mt->format;
   uint32_t width = src_mt->level[level].width;
   uint32_t height = src_mt->level[level].height;
   int slice;

   if (face > 0)
      slice = face;
   else
      slice = depth;

   assert(depth < src_mt->level[level].depth);
   assert(src_mt->format == dst_mt->format);

   if (dst_mt->compressed) {
      height = ALIGN(height, dst_mt->align_h) / dst_mt->align_h;
      width = ALIGN(width, dst_mt->align_w);
   }

   uint32_t dst_x, dst_y, src_x, src_y;
   intel_miptree_get_image_offset(dst_mt, level, slice, &dst_x, &dst_y);
   intel_miptree_get_image_offset(src_mt, level, slice, &src_x, &src_y);

   DBG("validate blit mt %s %p %d,%d/%d -> mt %s %p %d,%d/%d (%dx%d)\n",
       _mesa_get_format_name(src_mt->format),
       src_mt, src_x, src_y, src_mt->region->pitch,
       _mesa_get_format_name(dst_mt->format),
       dst_mt, dst_x, dst_y, dst_mt->region->pitch,
       width, height);

   if (!intel_miptree_blit(intel,
                           src_mt, level, slice, 0, 0, false,
                           dst_mt, level, slice, 0, 0, false,
                           width, height, GL_COPY)) {
      perf_debug("miptree validate blit for %s failed\n",
                 _mesa_get_format_name(format));

      intel_miptree_copy_slice_sw(intel, dst_mt, src_mt, level, slice,
                                  width, height);
   }
}

/**
 * Copies the image's current data to the given miptree, and associates that
 * miptree with the image.
 *
 * If \c invalidate is true, then the actual image data does not need to be
 * copied, but the image still needs to be associated to the new miptree (this
 * is set to true if we're about to clear the image).
 */
void
intel_miptree_copy_teximage(struct intel_context *intel,
                            struct intel_texture_image *intelImage,
                            struct intel_mipmap_tree *dst_mt,
                            bool invalidate)
{
   struct intel_mipmap_tree *src_mt = intelImage->mt;
   struct intel_texture_object *intel_obj =
      intel_texture_object(intelImage->base.Base.TexObject);
   int level = intelImage->base.Base.Level;
   int face = intelImage->base.Base.Face;
   GLuint depth = intelImage->base.Base.Depth;

   if (!invalidate) {
      for (int slice = 0; slice < depth; slice++) {
         intel_miptree_copy_slice(intel, dst_mt, src_mt, level, face, slice);
      }
   }

   intel_miptree_reference(&intelImage->mt, dst_mt);
   intel_obj->needs_validate = true;
}

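/**
 * Maps the miptree's entire BO into the CPU's address space (through the GTT
 * for tiled buffers) and returns a pointer to its start.  Flushes any pending
 * batchbuffer first, and may stall if the BO is still busy on the GPU.
 */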
void *
intel_miptree_map_raw(struct intel_context *intel, struct intel_mipmap_tree *mt)
{
   drm_intel_bo *bo = mt->region->bo;

   if (unlikely(INTEL_DEBUG & DEBUG_PERF)) {
      if (drm_intel_bo_busy(bo)) {
         perf_debug("Mapping a busy BO, causing a stall on the GPU.\n");
      }
   }

   intel_flush(&intel->ctx);

   if (mt->region->tiling != I915_TILING_NONE)
      drm_intel_gem_bo_map_gtt(bo);
   else
      drm_intel_bo_map(bo, true);

   return bo->virtual;
}

void
intel_miptree_unmap_raw(struct intel_mipmap_tree *mt)
{
   drm_intel_bo_unmap(mt->region->bo);
}

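/**
 * Maps the requested window of (level, slice) directly through a CPU mapping
 * of the underlying BO, with no staging copy.
 */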
static void
intel_miptree_map_gtt(struct intel_context *intel,
                      struct intel_mipmap_tree *mt,
                      struct intel_miptree_map *map,
                      unsigned int level, unsigned int slice)
{
   unsigned int bw, bh;
   void *base;
   unsigned int image_x, image_y;
   int x = map->x;
   int y = map->y;

   /* For compressed formats, the stride is the number of bytes per
    * row of blocks.  intel_miptree_get_image_offset() already does
    * the divide.
    */
   _mesa_get_format_block_size(mt->format, &bw, &bh);
   assert(y % bh == 0);
   y /= bh;

   base = intel_miptree_map_raw(intel, mt) + mt->offset;

   if (base == NULL)
      map->ptr = NULL;
   else {
      /* Note that in the case of cube maps, the caller must have passed the
       * slice number referencing the face.
       */
      intel_miptree_get_image_offset(mt, level, slice, &image_x, &image_y);
      x += image_x;
      y += image_y;

      map->stride = mt->region->pitch;
      map->ptr = base + y * map->stride + x * mt->cpp;
   }

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       x, y, map->ptr, map->stride);
}

static void
intel_miptree_unmap_gtt(struct intel_mipmap_tree *mt)
{
   intel_miptree_unmap_raw(mt);
}

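/**
 * Maps (level, slice) by blitting the requested window into a temporary
 * linear miptree and mapping that copy instead; used for tiled miptrees
 * that are too large to map through the GTT aperture.
 */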
static void
intel_miptree_map_blit(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       struct intel_miptree_map *map,
                       unsigned int level, unsigned int slice)
{
   map->mt = intel_miptree_create(intel, GL_TEXTURE_2D, mt->format,
                                  0, 0,
                                  map->w, map->h, 1,
                                  false,
                                  INTEL_MIPTREE_TILING_NONE);
   if (!map->mt) {
      fprintf(stderr, "Failed to allocate blit temporary\n");
      goto fail;
   }
   map->stride = map->mt->region->pitch;

   if (!intel_miptree_blit(intel,
                           mt, level, slice,
                           map->x, map->y, false,
                           map->mt, 0, 0,
                           0, 0, false,
                           map->w, map->h, GL_COPY)) {
      fprintf(stderr, "Failed to blit\n");
      goto fail;
   }

   intel_batchbuffer_flush(intel);
   map->ptr = intel_miptree_map_raw(intel, map->mt);

   DBG("%s: %d,%d %dx%d from mt %p (%s) %d,%d = %p/%d\n", __func__,
       map->x, map->y, map->w, map->h,
       mt, _mesa_get_format_name(mt->format),
       level, slice, map->ptr, map->stride);

   return;

fail:
   intel_miptree_release(&map->mt);
   map->ptr = NULL;
   map->stride = 0;
}

static void
intel_miptree_unmap_blit(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         struct intel_miptree_map *map,
                         unsigned int level,
                         unsigned int slice)
{
   struct gl_context *ctx = &intel->ctx;

   intel_miptree_unmap_raw(map->mt);

   if (map->mode & GL_MAP_WRITE_BIT) {
      bool ok = intel_miptree_blit(intel,
                                   map->mt, 0, 0,
                                   0, 0, false,
                                   mt, level, slice,
                                   map->x, map->y, false,
                                   map->w, map->h, GL_COPY);
      WARN_ONCE(!ok, "Failed to blit from linear temporary mapping");
   }

   intel_miptree_release(&map->mt);
}

/**
 * Create and attach a map to the miptree at (level, slice). Return the
 * attached map.
 */
static struct intel_miptree_map*
intel_miptree_attach_map(struct intel_mipmap_tree *mt,
                         unsigned int level,
                         unsigned int slice,
                         unsigned int x,
                         unsigned int y,
                         unsigned int w,
                         unsigned int h,
                         GLbitfield mode)
{
   struct intel_miptree_map *map = calloc(1, sizeof(*map));

   if (!map)
      return NULL;

   assert(mt->level[level].slice[slice].map == NULL);
   mt->level[level].slice[slice].map = map;

   map->mode = mode;
   map->x = x;
   map->y = y;
   map->w = w;
   map->h = h;

   return map;
}

/**
 * Release the map at (level, slice).
 */
static void
intel_miptree_release_map(struct intel_mipmap_tree *mt,
                          unsigned int level,
                          unsigned int slice)
{
   struct intel_miptree_map **map;

   map = &mt->level[level].slice[slice].map;
   free(*map);
   *map = NULL;
}

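/**
 * Maps a window of (level, slice) for CPU access, returning a pointer and
 * stride through out_ptr/out_stride.  Large tiled miptrees are mapped via a
 * blit to a linear temporary; everything else is mapped directly through the
 * GTT.  Each (level, slice) can have only one outstanding map at a time.
 */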
void
intel_miptree_map(struct intel_context *intel,
                  struct intel_mipmap_tree *mt,
                  unsigned int level,
                  unsigned int slice,
                  unsigned int x,
                  unsigned int y,
                  unsigned int w,
                  unsigned int h,
                  GLbitfield mode,
                  void **out_ptr,
                  int *out_stride)
{
   struct intel_miptree_map *map;

   map = intel_miptree_attach_map(mt, level, slice, x, y, w, h, mode);
   if (!map) {
      *out_ptr = NULL;
      *out_stride = 0;
      return;
   }

   /* See intel_miptree_blit() for details on the 32k pitch limit. */
   if (mt->region->tiling != I915_TILING_NONE &&
       mt->region->bo->size >= intel->max_gtt_map_object_size) {
      assert(mt->region->pitch < 32768);
      intel_miptree_map_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_map_gtt(intel, mt, map, level, slice);
   }

   *out_ptr = map->ptr;
   *out_stride = map->stride;

   if (map->ptr == NULL)
      intel_miptree_release_map(mt, level, slice);
}

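/**
 * Unmaps a previously mapped (level, slice), writing data back through the
 * blit path if the map used a blit temporary, and releases the map.
 */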
void
intel_miptree_unmap(struct intel_context *intel,
                    struct intel_mipmap_tree *mt,
                    unsigned int level,
                    unsigned int slice)
{
   struct intel_miptree_map *map = mt->level[level].slice[slice].map;

   if (!map)
      return;

   DBG("%s: mt %p (%s) level %d slice %d\n", __func__,
       mt, _mesa_get_format_name(mt->format), level, slice);

   if (map->mt) {
      intel_miptree_unmap_blit(intel, mt, map, level, slice);
   } else {
      intel_miptree_unmap_gtt(mt);
   }

   intel_miptree_release_map(mt, level, slice);
}