1 /* 2 * Copyright 2010 Jerome Glisse <glisse (at) freedesktop.org> 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * on the rights to use, copy, modify, merge, publish, distribute, sub 8 * license, and/or sell copies of the Software, and to permit persons to whom 9 * the Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, 19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 21 * USE OR OTHER DEALINGS IN THE SOFTWARE. 22 * 23 * Authors: 24 * Jerome Glisse 25 * Corbin Simpson 26 */ 27 #include "r600_pipe_common.h" 28 #include "r600_cs.h" 29 #include "r600_query.h" 30 #include "util/u_format.h" 31 #include "util/u_log.h" 32 #include "util/u_memory.h" 33 #include "util/u_pack_color.h" 34 #include "util/u_surface.h" 35 #include "util/os_time.h" 36 #include <errno.h> 37 #include <inttypes.h> 38 39 static void r600_texture_discard_cmask(struct r600_common_screen *rscreen, 40 struct r600_texture *rtex); 41 static enum radeon_surf_mode 42 r600_choose_tiling(struct r600_common_screen *rscreen, 43 const struct pipe_resource *templ); 44 45 46 bool r600_prepare_for_dma_blit(struct r600_common_context *rctx, 47 struct r600_texture *rdst, 48 unsigned dst_level, unsigned dstx, 49 unsigned dsty, unsigned dstz, 50 struct r600_texture *rsrc, 51 unsigned src_level, 52 const struct pipe_box *src_box) 53 { 54 if (!rctx->dma.cs) 55 return false; 56 57 if (rdst->surface.bpe != rsrc->surface.bpe) 58 return false; 59 60 /* MSAA: Blits don't exist in the real world. */ 61 if (rsrc->resource.b.b.nr_samples > 1 || 62 rdst->resource.b.b.nr_samples > 1) 63 return false; 64 65 /* Depth-stencil surfaces: 66 * When dst is linear, the DB->CB copy preserves HTILE. 67 * When dst is tiled, the 3D path must be used to update HTILE. 68 */ 69 if (rsrc->is_depth || rdst->is_depth) 70 return false; 71 72 /* CMASK as: 73 * src: Both texture and SDMA paths need decompression. Use SDMA. 74 * dst: If overwriting the whole texture, discard CMASK and use 75 * SDMA. Otherwise, use the 3D path. 76 */ 77 if (rdst->cmask.size && rdst->dirty_level_mask & (1 << dst_level)) { 78 /* The CMASK clear is only enabled for the first level. */ 79 assert(dst_level == 0); 80 if (!util_texrange_covers_whole_level(&rdst->resource.b.b, dst_level, 81 dstx, dsty, dstz, src_box->width, 82 src_box->height, src_box->depth)) 83 return false; 84 85 r600_texture_discard_cmask(rctx->screen, rdst); 86 } 87 88 /* All requirements are met. Prepare textures for SDMA. */ 89 if (rsrc->cmask.size && rsrc->dirty_level_mask & (1 << src_level)) 90 rctx->b.flush_resource(&rctx->b, &rsrc->resource.b.b); 91 92 assert(!(rsrc->dirty_level_mask & (1 << src_level))); 93 assert(!(rdst->dirty_level_mask & (1 << dst_level))); 94 95 return true; 96 } 97 98 /* Same as resource_copy_region, except that both upsampling and downsampling are allowed. */ 99 static void r600_copy_region_with_blit(struct pipe_context *pipe, 100 struct pipe_resource *dst, 101 unsigned dst_level, 102 unsigned dstx, unsigned dsty, unsigned dstz, 103 struct pipe_resource *src, 104 unsigned src_level, 105 const struct pipe_box *src_box) 106 { 107 struct pipe_blit_info blit; 108 109 memset(&blit, 0, sizeof(blit)); 110 blit.src.resource = src; 111 blit.src.format = src->format; 112 blit.src.level = src_level; 113 blit.src.box = *src_box; 114 blit.dst.resource = dst; 115 blit.dst.format = dst->format; 116 blit.dst.level = dst_level; 117 blit.dst.box.x = dstx; 118 blit.dst.box.y = dsty; 119 blit.dst.box.z = dstz; 120 blit.dst.box.width = src_box->width; 121 blit.dst.box.height = src_box->height; 122 blit.dst.box.depth = src_box->depth; 123 blit.mask = util_format_get_mask(src->format) & 124 util_format_get_mask(dst->format); 125 blit.filter = PIPE_TEX_FILTER_NEAREST; 126 127 if (blit.mask) { 128 pipe->blit(pipe, &blit); 129 } 130 } 131 132 /* Copy from a full GPU texture to a transfer's staging one. */ 133 static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer) 134 { 135 struct r600_common_context *rctx = (struct r600_common_context*)ctx; 136 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer; 137 struct pipe_resource *dst = &rtransfer->staging->b.b; 138 struct pipe_resource *src = transfer->resource; 139 140 if (src->nr_samples > 1) { 141 r600_copy_region_with_blit(ctx, dst, 0, 0, 0, 0, 142 src, transfer->level, &transfer->box); 143 return; 144 } 145 146 rctx->dma_copy(ctx, dst, 0, 0, 0, 0, src, transfer->level, 147 &transfer->box); 148 } 149 150 /* Copy from a transfer's staging texture to a full GPU one. */ 151 static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer) 152 { 153 struct r600_common_context *rctx = (struct r600_common_context*)ctx; 154 struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer; 155 struct pipe_resource *dst = transfer->resource; 156 struct pipe_resource *src = &rtransfer->staging->b.b; 157 struct pipe_box sbox; 158 159 u_box_3d(0, 0, 0, transfer->box.width, transfer->box.height, transfer->box.depth, &sbox); 160 161 if (dst->nr_samples > 1) { 162 r600_copy_region_with_blit(ctx, dst, transfer->level, 163 transfer->box.x, transfer->box.y, transfer->box.z, 164 src, 0, &sbox); 165 return; 166 } 167 168 rctx->dma_copy(ctx, dst, transfer->level, 169 transfer->box.x, transfer->box.y, transfer->box.z, 170 src, 0, &sbox); 171 } 172 173 static unsigned r600_texture_get_offset(struct r600_common_screen *rscreen, 174 struct r600_texture *rtex, unsigned level, 175 const struct pipe_box *box, 176 unsigned *stride, 177 unsigned *layer_stride) 178 { 179 *stride = rtex->surface.u.legacy.level[level].nblk_x * 180 rtex->surface.bpe; 181 assert((uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 <= UINT_MAX); 182 *layer_stride = (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4; 183 184 if (!box) 185 return rtex->surface.u.legacy.level[level].offset; 186 187 /* Each texture is an array of mipmap levels. Each level is 188 * an array of slices. */ 189 return rtex->surface.u.legacy.level[level].offset + 190 box->z * (uint64_t)rtex->surface.u.legacy.level[level].slice_size_dw * 4 + 191 (box->y / rtex->surface.blk_h * 192 rtex->surface.u.legacy.level[level].nblk_x + 193 box->x / rtex->surface.blk_w) * rtex->surface.bpe; 194 } 195 196 static int r600_init_surface(struct r600_common_screen *rscreen, 197 struct radeon_surf *surface, 198 const struct pipe_resource *ptex, 199 enum radeon_surf_mode array_mode, 200 unsigned pitch_in_bytes_override, 201 unsigned offset, 202 bool is_imported, 203 bool is_scanout, 204 bool is_flushed_depth) 205 { 206 const struct util_format_description *desc = 207 util_format_description(ptex->format); 208 bool is_depth, is_stencil; 209 int r; 210 unsigned i, bpe, flags = 0; 211 212 is_depth = util_format_has_depth(desc); 213 is_stencil = util_format_has_stencil(desc); 214 215 if (rscreen->chip_class >= EVERGREEN && !is_flushed_depth && 216 ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) { 217 bpe = 4; /* stencil is allocated separately on evergreen */ 218 } else { 219 bpe = util_format_get_blocksize(ptex->format); 220 assert(util_is_power_of_two(bpe)); 221 } 222 223 if (!is_flushed_depth && is_depth) { 224 flags |= RADEON_SURF_ZBUFFER; 225 226 if (is_stencil) 227 flags |= RADEON_SURF_SBUFFER; 228 } 229 230 if (ptex->bind & PIPE_BIND_SCANOUT || is_scanout) { 231 /* This should catch bugs in gallium users setting incorrect flags. */ 232 assert(ptex->nr_samples <= 1 && 233 ptex->array_size == 1 && 234 ptex->depth0 == 1 && 235 ptex->last_level == 0 && 236 !(flags & RADEON_SURF_Z_OR_SBUFFER)); 237 238 flags |= RADEON_SURF_SCANOUT; 239 } 240 241 if (ptex->bind & PIPE_BIND_SHARED) 242 flags |= RADEON_SURF_SHAREABLE; 243 if (is_imported) 244 flags |= RADEON_SURF_IMPORTED | RADEON_SURF_SHAREABLE; 245 if (!(ptex->flags & R600_RESOURCE_FLAG_FORCE_TILING)) 246 flags |= RADEON_SURF_OPTIMIZE_FOR_SPACE; 247 248 r = rscreen->ws->surface_init(rscreen->ws, ptex, flags, bpe, 249 array_mode, surface); 250 if (r) { 251 return r; 252 } 253 254 if (pitch_in_bytes_override && 255 pitch_in_bytes_override != surface->u.legacy.level[0].nblk_x * bpe) { 256 /* old ddx on evergreen over estimate alignment for 1d, only 1 level 257 * for those 258 */ 259 surface->u.legacy.level[0].nblk_x = pitch_in_bytes_override / bpe; 260 surface->u.legacy.level[0].slice_size_dw = 261 ((uint64_t)pitch_in_bytes_override * surface->u.legacy.level[0].nblk_y) / 4; 262 } 263 264 if (offset) { 265 for (i = 0; i < ARRAY_SIZE(surface->u.legacy.level); ++i) 266 surface->u.legacy.level[i].offset += offset; 267 } 268 269 return 0; 270 } 271 272 static void r600_texture_init_metadata(struct r600_common_screen *rscreen, 273 struct r600_texture *rtex, 274 struct radeon_bo_metadata *metadata) 275 { 276 struct radeon_surf *surface = &rtex->surface; 277 278 memset(metadata, 0, sizeof(*metadata)); 279 280 metadata->u.legacy.microtile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_1D ? 281 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR; 282 metadata->u.legacy.macrotile = surface->u.legacy.level[0].mode >= RADEON_SURF_MODE_2D ? 283 RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR; 284 metadata->u.legacy.pipe_config = surface->u.legacy.pipe_config; 285 metadata->u.legacy.bankw = surface->u.legacy.bankw; 286 metadata->u.legacy.bankh = surface->u.legacy.bankh; 287 metadata->u.legacy.tile_split = surface->u.legacy.tile_split; 288 metadata->u.legacy.mtilea = surface->u.legacy.mtilea; 289 metadata->u.legacy.num_banks = surface->u.legacy.num_banks; 290 metadata->u.legacy.stride = surface->u.legacy.level[0].nblk_x * surface->bpe; 291 metadata->u.legacy.scanout = (surface->flags & RADEON_SURF_SCANOUT) != 0; 292 } 293 294 static void r600_surface_import_metadata(struct r600_common_screen *rscreen, 295 struct radeon_surf *surf, 296 struct radeon_bo_metadata *metadata, 297 enum radeon_surf_mode *array_mode, 298 bool *is_scanout) 299 { 300 surf->u.legacy.pipe_config = metadata->u.legacy.pipe_config; 301 surf->u.legacy.bankw = metadata->u.legacy.bankw; 302 surf->u.legacy.bankh = metadata->u.legacy.bankh; 303 surf->u.legacy.tile_split = metadata->u.legacy.tile_split; 304 surf->u.legacy.mtilea = metadata->u.legacy.mtilea; 305 surf->u.legacy.num_banks = metadata->u.legacy.num_banks; 306 307 if (metadata->u.legacy.macrotile == RADEON_LAYOUT_TILED) 308 *array_mode = RADEON_SURF_MODE_2D; 309 else if (metadata->u.legacy.microtile == RADEON_LAYOUT_TILED) 310 *array_mode = RADEON_SURF_MODE_1D; 311 else 312 *array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED; 313 314 *is_scanout = metadata->u.legacy.scanout; 315 } 316 317 static void r600_eliminate_fast_color_clear(struct r600_common_context *rctx, 318 struct r600_texture *rtex) 319 { 320 struct r600_common_screen *rscreen = rctx->screen; 321 struct pipe_context *ctx = &rctx->b; 322 323 if (ctx == rscreen->aux_context) 324 mtx_lock(&rscreen->aux_context_lock); 325 326 ctx->flush_resource(ctx, &rtex->resource.b.b); 327 ctx->flush(ctx, NULL, 0); 328 329 if (ctx == rscreen->aux_context) 330 mtx_unlock(&rscreen->aux_context_lock); 331 } 332 333 static void r600_texture_discard_cmask(struct r600_common_screen *rscreen, 334 struct r600_texture *rtex) 335 { 336 if (!rtex->cmask.size) 337 return; 338 339 assert(rtex->resource.b.b.nr_samples <= 1); 340 341 /* Disable CMASK. */ 342 memset(&rtex->cmask, 0, sizeof(rtex->cmask)); 343 rtex->cmask.base_address_reg = rtex->resource.gpu_address >> 8; 344 rtex->dirty_level_mask = 0; 345 346 rtex->cb_color_info &= ~EG_S_028C70_FAST_CLEAR(1); 347 348 if (rtex->cmask_buffer != &rtex->resource) 349 r600_resource_reference(&rtex->cmask_buffer, NULL); 350 351 /* Notify all contexts about the change. */ 352 p_atomic_inc(&rscreen->dirty_tex_counter); 353 p_atomic_inc(&rscreen->compressed_colortex_counter); 354 } 355 356 static void r600_reallocate_texture_inplace(struct r600_common_context *rctx, 357 struct r600_texture *rtex, 358 unsigned new_bind_flag, 359 bool invalidate_storage) 360 { 361 struct pipe_screen *screen = rctx->b.screen; 362 struct r600_texture *new_tex; 363 struct pipe_resource templ = rtex->resource.b.b; 364 unsigned i; 365 366 templ.bind |= new_bind_flag; 367 368 /* r600g doesn't react to dirty_tex_descriptor_counter */ 369 if (rctx->chip_class < SI) 370 return; 371 372 if (rtex->resource.b.is_shared) 373 return; 374 375 if (new_bind_flag == PIPE_BIND_LINEAR) { 376 if (rtex->surface.is_linear) 377 return; 378 379 /* This fails with MSAA, depth, and compressed textures. */ 380 if (r600_choose_tiling(rctx->screen, &templ) != 381 RADEON_SURF_MODE_LINEAR_ALIGNED) 382 return; 383 } 384 385 new_tex = (struct r600_texture*)screen->resource_create(screen, &templ); 386 if (!new_tex) 387 return; 388 389 /* Copy the pixels to the new texture. */ 390 if (!invalidate_storage) { 391 for (i = 0; i <= templ.last_level; i++) { 392 struct pipe_box box; 393 394 u_box_3d(0, 0, 0, 395 u_minify(templ.width0, i), u_minify(templ.height0, i), 396 util_num_layers(&templ, i), &box); 397 398 rctx->dma_copy(&rctx->b, &new_tex->resource.b.b, i, 0, 0, 0, 399 &rtex->resource.b.b, i, &box); 400 } 401 } 402 403 if (new_bind_flag == PIPE_BIND_LINEAR) { 404 r600_texture_discard_cmask(rctx->screen, rtex); 405 } 406 407 /* Replace the structure fields of rtex. */ 408 rtex->resource.b.b.bind = templ.bind; 409 pb_reference(&rtex->resource.buf, new_tex->resource.buf); 410 rtex->resource.gpu_address = new_tex->resource.gpu_address; 411 rtex->resource.vram_usage = new_tex->resource.vram_usage; 412 rtex->resource.gart_usage = new_tex->resource.gart_usage; 413 rtex->resource.bo_size = new_tex->resource.bo_size; 414 rtex->resource.bo_alignment = new_tex->resource.bo_alignment; 415 rtex->resource.domains = new_tex->resource.domains; 416 rtex->resource.flags = new_tex->resource.flags; 417 rtex->size = new_tex->size; 418 rtex->db_render_format = new_tex->db_render_format; 419 rtex->db_compatible = new_tex->db_compatible; 420 rtex->can_sample_z = new_tex->can_sample_z; 421 rtex->can_sample_s = new_tex->can_sample_s; 422 rtex->surface = new_tex->surface; 423 rtex->fmask = new_tex->fmask; 424 rtex->cmask = new_tex->cmask; 425 rtex->cb_color_info = new_tex->cb_color_info; 426 rtex->last_msaa_resolve_target_micro_mode = new_tex->last_msaa_resolve_target_micro_mode; 427 rtex->htile_offset = new_tex->htile_offset; 428 rtex->depth_cleared = new_tex->depth_cleared; 429 rtex->stencil_cleared = new_tex->stencil_cleared; 430 rtex->non_disp_tiling = new_tex->non_disp_tiling; 431 rtex->framebuffers_bound = new_tex->framebuffers_bound; 432 433 if (new_bind_flag == PIPE_BIND_LINEAR) { 434 assert(!rtex->htile_offset); 435 assert(!rtex->cmask.size); 436 assert(!rtex->fmask.size); 437 assert(!rtex->is_depth); 438 } 439 440 r600_texture_reference(&new_tex, NULL); 441 442 p_atomic_inc(&rctx->screen->dirty_tex_counter); 443 } 444 445 static boolean r600_texture_get_handle(struct pipe_screen* screen, 446 struct pipe_context *ctx, 447 struct pipe_resource *resource, 448 struct winsys_handle *whandle, 449 unsigned usage) 450 { 451 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen; 452 struct r600_common_context *rctx; 453 struct r600_resource *res = (struct r600_resource*)resource; 454 struct r600_texture *rtex = (struct r600_texture*)resource; 455 struct radeon_bo_metadata metadata; 456 bool update_metadata = false; 457 unsigned stride, offset, slice_size; 458 459 ctx = threaded_context_unwrap_sync(ctx); 460 rctx = (struct r600_common_context*)(ctx ? ctx : rscreen->aux_context); 461 462 if (resource->target != PIPE_BUFFER) { 463 /* This is not supported now, but it might be required for OpenCL 464 * interop in the future. 465 */ 466 if (resource->nr_samples > 1 || rtex->is_depth) 467 return false; 468 469 /* Move a suballocated texture into a non-suballocated allocation. */ 470 if (rscreen->ws->buffer_is_suballocated(res->buf) || 471 rtex->surface.tile_swizzle) { 472 assert(!res->b.is_shared); 473 r600_reallocate_texture_inplace(rctx, rtex, 474 PIPE_BIND_SHARED, false); 475 rctx->b.flush(&rctx->b, NULL, 0); 476 assert(res->b.b.bind & PIPE_BIND_SHARED); 477 assert(res->flags & RADEON_FLAG_NO_SUBALLOC); 478 assert(rtex->surface.tile_swizzle == 0); 479 } 480 481 if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH) && 482 rtex->cmask.size) { 483 /* Eliminate fast clear (CMASK) */ 484 r600_eliminate_fast_color_clear(rctx, rtex); 485 486 /* Disable CMASK if flush_resource isn't going 487 * to be called. 488 */ 489 if (rtex->cmask.size) 490 r600_texture_discard_cmask(rscreen, rtex); 491 } 492 493 /* Set metadata. */ 494 if (!res->b.is_shared || update_metadata) { 495 r600_texture_init_metadata(rscreen, rtex, &metadata); 496 if (rscreen->query_opaque_metadata) 497 rscreen->query_opaque_metadata(rscreen, rtex, 498 &metadata); 499 500 rscreen->ws->buffer_set_metadata(res->buf, &metadata); 501 } 502 503 offset = rtex->surface.u.legacy.level[0].offset; 504 stride = rtex->surface.u.legacy.level[0].nblk_x * 505 rtex->surface.bpe; 506 slice_size = (uint64_t)rtex->surface.u.legacy.level[0].slice_size_dw * 4; 507 } else { 508 /* Move a suballocated buffer into a non-suballocated allocation. */ 509 if (rscreen->ws->buffer_is_suballocated(res->buf)) { 510 assert(!res->b.is_shared); 511 512 /* Allocate a new buffer with PIPE_BIND_SHARED. */ 513 struct pipe_resource templ = res->b.b; 514 templ.bind |= PIPE_BIND_SHARED; 515 516 struct pipe_resource *newb = 517 screen->resource_create(screen, &templ); 518 if (!newb) 519 return false; 520 521 /* Copy the old buffer contents to the new one. */ 522 struct pipe_box box; 523 u_box_1d(0, newb->width0, &box); 524 rctx->b.resource_copy_region(&rctx->b, newb, 0, 0, 0, 0, 525 &res->b.b, 0, &box); 526 /* Move the new buffer storage to the old pipe_resource. */ 527 r600_replace_buffer_storage(&rctx->b, &res->b.b, newb); 528 pipe_resource_reference(&newb, NULL); 529 530 assert(res->b.b.bind & PIPE_BIND_SHARED); 531 assert(res->flags & RADEON_FLAG_NO_SUBALLOC); 532 } 533 534 /* Buffers */ 535 offset = 0; 536 stride = 0; 537 slice_size = 0; 538 } 539 540 if (res->b.is_shared) { 541 /* USAGE_EXPLICIT_FLUSH must be cleared if at least one user 542 * doesn't set it. 543 */ 544 res->external_usage |= usage & ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH; 545 if (!(usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH)) 546 res->external_usage &= ~PIPE_HANDLE_USAGE_EXPLICIT_FLUSH; 547 } else { 548 res->b.is_shared = true; 549 res->external_usage = usage; 550 } 551 552 return rscreen->ws->buffer_get_handle(res->buf, stride, offset, 553 slice_size, whandle); 554 } 555 556 static void r600_texture_destroy(struct pipe_screen *screen, 557 struct pipe_resource *ptex) 558 { 559 struct r600_texture *rtex = (struct r600_texture*)ptex; 560 struct r600_resource *resource = &rtex->resource; 561 562 r600_texture_reference(&rtex->flushed_depth_texture, NULL); 563 pipe_resource_reference((struct pipe_resource**)&resource->immed_buffer, NULL); 564 565 if (rtex->cmask_buffer != &rtex->resource) { 566 r600_resource_reference(&rtex->cmask_buffer, NULL); 567 } 568 pb_reference(&resource->buf, NULL); 569 FREE(rtex); 570 } 571 572 static const struct u_resource_vtbl r600_texture_vtbl; 573 574 /* The number of samples can be specified independently of the texture. */ 575 void r600_texture_get_fmask_info(struct r600_common_screen *rscreen, 576 struct r600_texture *rtex, 577 unsigned nr_samples, 578 struct r600_fmask_info *out) 579 { 580 /* FMASK is allocated like an ordinary texture. */ 581 struct pipe_resource templ = rtex->resource.b.b; 582 struct radeon_surf fmask = {}; 583 unsigned flags, bpe; 584 585 memset(out, 0, sizeof(*out)); 586 587 templ.nr_samples = 1; 588 flags = rtex->surface.flags | RADEON_SURF_FMASK; 589 590 /* Use the same parameters and tile mode. */ 591 fmask.u.legacy.bankw = rtex->surface.u.legacy.bankw; 592 fmask.u.legacy.bankh = rtex->surface.u.legacy.bankh; 593 fmask.u.legacy.mtilea = rtex->surface.u.legacy.mtilea; 594 fmask.u.legacy.tile_split = rtex->surface.u.legacy.tile_split; 595 596 if (nr_samples <= 4) 597 fmask.u.legacy.bankh = 4; 598 599 switch (nr_samples) { 600 case 2: 601 case 4: 602 bpe = 1; 603 break; 604 case 8: 605 bpe = 4; 606 break; 607 default: 608 R600_ERR("Invalid sample count for FMASK allocation.\n"); 609 return; 610 } 611 612 /* Overallocate FMASK on R600-R700 to fix colorbuffer corruption. 613 * This can be fixed by writing a separate FMASK allocator specifically 614 * for R600-R700 asics. */ 615 if (rscreen->chip_class <= R700) { 616 bpe *= 2; 617 } 618 619 if (rscreen->ws->surface_init(rscreen->ws, &templ, flags, bpe, 620 RADEON_SURF_MODE_2D, &fmask)) { 621 R600_ERR("Got error in surface_init while allocating FMASK.\n"); 622 return; 623 } 624 625 assert(fmask.u.legacy.level[0].mode == RADEON_SURF_MODE_2D); 626 627 out->slice_tile_max = (fmask.u.legacy.level[0].nblk_x * fmask.u.legacy.level[0].nblk_y) / 64; 628 if (out->slice_tile_max) 629 out->slice_tile_max -= 1; 630 631 out->tile_mode_index = fmask.u.legacy.tiling_index[0]; 632 out->pitch_in_pixels = fmask.u.legacy.level[0].nblk_x; 633 out->bank_height = fmask.u.legacy.bankh; 634 out->tile_swizzle = fmask.tile_swizzle; 635 out->alignment = MAX2(256, fmask.surf_alignment); 636 out->size = fmask.surf_size; 637 } 638 639 static void r600_texture_allocate_fmask(struct r600_common_screen *rscreen, 640 struct r600_texture *rtex) 641 { 642 r600_texture_get_fmask_info(rscreen, rtex, 643 rtex->resource.b.b.nr_samples, &rtex->fmask); 644 645 rtex->fmask.offset = align64(rtex->size, rtex->fmask.alignment); 646 rtex->size = rtex->fmask.offset + rtex->fmask.size; 647 } 648 649 void r600_texture_get_cmask_info(struct r600_common_screen *rscreen, 650 struct r600_texture *rtex, 651 struct r600_cmask_info *out) 652 { 653 unsigned cmask_tile_width = 8; 654 unsigned cmask_tile_height = 8; 655 unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height; 656 unsigned element_bits = 4; 657 unsigned cmask_cache_bits = 1024; 658 unsigned num_pipes = rscreen->info.num_tile_pipes; 659 unsigned pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes; 660 661 unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes; 662 unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements; 663 unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile); 664 unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile); 665 unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width; 666 667 unsigned pitch_elements = align(rtex->resource.b.b.width0, macro_tile_width); 668 unsigned height = align(rtex->resource.b.b.height0, macro_tile_height); 669 670 unsigned base_align = num_pipes * pipe_interleave_bytes; 671 unsigned slice_bytes = 672 ((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements; 673 674 assert(macro_tile_width % 128 == 0); 675 assert(macro_tile_height % 128 == 0); 676 677 out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1; 678 out->alignment = MAX2(256, base_align); 679 out->size = util_num_layers(&rtex->resource.b.b, 0) * 680 align(slice_bytes, base_align); 681 } 682 683 static void r600_texture_allocate_cmask(struct r600_common_screen *rscreen, 684 struct r600_texture *rtex) 685 { 686 r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask); 687 688 rtex->cmask.offset = align64(rtex->size, rtex->cmask.alignment); 689 rtex->size = rtex->cmask.offset + rtex->cmask.size; 690 691 rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1); 692 } 693 694 static void r600_texture_alloc_cmask_separate(struct r600_common_screen *rscreen, 695 struct r600_texture *rtex) 696 { 697 if (rtex->cmask_buffer) 698 return; 699 700 assert(rtex->cmask.size == 0); 701 702 r600_texture_get_cmask_info(rscreen, rtex, &rtex->cmask); 703 704 rtex->cmask_buffer = (struct r600_resource *) 705 r600_aligned_buffer_create(&rscreen->b, 706 R600_RESOURCE_FLAG_UNMAPPABLE, 707 PIPE_USAGE_DEFAULT, 708 rtex->cmask.size, 709 rtex->cmask.alignment); 710 if (rtex->cmask_buffer == NULL) { 711 rtex->cmask.size = 0; 712 return; 713 } 714 715 /* update colorbuffer state bits */ 716 rtex->cmask.base_address_reg = rtex->cmask_buffer->gpu_address >> 8; 717 718 rtex->cb_color_info |= EG_S_028C70_FAST_CLEAR(1); 719 720 p_atomic_inc(&rscreen->compressed_colortex_counter); 721 } 722 723 void eg_resource_alloc_immed(struct r600_common_screen *rscreen, 724 struct r600_resource *res, 725 unsigned immed_size) 726 { 727 res->immed_buffer = (struct r600_resource *) 728 pipe_buffer_create(&rscreen->b, PIPE_BIND_CUSTOM, 729 PIPE_USAGE_DEFAULT, immed_size); 730 } 731 732 static void r600_texture_get_htile_size(struct r600_common_screen *rscreen, 733 struct r600_texture *rtex) 734 { 735 unsigned cl_width, cl_height, width, height; 736 unsigned slice_elements, slice_bytes, pipe_interleave_bytes, base_align; 737 unsigned num_pipes = rscreen->info.num_tile_pipes; 738 739 rtex->surface.htile_size = 0; 740 741 if (rscreen->chip_class <= EVERGREEN && 742 rscreen->info.drm_major == 2 && rscreen->info.drm_minor < 26) 743 return; 744 745 /* HW bug on R6xx. */ 746 if (rscreen->chip_class == R600 && 747 (rtex->resource.b.b.width0 > 7680 || 748 rtex->resource.b.b.height0 > 7680)) 749 return; 750 751 switch (num_pipes) { 752 case 1: 753 cl_width = 32; 754 cl_height = 16; 755 break; 756 case 2: 757 cl_width = 32; 758 cl_height = 32; 759 break; 760 case 4: 761 cl_width = 64; 762 cl_height = 32; 763 break; 764 case 8: 765 cl_width = 64; 766 cl_height = 64; 767 break; 768 case 16: 769 cl_width = 128; 770 cl_height = 64; 771 break; 772 default: 773 assert(0); 774 return; 775 } 776 777 width = align(rtex->resource.b.b.width0, cl_width * 8); 778 height = align(rtex->resource.b.b.height0, cl_height * 8); 779 780 slice_elements = (width * height) / (8 * 8); 781 slice_bytes = slice_elements * 4; 782 783 pipe_interleave_bytes = rscreen->info.pipe_interleave_bytes; 784 base_align = num_pipes * pipe_interleave_bytes; 785 786 rtex->surface.htile_alignment = base_align; 787 rtex->surface.htile_size = 788 util_num_layers(&rtex->resource.b.b, 0) * 789 align(slice_bytes, base_align); 790 } 791 792 static void r600_texture_allocate_htile(struct r600_common_screen *rscreen, 793 struct r600_texture *rtex) 794 { 795 r600_texture_get_htile_size(rscreen, rtex); 796 797 if (!rtex->surface.htile_size) 798 return; 799 800 rtex->htile_offset = align(rtex->size, rtex->surface.htile_alignment); 801 rtex->size = rtex->htile_offset + rtex->surface.htile_size; 802 } 803 804 void r600_print_texture_info(struct r600_common_screen *rscreen, 805 struct r600_texture *rtex, struct u_log_context *log) 806 { 807 int i; 808 809 /* Common parameters. */ 810 u_log_printf(log, " Info: npix_x=%u, npix_y=%u, npix_z=%u, blk_w=%u, " 811 "blk_h=%u, array_size=%u, last_level=%u, " 812 "bpe=%u, nsamples=%u, flags=0x%x, %s\n", 813 rtex->resource.b.b.width0, rtex->resource.b.b.height0, 814 rtex->resource.b.b.depth0, rtex->surface.blk_w, 815 rtex->surface.blk_h, 816 rtex->resource.b.b.array_size, rtex->resource.b.b.last_level, 817 rtex->surface.bpe, rtex->resource.b.b.nr_samples, 818 rtex->surface.flags, util_format_short_name(rtex->resource.b.b.format)); 819 820 u_log_printf(log, " Layout: size=%"PRIu64", alignment=%u, bankw=%u, " 821 "bankh=%u, nbanks=%u, mtilea=%u, tilesplit=%u, pipeconfig=%u, scanout=%u\n", 822 rtex->surface.surf_size, rtex->surface.surf_alignment, rtex->surface.u.legacy.bankw, 823 rtex->surface.u.legacy.bankh, rtex->surface.u.legacy.num_banks, rtex->surface.u.legacy.mtilea, 824 rtex->surface.u.legacy.tile_split, rtex->surface.u.legacy.pipe_config, 825 (rtex->surface.flags & RADEON_SURF_SCANOUT) != 0); 826 827 if (rtex->fmask.size) 828 u_log_printf(log, " FMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, pitch_in_pixels=%u, " 829 "bankh=%u, slice_tile_max=%u, tile_mode_index=%u\n", 830 rtex->fmask.offset, rtex->fmask.size, rtex->fmask.alignment, 831 rtex->fmask.pitch_in_pixels, rtex->fmask.bank_height, 832 rtex->fmask.slice_tile_max, rtex->fmask.tile_mode_index); 833 834 if (rtex->cmask.size) 835 u_log_printf(log, " CMask: offset=%"PRIu64", size=%"PRIu64", alignment=%u, " 836 "slice_tile_max=%u\n", 837 rtex->cmask.offset, rtex->cmask.size, rtex->cmask.alignment, 838 rtex->cmask.slice_tile_max); 839 840 if (rtex->htile_offset) 841 u_log_printf(log, " HTile: offset=%"PRIu64", size=%u " 842 "alignment=%u\n", 843 rtex->htile_offset, rtex->surface.htile_size, 844 rtex->surface.htile_alignment); 845 846 for (i = 0; i <= rtex->resource.b.b.last_level; i++) 847 u_log_printf(log, " Level[%i]: offset=%"PRIu64", slice_size=%"PRIu64", " 848 "npix_x=%u, npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, " 849 "mode=%u, tiling_index = %u\n", 850 i, rtex->surface.u.legacy.level[i].offset, 851 (uint64_t)rtex->surface.u.legacy.level[i].slice_size_dw * 4, 852 u_minify(rtex->resource.b.b.width0, i), 853 u_minify(rtex->resource.b.b.height0, i), 854 u_minify(rtex->resource.b.b.depth0, i), 855 rtex->surface.u.legacy.level[i].nblk_x, 856 rtex->surface.u.legacy.level[i].nblk_y, 857 rtex->surface.u.legacy.level[i].mode, 858 rtex->surface.u.legacy.tiling_index[i]); 859 860 if (rtex->surface.has_stencil) { 861 u_log_printf(log, " StencilLayout: tilesplit=%u\n", 862 rtex->surface.u.legacy.stencil_tile_split); 863 for (i = 0; i <= rtex->resource.b.b.last_level; i++) { 864 u_log_printf(log, " StencilLevel[%i]: offset=%"PRIu64", " 865 "slice_size=%"PRIu64", npix_x=%u, " 866 "npix_y=%u, npix_z=%u, nblk_x=%u, nblk_y=%u, " 867 "mode=%u, tiling_index = %u\n", 868 i, rtex->surface.u.legacy.stencil_level[i].offset, 869 (uint64_t)rtex->surface.u.legacy.stencil_level[i].slice_size_dw * 4, 870 u_minify(rtex->resource.b.b.width0, i), 871 u_minify(rtex->resource.b.b.height0, i), 872 u_minify(rtex->resource.b.b.depth0, i), 873 rtex->surface.u.legacy.stencil_level[i].nblk_x, 874 rtex->surface.u.legacy.stencil_level[i].nblk_y, 875 rtex->surface.u.legacy.stencil_level[i].mode, 876 rtex->surface.u.legacy.stencil_tiling_index[i]); 877 } 878 } 879 } 880 881 /* Common processing for r600_texture_create and r600_texture_from_handle */ 882 static struct r600_texture * 883 r600_texture_create_object(struct pipe_screen *screen, 884 const struct pipe_resource *base, 885 struct pb_buffer *buf, 886 struct radeon_surf *surface) 887 { 888 struct r600_texture *rtex; 889 struct r600_resource *resource; 890 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen; 891 892 rtex = CALLOC_STRUCT(r600_texture); 893 if (!rtex) 894 return NULL; 895 896 resource = &rtex->resource; 897 resource->b.b = *base; 898 resource->b.b.next = NULL; 899 resource->b.vtbl = &r600_texture_vtbl; 900 pipe_reference_init(&resource->b.b.reference, 1); 901 resource->b.b.screen = screen; 902 903 /* don't include stencil-only formats which we don't support for rendering */ 904 rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format)); 905 906 rtex->surface = *surface; 907 rtex->size = rtex->surface.surf_size; 908 rtex->db_render_format = base->format; 909 910 /* Tiled depth textures utilize the non-displayable tile order. 911 * This must be done after r600_setup_surface. 912 * Applies to R600-Cayman. */ 913 rtex->non_disp_tiling = rtex->is_depth && rtex->surface.u.legacy.level[0].mode >= RADEON_SURF_MODE_1D; 914 /* Applies to GCN. */ 915 rtex->last_msaa_resolve_target_micro_mode = rtex->surface.micro_tile_mode; 916 917 if (rtex->is_depth) { 918 if (base->flags & (R600_RESOURCE_FLAG_TRANSFER | 919 R600_RESOURCE_FLAG_FLUSHED_DEPTH) || 920 rscreen->chip_class >= EVERGREEN) { 921 rtex->can_sample_z = !rtex->surface.u.legacy.depth_adjusted; 922 rtex->can_sample_s = !rtex->surface.u.legacy.stencil_adjusted; 923 } else { 924 if (rtex->resource.b.b.nr_samples <= 1 && 925 (rtex->resource.b.b.format == PIPE_FORMAT_Z16_UNORM || 926 rtex->resource.b.b.format == PIPE_FORMAT_Z32_FLOAT)) 927 rtex->can_sample_z = true; 928 } 929 930 if (!(base->flags & (R600_RESOURCE_FLAG_TRANSFER | 931 R600_RESOURCE_FLAG_FLUSHED_DEPTH))) { 932 rtex->db_compatible = true; 933 934 if (!(rscreen->debug_flags & DBG_NO_HYPERZ)) 935 r600_texture_allocate_htile(rscreen, rtex); 936 } 937 } else { 938 if (base->nr_samples > 1) { 939 if (!buf) { 940 r600_texture_allocate_fmask(rscreen, rtex); 941 r600_texture_allocate_cmask(rscreen, rtex); 942 rtex->cmask_buffer = &rtex->resource; 943 } 944 if (!rtex->fmask.size || !rtex->cmask.size) { 945 FREE(rtex); 946 return NULL; 947 } 948 } 949 } 950 951 /* Now create the backing buffer. */ 952 if (!buf) { 953 r600_init_resource_fields(rscreen, resource, rtex->size, 954 rtex->surface.surf_alignment); 955 956 /* Displayable surfaces are not suballocated. */ 957 if (resource->b.b.bind & PIPE_BIND_SCANOUT) 958 resource->flags |= RADEON_FLAG_NO_SUBALLOC; 959 960 if (!r600_alloc_resource(rscreen, resource)) { 961 FREE(rtex); 962 return NULL; 963 } 964 } else { 965 resource->buf = buf; 966 resource->gpu_address = rscreen->ws->buffer_get_virtual_address(resource->buf); 967 resource->bo_size = buf->size; 968 resource->bo_alignment = buf->alignment; 969 resource->domains = rscreen->ws->buffer_get_initial_domain(resource->buf); 970 if (resource->domains & RADEON_DOMAIN_VRAM) 971 resource->vram_usage = buf->size; 972 else if (resource->domains & RADEON_DOMAIN_GTT) 973 resource->gart_usage = buf->size; 974 } 975 976 if (rtex->cmask.size) { 977 /* Initialize the cmask to 0xCC (= compressed state). */ 978 r600_screen_clear_buffer(rscreen, &rtex->cmask_buffer->b.b, 979 rtex->cmask.offset, rtex->cmask.size, 980 0xCCCCCCCC); 981 } 982 if (rtex->htile_offset) { 983 uint32_t clear_value = 0; 984 985 r600_screen_clear_buffer(rscreen, &rtex->resource.b.b, 986 rtex->htile_offset, 987 rtex->surface.htile_size, 988 clear_value); 989 } 990 991 /* Initialize the CMASK base register value. */ 992 rtex->cmask.base_address_reg = 993 (rtex->resource.gpu_address + rtex->cmask.offset) >> 8; 994 995 if (rscreen->debug_flags & DBG_VM) { 996 fprintf(stderr, "VM start=0x%"PRIX64" end=0x%"PRIX64" | Texture %ix%ix%i, %i levels, %i samples, %s\n", 997 rtex->resource.gpu_address, 998 rtex->resource.gpu_address + rtex->resource.buf->size, 999 base->width0, base->height0, util_num_layers(base, 0), base->last_level+1, 1000 base->nr_samples ? base->nr_samples : 1, util_format_short_name(base->format)); 1001 } 1002 1003 if (rscreen->debug_flags & DBG_TEX) { 1004 puts("Texture:"); 1005 struct u_log_context log; 1006 u_log_context_init(&log); 1007 r600_print_texture_info(rscreen, rtex, &log); 1008 u_log_new_page_print(&log, stdout); 1009 fflush(stdout); 1010 u_log_context_destroy(&log); 1011 } 1012 1013 return rtex; 1014 } 1015 1016 static enum radeon_surf_mode 1017 r600_choose_tiling(struct r600_common_screen *rscreen, 1018 const struct pipe_resource *templ) 1019 { 1020 const struct util_format_description *desc = util_format_description(templ->format); 1021 bool force_tiling = templ->flags & R600_RESOURCE_FLAG_FORCE_TILING; 1022 bool is_depth_stencil = util_format_is_depth_or_stencil(templ->format) && 1023 !(templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH); 1024 1025 /* MSAA resources must be 2D tiled. */ 1026 if (templ->nr_samples > 1) 1027 return RADEON_SURF_MODE_2D; 1028 1029 /* Transfer resources should be linear. */ 1030 if (templ->flags & R600_RESOURCE_FLAG_TRANSFER) 1031 return RADEON_SURF_MODE_LINEAR_ALIGNED; 1032 1033 /* r600g: force tiling on TEXTURE_2D and TEXTURE_3D compute resources. */ 1034 if (rscreen->chip_class >= R600 && rscreen->chip_class <= CAYMAN && 1035 (templ->bind & PIPE_BIND_COMPUTE_RESOURCE) && 1036 (templ->target == PIPE_TEXTURE_2D || 1037 templ->target == PIPE_TEXTURE_3D)) 1038 force_tiling = true; 1039 1040 /* Handle common candidates for the linear mode. 1041 * Compressed textures and DB surfaces must always be tiled. 1042 */ 1043 if (!force_tiling && 1044 !is_depth_stencil && 1045 !util_format_is_compressed(templ->format)) { 1046 if (rscreen->debug_flags & DBG_NO_TILING) 1047 return RADEON_SURF_MODE_LINEAR_ALIGNED; 1048 1049 /* Tiling doesn't work with the 422 (SUBSAMPLED) formats on R600+. */ 1050 if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) 1051 return RADEON_SURF_MODE_LINEAR_ALIGNED; 1052 1053 if (templ->bind & PIPE_BIND_LINEAR) 1054 return RADEON_SURF_MODE_LINEAR_ALIGNED; 1055 1056 /* 1D textures should be linear - fixes image operations on 1d */ 1057 if (templ->target == PIPE_TEXTURE_1D || 1058 templ->target == PIPE_TEXTURE_1D_ARRAY) 1059 return RADEON_SURF_MODE_LINEAR_ALIGNED; 1060 1061 /* Textures likely to be mapped often. */ 1062 if (templ->usage == PIPE_USAGE_STAGING || 1063 templ->usage == PIPE_USAGE_STREAM) 1064 return RADEON_SURF_MODE_LINEAR_ALIGNED; 1065 } 1066 1067 /* Make small textures 1D tiled. */ 1068 if (templ->width0 <= 16 || templ->height0 <= 16 || 1069 (rscreen->debug_flags & DBG_NO_2D_TILING)) 1070 return RADEON_SURF_MODE_1D; 1071 1072 /* The allocator will switch to 1D if needed. */ 1073 return RADEON_SURF_MODE_2D; 1074 } 1075 1076 struct pipe_resource *r600_texture_create(struct pipe_screen *screen, 1077 const struct pipe_resource *templ) 1078 { 1079 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen; 1080 struct radeon_surf surface = {0}; 1081 bool is_flushed_depth = templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH; 1082 int r; 1083 1084 r = r600_init_surface(rscreen, &surface, templ, 1085 r600_choose_tiling(rscreen, templ), 0, 0, 1086 false, false, is_flushed_depth); 1087 if (r) { 1088 return NULL; 1089 } 1090 1091 return (struct pipe_resource *) 1092 r600_texture_create_object(screen, templ, NULL, &surface); 1093 } 1094 1095 static struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen, 1096 const struct pipe_resource *templ, 1097 struct winsys_handle *whandle, 1098 unsigned usage) 1099 { 1100 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen; 1101 struct pb_buffer *buf = NULL; 1102 unsigned stride = 0, offset = 0; 1103 enum radeon_surf_mode array_mode; 1104 struct radeon_surf surface = {}; 1105 int r; 1106 struct radeon_bo_metadata metadata = {}; 1107 struct r600_texture *rtex; 1108 bool is_scanout; 1109 1110 /* Support only 2D textures without mipmaps */ 1111 if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) || 1112 templ->depth0 != 1 || templ->last_level != 0) 1113 return NULL; 1114 1115 buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride, &offset); 1116 if (!buf) 1117 return NULL; 1118 1119 rscreen->ws->buffer_get_metadata(buf, &metadata); 1120 r600_surface_import_metadata(rscreen, &surface, &metadata, 1121 &array_mode, &is_scanout); 1122 1123 r = r600_init_surface(rscreen, &surface, templ, array_mode, stride, 1124 offset, true, is_scanout, false); 1125 if (r) { 1126 return NULL; 1127 } 1128 1129 rtex = r600_texture_create_object(screen, templ, buf, &surface); 1130 if (!rtex) 1131 return NULL; 1132 1133 rtex->resource.b.is_shared = true; 1134 rtex->resource.external_usage = usage; 1135 1136 if (rscreen->apply_opaque_metadata) 1137 rscreen->apply_opaque_metadata(rscreen, rtex, &metadata); 1138 1139 assert(rtex->surface.tile_swizzle == 0); 1140 return &rtex->resource.b.b; 1141 } 1142 1143 bool r600_init_flushed_depth_texture(struct pipe_context *ctx, 1144 struct pipe_resource *texture, 1145 struct r600_texture **staging) 1146 { 1147 struct r600_texture *rtex = (struct r600_texture*)texture; 1148 struct pipe_resource resource; 1149 struct r600_texture **flushed_depth_texture = staging ? 1150 staging : &rtex->flushed_depth_texture; 1151 enum pipe_format pipe_format = texture->format; 1152 1153 if (!staging) { 1154 if (rtex->flushed_depth_texture) 1155 return true; /* it's ready */ 1156 1157 if (!rtex->can_sample_z && rtex->can_sample_s) { 1158 switch (pipe_format) { 1159 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT: 1160 /* Save memory by not allocating the S plane. */ 1161 pipe_format = PIPE_FORMAT_Z32_FLOAT; 1162 break; 1163 case PIPE_FORMAT_Z24_UNORM_S8_UINT: 1164 case PIPE_FORMAT_S8_UINT_Z24_UNORM: 1165 /* Save memory bandwidth by not copying the 1166 * stencil part during flush. 1167 * 1168 * This potentially increases memory bandwidth 1169 * if an application uses both Z and S texturing 1170 * simultaneously (a flushed Z24S8 texture 1171 * would be stored compactly), but how often 1172 * does that really happen? 1173 */ 1174 pipe_format = PIPE_FORMAT_Z24X8_UNORM; 1175 break; 1176 default:; 1177 } 1178 } else if (!rtex->can_sample_s && rtex->can_sample_z) { 1179 assert(util_format_has_stencil(util_format_description(pipe_format))); 1180 1181 /* DB->CB copies to an 8bpp surface don't work. */ 1182 pipe_format = PIPE_FORMAT_X24S8_UINT; 1183 } 1184 } 1185 1186 memset(&resource, 0, sizeof(resource)); 1187 resource.target = texture->target; 1188 resource.format = pipe_format; 1189 resource.width0 = texture->width0; 1190 resource.height0 = texture->height0; 1191 resource.depth0 = texture->depth0; 1192 resource.array_size = texture->array_size; 1193 resource.last_level = texture->last_level; 1194 resource.nr_samples = texture->nr_samples; 1195 resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT; 1196 resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL; 1197 resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH; 1198 1199 if (staging) 1200 resource.flags |= R600_RESOURCE_FLAG_TRANSFER; 1201 1202 *flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource); 1203 if (*flushed_depth_texture == NULL) { 1204 R600_ERR("failed to create temporary texture to hold flushed depth\n"); 1205 return false; 1206 } 1207 1208 (*flushed_depth_texture)->non_disp_tiling = false; 1209 return true; 1210 } 1211 1212 /** 1213 * Initialize the pipe_resource descriptor to be of the same size as the box, 1214 * which is supposed to hold a subregion of the texture "orig" at the given 1215 * mipmap level. 1216 */ 1217 static void r600_init_temp_resource_from_box(struct pipe_resource *res, 1218 struct pipe_resource *orig, 1219 const struct pipe_box *box, 1220 unsigned level, unsigned flags) 1221 { 1222 memset(res, 0, sizeof(*res)); 1223 res->format = orig->format; 1224 res->width0 = box->width; 1225 res->height0 = box->height; 1226 res->depth0 = 1; 1227 res->array_size = 1; 1228 res->usage = flags & R600_RESOURCE_FLAG_TRANSFER ? PIPE_USAGE_STAGING : PIPE_USAGE_DEFAULT; 1229 res->flags = flags; 1230 1231 /* We must set the correct texture target and dimensions for a 3D box. */ 1232 if (box->depth > 1 && util_max_layer(orig, level) > 0) { 1233 res->target = PIPE_TEXTURE_2D_ARRAY; 1234 res->array_size = box->depth; 1235 } else { 1236 res->target = PIPE_TEXTURE_2D; 1237 } 1238 } 1239 1240 static bool r600_can_invalidate_texture(struct r600_common_screen *rscreen, 1241 struct r600_texture *rtex, 1242 unsigned transfer_usage, 1243 const struct pipe_box *box) 1244 { 1245 /* r600g doesn't react to dirty_tex_descriptor_counter */ 1246 return rscreen->chip_class >= SI && 1247 !rtex->resource.b.is_shared && 1248 !(transfer_usage & PIPE_TRANSFER_READ) && 1249 rtex->resource.b.b.last_level == 0 && 1250 util_texrange_covers_whole_level(&rtex->resource.b.b, 0, 1251 box->x, box->y, box->z, 1252 box->width, box->height, 1253 box->depth); 1254 } 1255 1256 static void r600_texture_invalidate_storage(struct r600_common_context *rctx, 1257 struct r600_texture *rtex) 1258 { 1259 struct r600_common_screen *rscreen = rctx->screen; 1260 1261 /* There is no point in discarding depth and tiled buffers. */ 1262 assert(!rtex->is_depth); 1263 assert(rtex->surface.is_linear); 1264 1265 /* Reallocate the buffer in the same pipe_resource. */ 1266 r600_alloc_resource(rscreen, &rtex->resource); 1267 1268 /* Initialize the CMASK base address (needed even without CMASK). */ 1269 rtex->cmask.base_address_reg = 1270 (rtex->resource.gpu_address + rtex->cmask.offset) >> 8; 1271 1272 p_atomic_inc(&rscreen->dirty_tex_counter); 1273 1274 rctx->num_alloc_tex_transfer_bytes += rtex->size; 1275 } 1276 1277 static void *r600_texture_transfer_map(struct pipe_context *ctx, 1278 struct pipe_resource *texture, 1279 unsigned level, 1280 unsigned usage, 1281 const struct pipe_box *box, 1282 struct pipe_transfer **ptransfer) 1283 { 1284 struct r600_common_context *rctx = (struct r600_common_context*)ctx; 1285 struct r600_texture *rtex = (struct r600_texture*)texture; 1286 struct r600_transfer *trans; 1287 struct r600_resource *buf; 1288 unsigned offset = 0; 1289 char *map; 1290 bool use_staging_texture = false; 1291 1292 assert(!(texture->flags & R600_RESOURCE_FLAG_TRANSFER)); 1293 assert(box->width && box->height && box->depth); 1294 1295 /* Depth textures use staging unconditionally. */ 1296 if (!rtex->is_depth) { 1297 /* Degrade the tile mode if we get too many transfers on APUs. 1298 * On dGPUs, the staging texture is always faster. 1299 * Only count uploads that are at least 4x4 pixels large. 1300 */ 1301 if (!rctx->screen->info.has_dedicated_vram && 1302 level == 0 && 1303 box->width >= 4 && box->height >= 4 && 1304 p_atomic_inc_return(&rtex->num_level0_transfers) == 10) { 1305 bool can_invalidate = 1306 r600_can_invalidate_texture(rctx->screen, rtex, 1307 usage, box); 1308 1309 r600_reallocate_texture_inplace(rctx, rtex, 1310 PIPE_BIND_LINEAR, 1311 can_invalidate); 1312 } 1313 1314 /* Tiled textures need to be converted into a linear texture for CPU 1315 * access. The staging texture is always linear and is placed in GART. 1316 * 1317 * Reading from VRAM or GTT WC is slow, always use the staging 1318 * texture in this case. 1319 * 1320 * Use the staging texture for uploads if the underlying BO 1321 * is busy. 1322 */ 1323 if (!rtex->surface.is_linear) 1324 use_staging_texture = true; 1325 else if (usage & PIPE_TRANSFER_READ) 1326 use_staging_texture = 1327 rtex->resource.domains & RADEON_DOMAIN_VRAM || 1328 rtex->resource.flags & RADEON_FLAG_GTT_WC; 1329 /* Write & linear only: */ 1330 else if (r600_rings_is_buffer_referenced(rctx, rtex->resource.buf, 1331 RADEON_USAGE_READWRITE) || 1332 !rctx->ws->buffer_wait(rtex->resource.buf, 0, 1333 RADEON_USAGE_READWRITE)) { 1334 /* It's busy. */ 1335 if (r600_can_invalidate_texture(rctx->screen, rtex, 1336 usage, box)) 1337 r600_texture_invalidate_storage(rctx, rtex); 1338 else 1339 use_staging_texture = true; 1340 } 1341 } 1342 1343 trans = CALLOC_STRUCT(r600_transfer); 1344 if (!trans) 1345 return NULL; 1346 pipe_resource_reference(&trans->b.b.resource, texture); 1347 trans->b.b.level = level; 1348 trans->b.b.usage = usage; 1349 trans->b.b.box = *box; 1350 1351 if (rtex->is_depth) { 1352 struct r600_texture *staging_depth; 1353 1354 if (rtex->resource.b.b.nr_samples > 1) { 1355 /* MSAA depth buffers need to be converted to single sample buffers. 1356 * 1357 * Mapping MSAA depth buffers can occur if ReadPixels is called 1358 * with a multisample GLX visual. 1359 * 1360 * First downsample the depth buffer to a temporary texture, 1361 * then decompress the temporary one to staging. 1362 * 1363 * Only the region being mapped is transfered. 1364 */ 1365 struct pipe_resource resource; 1366 1367 r600_init_temp_resource_from_box(&resource, texture, box, level, 0); 1368 1369 if (!r600_init_flushed_depth_texture(ctx, &resource, &staging_depth)) { 1370 R600_ERR("failed to create temporary texture to hold untiled copy\n"); 1371 FREE(trans); 1372 return NULL; 1373 } 1374 1375 if (usage & PIPE_TRANSFER_READ) { 1376 struct pipe_resource *temp = ctx->screen->resource_create(ctx->screen, &resource); 1377 if (!temp) { 1378 R600_ERR("failed to create a temporary depth texture\n"); 1379 FREE(trans); 1380 return NULL; 1381 } 1382 1383 r600_copy_region_with_blit(ctx, temp, 0, 0, 0, 0, texture, level, box); 1384 rctx->blit_decompress_depth(ctx, (struct r600_texture*)temp, staging_depth, 1385 0, 0, 0, box->depth, 0, 0); 1386 pipe_resource_reference(&temp, NULL); 1387 } 1388 1389 /* Just get the strides. */ 1390 r600_texture_get_offset(rctx->screen, staging_depth, level, NULL, 1391 &trans->b.b.stride, 1392 &trans->b.b.layer_stride); 1393 } else { 1394 /* XXX: only readback the rectangle which is being mapped? */ 1395 /* XXX: when discard is true, no need to read back from depth texture */ 1396 if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) { 1397 R600_ERR("failed to create temporary texture to hold untiled copy\n"); 1398 FREE(trans); 1399 return NULL; 1400 } 1401 1402 rctx->blit_decompress_depth(ctx, rtex, staging_depth, 1403 level, level, 1404 box->z, box->z + box->depth - 1, 1405 0, 0); 1406 1407 offset = r600_texture_get_offset(rctx->screen, staging_depth, 1408 level, box, 1409 &trans->b.b.stride, 1410 &trans->b.b.layer_stride); 1411 } 1412 1413 trans->staging = (struct r600_resource*)staging_depth; 1414 buf = trans->staging; 1415 } else if (use_staging_texture) { 1416 struct pipe_resource resource; 1417 struct r600_texture *staging; 1418 1419 r600_init_temp_resource_from_box(&resource, texture, box, level, 1420 R600_RESOURCE_FLAG_TRANSFER); 1421 resource.usage = (usage & PIPE_TRANSFER_READ) ? 1422 PIPE_USAGE_STAGING : PIPE_USAGE_STREAM; 1423 1424 /* Create the temporary texture. */ 1425 staging = (struct r600_texture*)ctx->screen->resource_create(ctx->screen, &resource); 1426 if (!staging) { 1427 R600_ERR("failed to create temporary texture to hold untiled copy\n"); 1428 FREE(trans); 1429 return NULL; 1430 } 1431 trans->staging = &staging->resource; 1432 1433 /* Just get the strides. */ 1434 r600_texture_get_offset(rctx->screen, staging, 0, NULL, 1435 &trans->b.b.stride, 1436 &trans->b.b.layer_stride); 1437 1438 if (usage & PIPE_TRANSFER_READ) 1439 r600_copy_to_staging_texture(ctx, trans); 1440 else 1441 usage |= PIPE_TRANSFER_UNSYNCHRONIZED; 1442 1443 buf = trans->staging; 1444 } else { 1445 /* the resource is mapped directly */ 1446 offset = r600_texture_get_offset(rctx->screen, rtex, level, box, 1447 &trans->b.b.stride, 1448 &trans->b.b.layer_stride); 1449 buf = &rtex->resource; 1450 } 1451 1452 if (!(map = r600_buffer_map_sync_with_rings(rctx, buf, usage))) { 1453 r600_resource_reference(&trans->staging, NULL); 1454 FREE(trans); 1455 return NULL; 1456 } 1457 1458 *ptransfer = &trans->b.b; 1459 return map + offset; 1460 } 1461 1462 static void r600_texture_transfer_unmap(struct pipe_context *ctx, 1463 struct pipe_transfer* transfer) 1464 { 1465 struct r600_common_context *rctx = (struct r600_common_context*)ctx; 1466 struct r600_transfer *rtransfer = (struct r600_transfer*)transfer; 1467 struct pipe_resource *texture = transfer->resource; 1468 struct r600_texture *rtex = (struct r600_texture*)texture; 1469 1470 if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) { 1471 if (rtex->is_depth && rtex->resource.b.b.nr_samples <= 1) { 1472 ctx->resource_copy_region(ctx, texture, transfer->level, 1473 transfer->box.x, transfer->box.y, transfer->box.z, 1474 &rtransfer->staging->b.b, transfer->level, 1475 &transfer->box); 1476 } else { 1477 r600_copy_from_staging_texture(ctx, rtransfer); 1478 } 1479 } 1480 1481 if (rtransfer->staging) { 1482 rctx->num_alloc_tex_transfer_bytes += rtransfer->staging->buf->size; 1483 r600_resource_reference(&rtransfer->staging, NULL); 1484 } 1485 1486 /* Heuristic for {upload, draw, upload, draw, ..}: 1487 * 1488 * Flush the gfx IB if we've allocated too much texture storage. 1489 * 1490 * The idea is that we don't want to build IBs that use too much 1491 * memory and put pressure on the kernel memory manager and we also 1492 * want to make temporary and invalidated buffers go idle ASAP to 1493 * decrease the total memory usage or make them reusable. The memory 1494 * usage will be slightly higher than given here because of the buffer 1495 * cache in the winsys. 1496 * 1497 * The result is that the kernel memory manager is never a bottleneck. 1498 */ 1499 if (rctx->num_alloc_tex_transfer_bytes > rctx->screen->info.gart_size / 4) { 1500 rctx->gfx.flush(rctx, PIPE_FLUSH_ASYNC, NULL); 1501 rctx->num_alloc_tex_transfer_bytes = 0; 1502 } 1503 1504 pipe_resource_reference(&transfer->resource, NULL); 1505 FREE(transfer); 1506 } 1507 1508 static const struct u_resource_vtbl r600_texture_vtbl = 1509 { 1510 NULL, /* get_handle */ 1511 r600_texture_destroy, /* resource_destroy */ 1512 r600_texture_transfer_map, /* transfer_map */ 1513 u_default_transfer_flush_region, /* transfer_flush_region */ 1514 r600_texture_transfer_unmap, /* transfer_unmap */ 1515 }; 1516 1517 struct pipe_surface *r600_create_surface_custom(struct pipe_context *pipe, 1518 struct pipe_resource *texture, 1519 const struct pipe_surface *templ, 1520 unsigned width0, unsigned height0, 1521 unsigned width, unsigned height) 1522 { 1523 struct r600_surface *surface = CALLOC_STRUCT(r600_surface); 1524 1525 if (!surface) 1526 return NULL; 1527 1528 assert(templ->u.tex.first_layer <= util_max_layer(texture, templ->u.tex.level)); 1529 assert(templ->u.tex.last_layer <= util_max_layer(texture, templ->u.tex.level)); 1530 1531 pipe_reference_init(&surface->base.reference, 1); 1532 pipe_resource_reference(&surface->base.texture, texture); 1533 surface->base.context = pipe; 1534 surface->base.format = templ->format; 1535 surface->base.width = width; 1536 surface->base.height = height; 1537 surface->base.u = templ->u; 1538 1539 surface->width0 = width0; 1540 surface->height0 = height0; 1541 1542 return &surface->base; 1543 } 1544 1545 static struct pipe_surface *r600_create_surface(struct pipe_context *pipe, 1546 struct pipe_resource *tex, 1547 const struct pipe_surface *templ) 1548 { 1549 unsigned level = templ->u.tex.level; 1550 unsigned width = u_minify(tex->width0, level); 1551 unsigned height = u_minify(tex->height0, level); 1552 unsigned width0 = tex->width0; 1553 unsigned height0 = tex->height0; 1554 1555 if (tex->target != PIPE_BUFFER && templ->format != tex->format) { 1556 const struct util_format_description *tex_desc 1557 = util_format_description(tex->format); 1558 const struct util_format_description *templ_desc 1559 = util_format_description(templ->format); 1560 1561 assert(tex_desc->block.bits == templ_desc->block.bits); 1562 1563 /* Adjust size of surface if and only if the block width or 1564 * height is changed. */ 1565 if (tex_desc->block.width != templ_desc->block.width || 1566 tex_desc->block.height != templ_desc->block.height) { 1567 unsigned nblks_x = util_format_get_nblocksx(tex->format, width); 1568 unsigned nblks_y = util_format_get_nblocksy(tex->format, height); 1569 1570 width = nblks_x * templ_desc->block.width; 1571 height = nblks_y * templ_desc->block.height; 1572 1573 width0 = util_format_get_nblocksx(tex->format, width0); 1574 height0 = util_format_get_nblocksy(tex->format, height0); 1575 } 1576 } 1577 1578 return r600_create_surface_custom(pipe, tex, templ, 1579 width0, height0, 1580 width, height); 1581 } 1582 1583 static void r600_surface_destroy(struct pipe_context *pipe, 1584 struct pipe_surface *surface) 1585 { 1586 struct r600_surface *surf = (struct r600_surface*)surface; 1587 r600_resource_reference(&surf->cb_buffer_fmask, NULL); 1588 r600_resource_reference(&surf->cb_buffer_cmask, NULL); 1589 pipe_resource_reference(&surface->texture, NULL); 1590 FREE(surface); 1591 } 1592 1593 static void r600_clear_texture(struct pipe_context *pipe, 1594 struct pipe_resource *tex, 1595 unsigned level, 1596 const struct pipe_box *box, 1597 const void *data) 1598 { 1599 struct pipe_screen *screen = pipe->screen; 1600 struct r600_texture *rtex = (struct r600_texture*)tex; 1601 struct pipe_surface tmpl = {{0}}; 1602 struct pipe_surface *sf; 1603 const struct util_format_description *desc = 1604 util_format_description(tex->format); 1605 1606 tmpl.format = tex->format; 1607 tmpl.u.tex.first_layer = box->z; 1608 tmpl.u.tex.last_layer = box->z + box->depth - 1; 1609 tmpl.u.tex.level = level; 1610 sf = pipe->create_surface(pipe, tex, &tmpl); 1611 if (!sf) 1612 return; 1613 1614 if (rtex->is_depth) { 1615 unsigned clear; 1616 float depth; 1617 uint8_t stencil = 0; 1618 1619 /* Depth is always present. */ 1620 clear = PIPE_CLEAR_DEPTH; 1621 desc->unpack_z_float(&depth, 0, data, 0, 1, 1); 1622 1623 if (rtex->surface.has_stencil) { 1624 clear |= PIPE_CLEAR_STENCIL; 1625 desc->unpack_s_8uint(&stencil, 0, data, 0, 1, 1); 1626 } 1627 1628 pipe->clear_depth_stencil(pipe, sf, clear, depth, stencil, 1629 box->x, box->y, 1630 box->width, box->height, false); 1631 } else { 1632 union pipe_color_union color; 1633 1634 /* pipe_color_union requires the full vec4 representation. */ 1635 if (util_format_is_pure_uint(tex->format)) 1636 desc->unpack_rgba_uint(color.ui, 0, data, 0, 1, 1); 1637 else if (util_format_is_pure_sint(tex->format)) 1638 desc->unpack_rgba_sint(color.i, 0, data, 0, 1, 1); 1639 else 1640 desc->unpack_rgba_float(color.f, 0, data, 0, 1, 1); 1641 1642 if (screen->is_format_supported(screen, tex->format, 1643 tex->target, 0, 1644 PIPE_BIND_RENDER_TARGET)) { 1645 pipe->clear_render_target(pipe, sf, &color, 1646 box->x, box->y, 1647 box->width, box->height, false); 1648 } else { 1649 /* Software fallback - just for R9G9B9E5_FLOAT */ 1650 util_clear_render_target(pipe, sf, &color, 1651 box->x, box->y, 1652 box->width, box->height); 1653 } 1654 } 1655 pipe_surface_reference(&sf, NULL); 1656 } 1657 1658 unsigned r600_translate_colorswap(enum pipe_format format, bool do_endian_swap) 1659 { 1660 const struct util_format_description *desc = util_format_description(format); 1661 1662 #define HAS_SWIZZLE(chan,swz) (desc->swizzle[chan] == PIPE_SWIZZLE_##swz) 1663 1664 if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */ 1665 return V_0280A0_SWAP_STD; 1666 1667 if (desc->layout != UTIL_FORMAT_LAYOUT_PLAIN) 1668 return ~0U; 1669 1670 switch (desc->nr_channels) { 1671 case 1: 1672 if (HAS_SWIZZLE(0,X)) 1673 return V_0280A0_SWAP_STD; /* X___ */ 1674 else if (HAS_SWIZZLE(3,X)) 1675 return V_0280A0_SWAP_ALT_REV; /* ___X */ 1676 break; 1677 case 2: 1678 if ((HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,Y)) || 1679 (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(1,NONE)) || 1680 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,Y))) 1681 return V_0280A0_SWAP_STD; /* XY__ */ 1682 else if ((HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,X)) || 1683 (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(1,NONE)) || 1684 (HAS_SWIZZLE(0,NONE) && HAS_SWIZZLE(1,X))) 1685 /* YX__ */ 1686 return (do_endian_swap ? V_0280A0_SWAP_STD : V_0280A0_SWAP_STD_REV); 1687 else if (HAS_SWIZZLE(0,X) && HAS_SWIZZLE(3,Y)) 1688 return V_0280A0_SWAP_ALT; /* X__Y */ 1689 else if (HAS_SWIZZLE(0,Y) && HAS_SWIZZLE(3,X)) 1690 return V_0280A0_SWAP_ALT_REV; /* Y__X */ 1691 break; 1692 case 3: 1693 if (HAS_SWIZZLE(0,X)) 1694 return (do_endian_swap ? V_0280A0_SWAP_STD_REV : V_0280A0_SWAP_STD); 1695 else if (HAS_SWIZZLE(0,Z)) 1696 return V_0280A0_SWAP_STD_REV; /* ZYX */ 1697 break; 1698 case 4: 1699 /* check the middle channels, the 1st and 4th channel can be NONE */ 1700 if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,Z)) { 1701 return V_0280A0_SWAP_STD; /* XYZW */ 1702 } else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,Y)) { 1703 return V_0280A0_SWAP_STD_REV; /* WZYX */ 1704 } else if (HAS_SWIZZLE(1,Y) && HAS_SWIZZLE(2,X)) { 1705 return V_0280A0_SWAP_ALT; /* ZYXW */ 1706 } else if (HAS_SWIZZLE(1,Z) && HAS_SWIZZLE(2,W)) { 1707 /* YZWX */ 1708 if (desc->is_array) 1709 return V_0280A0_SWAP_ALT_REV; 1710 else 1711 return (do_endian_swap ? V_0280A0_SWAP_ALT : V_0280A0_SWAP_ALT_REV); 1712 } 1713 break; 1714 } 1715 return ~0U; 1716 } 1717 1718 /* FAST COLOR CLEAR */ 1719 1720 static void evergreen_set_clear_color(struct r600_texture *rtex, 1721 enum pipe_format surface_format, 1722 const union pipe_color_union *color) 1723 { 1724 union util_color uc; 1725 1726 memset(&uc, 0, sizeof(uc)); 1727 1728 if (rtex->surface.bpe == 16) { 1729 /* DCC fast clear only: 1730 * CLEAR_WORD0 = R = G = B 1731 * CLEAR_WORD1 = A 1732 */ 1733 assert(color->ui[0] == color->ui[1] && 1734 color->ui[0] == color->ui[2]); 1735 uc.ui[0] = color->ui[0]; 1736 uc.ui[1] = color->ui[3]; 1737 } else if (util_format_is_pure_uint(surface_format)) { 1738 util_format_write_4ui(surface_format, color->ui, 0, &uc, 0, 0, 0, 1, 1); 1739 } else if (util_format_is_pure_sint(surface_format)) { 1740 util_format_write_4i(surface_format, color->i, 0, &uc, 0, 0, 0, 1, 1); 1741 } else { 1742 util_pack_color(color->f, surface_format, &uc); 1743 } 1744 1745 memcpy(rtex->color_clear_value, &uc, 2 * sizeof(uint32_t)); 1746 } 1747 1748 void evergreen_do_fast_color_clear(struct r600_common_context *rctx, 1749 struct pipe_framebuffer_state *fb, 1750 struct r600_atom *fb_state, 1751 unsigned *buffers, ubyte *dirty_cbufs, 1752 const union pipe_color_union *color) 1753 { 1754 int i; 1755 1756 /* This function is broken in BE, so just disable this path for now */ 1757 #ifdef PIPE_ARCH_BIG_ENDIAN 1758 return; 1759 #endif 1760 1761 if (rctx->render_cond) 1762 return; 1763 1764 for (i = 0; i < fb->nr_cbufs; i++) { 1765 struct r600_texture *tex; 1766 unsigned clear_bit = PIPE_CLEAR_COLOR0 << i; 1767 1768 if (!fb->cbufs[i]) 1769 continue; 1770 1771 /* if this colorbuffer is not being cleared */ 1772 if (!(*buffers & clear_bit)) 1773 continue; 1774 1775 tex = (struct r600_texture *)fb->cbufs[i]->texture; 1776 1777 /* the clear is allowed if all layers are bound */ 1778 if (fb->cbufs[i]->u.tex.first_layer != 0 || 1779 fb->cbufs[i]->u.tex.last_layer != util_max_layer(&tex->resource.b.b, 0)) { 1780 continue; 1781 } 1782 1783 /* cannot clear mipmapped textures */ 1784 if (fb->cbufs[i]->texture->last_level != 0) { 1785 continue; 1786 } 1787 1788 /* only supported on tiled surfaces */ 1789 if (tex->surface.is_linear) { 1790 continue; 1791 } 1792 1793 /* shared textures can't use fast clear without an explicit flush, 1794 * because there is no way to communicate the clear color among 1795 * all clients 1796 */ 1797 if (tex->resource.b.is_shared && 1798 !(tex->resource.external_usage & PIPE_HANDLE_USAGE_EXPLICIT_FLUSH)) 1799 continue; 1800 1801 { 1802 /* 128-bit formats are unusupported */ 1803 if (tex->surface.bpe > 8) { 1804 continue; 1805 } 1806 1807 /* ensure CMASK is enabled */ 1808 r600_texture_alloc_cmask_separate(rctx->screen, tex); 1809 if (tex->cmask.size == 0) { 1810 continue; 1811 } 1812 1813 /* Do the fast clear. */ 1814 rctx->clear_buffer(&rctx->b, &tex->cmask_buffer->b.b, 1815 tex->cmask.offset, tex->cmask.size, 0, 1816 R600_COHERENCY_CB_META); 1817 1818 bool need_compressed_update = !tex->dirty_level_mask; 1819 1820 tex->dirty_level_mask |= 1 << fb->cbufs[i]->u.tex.level; 1821 1822 if (need_compressed_update) 1823 p_atomic_inc(&rctx->screen->compressed_colortex_counter); 1824 } 1825 1826 evergreen_set_clear_color(tex, fb->cbufs[i]->format, color); 1827 1828 if (dirty_cbufs) 1829 *dirty_cbufs |= 1 << i; 1830 rctx->set_atom_dirty(rctx, fb_state, true); 1831 *buffers &= ~clear_bit; 1832 } 1833 } 1834 1835 static struct pipe_memory_object * 1836 r600_memobj_from_handle(struct pipe_screen *screen, 1837 struct winsys_handle *whandle, 1838 bool dedicated) 1839 { 1840 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen; 1841 struct r600_memory_object *memobj = CALLOC_STRUCT(r600_memory_object); 1842 struct pb_buffer *buf = NULL; 1843 uint32_t stride, offset; 1844 1845 if (!memobj) 1846 return NULL; 1847 1848 buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, 1849 &stride, &offset); 1850 if (!buf) { 1851 free(memobj); 1852 return NULL; 1853 } 1854 1855 memobj->b.dedicated = dedicated; 1856 memobj->buf = buf; 1857 memobj->stride = stride; 1858 memobj->offset = offset; 1859 1860 return (struct pipe_memory_object *)memobj; 1861 1862 } 1863 1864 static void 1865 r600_memobj_destroy(struct pipe_screen *screen, 1866 struct pipe_memory_object *_memobj) 1867 { 1868 struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj; 1869 1870 pb_reference(&memobj->buf, NULL); 1871 free(memobj); 1872 } 1873 1874 static struct pipe_resource * 1875 r600_texture_from_memobj(struct pipe_screen *screen, 1876 const struct pipe_resource *templ, 1877 struct pipe_memory_object *_memobj, 1878 uint64_t offset) 1879 { 1880 int r; 1881 struct r600_common_screen *rscreen = (struct r600_common_screen*)screen; 1882 struct r600_memory_object *memobj = (struct r600_memory_object *)_memobj; 1883 struct r600_texture *rtex; 1884 struct radeon_surf surface = {}; 1885 struct radeon_bo_metadata metadata = {}; 1886 enum radeon_surf_mode array_mode; 1887 bool is_scanout; 1888 struct pb_buffer *buf = NULL; 1889 1890 if (memobj->b.dedicated) { 1891 rscreen->ws->buffer_get_metadata(memobj->buf, &metadata); 1892 r600_surface_import_metadata(rscreen, &surface, &metadata, 1893 &array_mode, &is_scanout); 1894 } else { 1895 /** 1896 * The bo metadata is unset for un-dedicated images. So we fall 1897 * back to linear. See answer to question 5 of the 1898 * VK_KHX_external_memory spec for some details. 1899 * 1900 * It is possible that this case isn't going to work if the 1901 * surface pitch isn't correctly aligned by default. 1902 * 1903 * In order to support it correctly we require multi-image 1904 * metadata to be syncrhonized between radv and radeonsi. The 1905 * semantics of associating multiple image metadata to a memory 1906 * object on the vulkan export side are not concretely defined 1907 * either. 1908 * 1909 * All the use cases we are aware of at the moment for memory 1910 * objects use dedicated allocations. So lets keep the initial 1911 * implementation simple. 1912 * 1913 * A possible alternative is to attempt to reconstruct the 1914 * tiling information when the TexParameter TEXTURE_TILING_EXT 1915 * is set. 1916 */ 1917 array_mode = RADEON_SURF_MODE_LINEAR_ALIGNED; 1918 is_scanout = false; 1919 1920 } 1921 1922 r = r600_init_surface(rscreen, &surface, templ, 1923 array_mode, memobj->stride, 1924 offset, true, is_scanout, 1925 false); 1926 if (r) 1927 return NULL; 1928 1929 rtex = r600_texture_create_object(screen, templ, memobj->buf, &surface); 1930 if (!rtex) 1931 return NULL; 1932 1933 /* r600_texture_create_object doesn't increment refcount of 1934 * memobj->buf, so increment it here. 1935 */ 1936 pb_reference(&buf, memobj->buf); 1937 1938 rtex->resource.b.is_shared = true; 1939 rtex->resource.external_usage = PIPE_HANDLE_USAGE_READ_WRITE; 1940 1941 if (rscreen->apply_opaque_metadata) 1942 rscreen->apply_opaque_metadata(rscreen, rtex, &metadata); 1943 1944 return &rtex->resource.b.b; 1945 } 1946 1947 void r600_init_screen_texture_functions(struct r600_common_screen *rscreen) 1948 { 1949 rscreen->b.resource_from_handle = r600_texture_from_handle; 1950 rscreen->b.resource_get_handle = r600_texture_get_handle; 1951 rscreen->b.resource_from_memobj = r600_texture_from_memobj; 1952 rscreen->b.memobj_create_from_handle = r600_memobj_from_handle; 1953 rscreen->b.memobj_destroy = r600_memobj_destroy; 1954 } 1955 1956 void r600_init_context_texture_functions(struct r600_common_context *rctx) 1957 { 1958 rctx->b.create_surface = r600_create_surface; 1959 rctx->b.surface_destroy = r600_surface_destroy; 1960 rctx->b.clear_texture = r600_clear_texture; 1961 } 1962