/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 * Copyright 2014,2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "sid.h"
#include "si_pipe.h"

static void cik_sdma_copy_buffer(struct si_context *ctx,
				 struct pipe_resource *dst,
				 struct pipe_resource *src,
				 uint64_t dst_offset,
				 uint64_t src_offset,
				 uint64_t size)
{
	struct radeon_winsys_cs *cs = ctx->b.dma.cs;
	unsigned i, ncopy, csize;
	struct r600_resource *rdst = r600_resource(dst);
	struct r600_resource *rsrc = r600_resource(src);

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->valid_buffer_range, dst_offset,
		       dst_offset + size);

	dst_offset += rdst->gpu_address;
	src_offset += rsrc->gpu_address;

	/* Split the copy into chunks of at most CIK_SDMA_COPY_MAX_SIZE bytes;
	 * each LINEAR copy packet below is 7 dwords. */
	ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
	si_need_dma_space(&ctx->b, ncopy * 7, rdst, rsrc);

	for (i = 0; i < ncopy; i++) {
		csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
		radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
						CIK_SDMA_COPY_SUB_OPCODE_LINEAR,
						0));
		radeon_emit(cs, ctx->b.chip_class >= GFX9 ? csize - 1 : csize);
		radeon_emit(cs, 0); /* src/dst endian swap */
		radeon_emit(cs, src_offset);
		radeon_emit(cs, src_offset >> 32);
		radeon_emit(cs, dst_offset);
		radeon_emit(cs, dst_offset >> 32);
		dst_offset += csize;
		src_offset += csize;
		size -= csize;
	}
}
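/* Clear a buffer with the SDMA CONSTANT_FILL packet.
 *
 * The fill is dword-granular, so unaligned offsets/sizes and sparse
 * buffers are punted to the generic pipe_context::clear_buffer path
 * below instead.
 */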
static void cik_sdma_clear_buffer(struct pipe_context *ctx,
				  struct pipe_resource *dst,
				  uint64_t offset,
				  uint64_t size,
				  unsigned clear_value)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct radeon_winsys_cs *cs = sctx->b.dma.cs;
	unsigned i, ncopy, csize;
	struct r600_resource *rdst = r600_resource(dst);

	if (!cs || offset % 4 != 0 || size % 4 != 0 ||
	    dst->flags & PIPE_RESOURCE_FLAG_SPARSE) {
		ctx->clear_buffer(ctx, dst, offset, size, &clear_value, 4);
		return;
	}

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range.
	 */
	util_range_add(&rdst->valid_buffer_range, offset, offset + size);

	offset += rdst->gpu_address;

	/* Use the same maximum chunk size as for copying; each
	 * CONSTANT_FILL packet below is 5 dwords. */
	ncopy = DIV_ROUND_UP(size, CIK_SDMA_COPY_MAX_SIZE);
	si_need_dma_space(&sctx->b, ncopy * 5, rdst, NULL);

	for (i = 0; i < ncopy; i++) {
		csize = MIN2(size, CIK_SDMA_COPY_MAX_SIZE);
		radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_PACKET_CONSTANT_FILL, 0,
						0x8000 /* dword copy */));
		radeon_emit(cs, offset);
		radeon_emit(cs, offset >> 32);
		radeon_emit(cs, clear_value);
		radeon_emit(cs, sctx->b.chip_class >= GFX9 ? csize - 1 : csize);
		offset += csize;
		size -= csize;
	}
}

/* Return the width of a mip level in blocks: minify, then divide by the
 * (compressed) block width. */
static unsigned minify_as_blocks(unsigned width, unsigned level, unsigned blk_w)
{
	width = u_minify(width, level);
	return DIV_ROUND_UP(width, blk_w);
}
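/* Pack the tiling parameters of one mip level into the "tile info" dword
 * of the tiled and T2T sub-window packets. The bit layout below is read
 * off the shifts used in the function, not quoted from a spec:
 *   [2:0]   log2(bytes per element), only when set_bpp
 *   [7:3]   ARRAY_MODE
 *   [10:8]  MICRO_TILE_MODE_NEW
 *   [14:11] log2(tile_split / 64)
 *   [17:15] BANK_WIDTH
 *   [20:18] BANK_HEIGHT
 *   [23:21] NUM_BANKS
 *   [25:24] MACRO_TILE_ASPECT
 *   [30:26] PIPE_CONFIG
 */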
static unsigned encode_tile_info(struct si_context *sctx,
				 struct r600_texture *tex, unsigned level,
				 bool set_bpp)
{
	struct radeon_info *info = &sctx->screen->info;
	unsigned tile_index = tex->surface.u.legacy.tiling_index[level];
	unsigned macro_tile_index = tex->surface.u.legacy.macro_tile_index;
	unsigned tile_mode = info->si_tile_mode_array[tile_index];
	unsigned macro_tile_mode = info->cik_macrotile_mode_array[macro_tile_index];

	return (set_bpp ? util_logbase2(tex->surface.bpe) : 0) |
	       (G_009910_ARRAY_MODE(tile_mode) << 3) |
	       (G_009910_MICRO_TILE_MODE_NEW(tile_mode) << 8) |
	       /* Non-depth modes don't have TILE_SPLIT set. */
	       ((util_logbase2(tex->surface.u.legacy.tile_split >> 6)) << 11) |
	       (G_009990_BANK_WIDTH(macro_tile_mode) << 15) |
	       (G_009990_BANK_HEIGHT(macro_tile_mode) << 18) |
	       (G_009990_NUM_BANKS(macro_tile_mode) << 21) |
	       (G_009990_MACRO_TILE_ASPECT(macro_tile_mode) << 24) |
	       (G_009910_PIPE_CONFIG(tile_mode) << 26);
}

static bool cik_sdma_copy_texture(struct si_context *sctx,
				  struct pipe_resource *dst,
				  unsigned dst_level,
				  unsigned dstx, unsigned dsty, unsigned dstz,
				  struct pipe_resource *src,
				  unsigned src_level,
				  const struct pipe_box *src_box)
{
	struct radeon_info *info = &sctx->screen->info;
	struct r600_texture *rsrc = (struct r600_texture*)src;
	struct r600_texture *rdst = (struct r600_texture*)dst;
	unsigned bpp = rdst->surface.bpe;
	uint64_t dst_address = rdst->resource.gpu_address +
			       rdst->surface.u.legacy.level[dst_level].offset;
	uint64_t src_address = rsrc->resource.gpu_address +
			       rsrc->surface.u.legacy.level[src_level].offset;
	unsigned dst_mode = rdst->surface.u.legacy.level[dst_level].mode;
	unsigned src_mode = rsrc->surface.u.legacy.level[src_level].mode;
	unsigned dst_tile_index = rdst->surface.u.legacy.tiling_index[dst_level];
	unsigned src_tile_index = rsrc->surface.u.legacy.tiling_index[src_level];
	unsigned dst_tile_mode = info->si_tile_mode_array[dst_tile_index];
	unsigned src_tile_mode = info->si_tile_mode_array[src_tile_index];
	unsigned dst_micro_mode = G_009910_MICRO_TILE_MODE_NEW(dst_tile_mode);
	unsigned src_micro_mode = G_009910_MICRO_TILE_MODE_NEW(src_tile_mode);
	unsigned dst_tile_swizzle = dst_mode == RADEON_SURF_MODE_2D ?
					    rdst->surface.tile_swizzle : 0;
	unsigned src_tile_swizzle = src_mode == RADEON_SURF_MODE_2D ?
					    rsrc->surface.tile_swizzle : 0;
	unsigned dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x;
	unsigned src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x;
	uint64_t dst_slice_pitch = ((uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4) / bpp;
	uint64_t src_slice_pitch = ((uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4) / bpp;
	unsigned dst_width = minify_as_blocks(rdst->resource.b.b.width0,
					      dst_level, rdst->surface.blk_w);
	unsigned src_width = minify_as_blocks(rsrc->resource.b.b.width0,
					      src_level, rsrc->surface.blk_w);
	unsigned dst_height = minify_as_blocks(rdst->resource.b.b.height0,
					       dst_level, rdst->surface.blk_h);
	unsigned src_height = minify_as_blocks(rsrc->resource.b.b.height0,
					       src_level, rsrc->surface.blk_h);
	unsigned srcx = src_box->x / rsrc->surface.blk_w;
	unsigned srcy = src_box->y / rsrc->surface.blk_h;
	unsigned srcz = src_box->z;
	unsigned copy_width = DIV_ROUND_UP(src_box->width, rsrc->surface.blk_w);
	unsigned copy_height = DIV_ROUND_UP(src_box->height, rsrc->surface.blk_h);
	unsigned copy_depth = src_box->depth;

	assert(src_level <= src->last_level);
	assert(dst_level <= dst->last_level);
	assert(rdst->surface.u.legacy.level[dst_level].offset +
	       dst_slice_pitch * bpp * (dstz + src_box->depth) <=
	       rdst->resource.buf->size);
	assert(rsrc->surface.u.legacy.level[src_level].offset +
	       src_slice_pitch * bpp * (srcz + src_box->depth) <=
	       rsrc->resource.buf->size);

	if (!si_prepare_for_dma_blit(&sctx->b, rdst, dst_level, dstx, dsty,
				     dstz, rsrc, src_level, src_box))
		return false;

	dstx /= rdst->surface.blk_w;
	dsty /= rdst->surface.blk_h;

	/* The coordinates must fit into the packet bitfields:
	 * 14 bits for x/y, 11 bits for z. */
	if (srcx >= (1 << 14) ||
	    srcy >= (1 << 14) ||
	    srcz >= (1 << 11) ||
	    dstx >= (1 << 14) ||
	    dsty >= (1 << 14) ||
	    dstz >= (1 << 11))
		return false;

	dst_address |= dst_tile_swizzle << 8;
	src_address |= src_tile_swizzle << 8;
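	/* Each copy path below first checks that the parameters fit the
	 * packet bitfields and the known hw errata before emitting anything.
	 * If no path matches, we return false and the caller falls back to
	 * the gfx blit. */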
	/* Linear -> linear sub-window copy. */
	if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
	    src_mode == RADEON_SURF_MODE_LINEAR_ALIGNED &&
	    /* check if everything fits into the bitfields */
	    src_pitch <= (1 << 14) &&
	    dst_pitch <= (1 << 14) &&
	    src_slice_pitch <= (1 << 28) &&
	    dst_slice_pitch <= (1 << 28) &&
	    copy_width <= (1 << 14) &&
	    copy_height <= (1 << 14) &&
	    copy_depth <= (1 << 11) &&
	    /* HW limitation - CIK: */
	    (sctx->b.chip_class != CIK ||
	     (copy_width < (1 << 14) &&
	      copy_height < (1 << 14) &&
	      copy_depth < (1 << 11))) &&
	    /* HW limitation - some CIK parts: */
	    ((sctx->b.family != CHIP_BONAIRE &&
	      sctx->b.family != CHIP_KAVERI) ||
	     (srcx + copy_width != (1 << 14) &&
	      srcy + copy_height != (1 << 14)))) {
		struct radeon_winsys_cs *cs = sctx->b.dma.cs;

		si_need_dma_space(&sctx->b, 13, &rdst->resource, &rsrc->resource);

		radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
				CIK_SDMA_COPY_SUB_OPCODE_LINEAR_SUB_WINDOW, 0) |
			    (util_logbase2(bpp) << 29));
		radeon_emit(cs, src_address);
		radeon_emit(cs, src_address >> 32);
		radeon_emit(cs, srcx | (srcy << 16));
		radeon_emit(cs, srcz | ((src_pitch - 1) << 16));
		radeon_emit(cs, src_slice_pitch - 1);
		radeon_emit(cs, dst_address);
		radeon_emit(cs, dst_address >> 32);
		radeon_emit(cs, dstx | (dsty << 16));
		radeon_emit(cs, dstz | ((dst_pitch - 1) << 16));
		radeon_emit(cs, dst_slice_pitch - 1);
		if (sctx->b.chip_class == CIK) {
			radeon_emit(cs, copy_width | (copy_height << 16));
			radeon_emit(cs, copy_depth);
		} else {
			radeon_emit(cs, (copy_width - 1) | ((copy_height - 1) << 16));
			radeon_emit(cs, (copy_depth - 1));
		}
		return true;
	}

	/* Tiled <-> linear sub-window copy. */
	if ((src_mode >= RADEON_SURF_MODE_1D) != (dst_mode >= RADEON_SURF_MODE_1D)) {
		struct r600_texture *tiled = src_mode >= RADEON_SURF_MODE_1D ? rsrc : rdst;
		struct r600_texture *linear = tiled == rsrc ? rdst : rsrc;
		unsigned tiled_level = tiled == rsrc ? src_level : dst_level;
		unsigned linear_level = linear == rsrc ? src_level : dst_level;
		unsigned tiled_x = tiled == rsrc ? srcx : dstx;
		unsigned linear_x = linear == rsrc ? srcx : dstx;
		unsigned tiled_y = tiled == rsrc ? srcy : dsty;
		unsigned linear_y = linear == rsrc ? srcy : dsty;
		unsigned tiled_z = tiled == rsrc ? srcz : dstz;
		unsigned linear_z = linear == rsrc ? srcz : dstz;
		unsigned tiled_width = tiled == rsrc ? src_width : dst_width;
		unsigned linear_width = linear == rsrc ? src_width : dst_width;
		unsigned tiled_pitch = tiled == rsrc ? src_pitch : dst_pitch;
		unsigned linear_pitch = linear == rsrc ? src_pitch : dst_pitch;
		unsigned tiled_slice_pitch = tiled == rsrc ? src_slice_pitch : dst_slice_pitch;
		unsigned linear_slice_pitch = linear == rsrc ? src_slice_pitch : dst_slice_pitch;
		uint64_t tiled_address = tiled == rsrc ? src_address : dst_address;
		uint64_t linear_address = linear == rsrc ? src_address : dst_address;
		unsigned tiled_micro_mode = tiled == rsrc ? src_micro_mode : dst_micro_mode;

		assert(tiled_pitch % 8 == 0);
		assert(tiled_slice_pitch % 64 == 0);
		unsigned pitch_tile_max = tiled_pitch / 8 - 1;
		unsigned slice_tile_max = tiled_slice_pitch / 64 - 1;
		unsigned xalign = MAX2(1, 4 / bpp);
		unsigned copy_width_aligned = copy_width;

		/* If the region ends at the last pixel and is unaligned, we
		 * can copy the remainder of the line that is not visible to
		 * make it aligned.
		 */
		if (copy_width % xalign != 0 &&
		    linear_x + copy_width == linear_width &&
		    tiled_x + copy_width == tiled_width &&
		    linear_x + align(copy_width, xalign) <= linear_pitch &&
		    tiled_x + align(copy_width, xalign) <= tiled_pitch)
			copy_width_aligned = align(copy_width, xalign);
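		/* Example of the xalign rule above (illustrative numbers):
		 * with bpp == 1, xalign = 4, so a 30-texel copy that ends at
		 * the edge of a 30-texel-wide level with a 32-texel pitch is
		 * widened to 32 texels; with bpp >= 4, xalign = 1 and nothing
		 * changes. */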
		/* HW limitations. */
		if ((sctx->b.family == CHIP_BONAIRE ||
		     sctx->b.family == CHIP_KAVERI) &&
		    linear_pitch - 1 == 0x3fff &&
		    bpp == 16)
			return false;

		if (sctx->b.chip_class == CIK &&
		    (copy_width_aligned == (1 << 14) ||
		     copy_height == (1 << 14) ||
		     copy_depth == (1 << 11)))
			return false;

		if ((sctx->b.family == CHIP_BONAIRE ||
		     sctx->b.family == CHIP_KAVERI ||
		     sctx->b.family == CHIP_KABINI ||
		     sctx->b.family == CHIP_MULLINS) &&
		    (tiled_x + copy_width == (1 << 14) ||
		     tiled_y + copy_height == (1 << 14)))
			return false;

		/* The hw can read outside of the given linear buffer bounds,
		 * or access those pages but, in the case of writes, not touch
		 * the memory (a VM fault is still raised either way).
		 *
		 * Out-of-bounds memory accesses and page directory accesses
		 * must be prevented.
		 */
		int64_t start_linear_address, end_linear_address;
		unsigned granularity;

		/* Deduce the size of reads from the linear surface. */
		switch (tiled_micro_mode) {
		case V_009910_ADDR_SURF_DISPLAY_MICRO_TILING:
			granularity = bpp == 1 ? 64 / (8*bpp) :
						 128 / (8*bpp);
			break;
		case V_009910_ADDR_SURF_THIN_MICRO_TILING:
		case V_009910_ADDR_SURF_DEPTH_MICRO_TILING:
			if (0 /* TODO: THICK microtiling */)
				granularity = bpp == 1 ? 32 / (8*bpp) :
					      bpp == 2 ? 64 / (8*bpp) :
					      bpp <= 8 ? 128 / (8*bpp) :
							 256 / (8*bpp);
			else
				granularity = bpp <= 2 ? 64 / (8*bpp) :
					      bpp <= 8 ? 128 / (8*bpp) :
							 256 / (8*bpp);
			break;
		default:
			return false;
		}

		/* The linear reads start at tiled_x & ~(granularity - 1).
		 * If linear_x == 0 && tiled_x % granularity != 0, the hw
		 * starts reading from an address preceding linear_address!!!
		 */
		start_linear_address =
			linear->surface.u.legacy.level[linear_level].offset +
			bpp * (linear_z * linear_slice_pitch +
			       linear_y * linear_pitch +
			       linear_x);
		start_linear_address -= (int)(bpp * (tiled_x % granularity));

		end_linear_address =
			linear->surface.u.legacy.level[linear_level].offset +
			bpp * ((linear_z + copy_depth - 1) * linear_slice_pitch +
			       (linear_y + copy_height - 1) * linear_pitch +
			       (linear_x + copy_width));

		if ((tiled_x + copy_width) % granularity)
			end_linear_address += granularity -
					      (tiled_x + copy_width) % granularity;

		if (start_linear_address < 0 ||
		    end_linear_address > linear->surface.surf_size)
			return false;
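		/* Worked example (illustrative, not from a spec): bpp == 4
		 * with DISPLAY micro tiling gives granularity = 128 / 32 = 4
		 * texels. If tiled_x % 4 == 3, the hw starts its linear reads
		 * 3 * 4 = 12 bytes before linear_address, which is exactly
		 * what the start_linear_address adjustment above accounts
		 * for. */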
		/* Check requirements. */
		if (tiled_address % 256 == 0 &&
		    linear_address % 4 == 0 &&
		    linear_pitch % xalign == 0 &&
		    linear_x % xalign == 0 &&
		    tiled_x % xalign == 0 &&
		    copy_width_aligned % xalign == 0 &&
		    tiled_micro_mode != V_009910_ADDR_SURF_ROTATED_MICRO_TILING &&
		    /* check if everything fits into the bitfields */
		    tiled->surface.u.legacy.tile_split <= 4096 &&
		    pitch_tile_max < (1 << 11) &&
		    slice_tile_max < (1 << 22) &&
		    linear_pitch <= (1 << 14) &&
		    linear_slice_pitch <= (1 << 28) &&
		    copy_width_aligned <= (1 << 14) &&
		    copy_height <= (1 << 14) &&
		    copy_depth <= (1 << 11)) {
			struct radeon_winsys_cs *cs = sctx->b.dma.cs;
			uint32_t direction = linear == rdst ? 1u << 31 : 0;

			si_need_dma_space(&sctx->b, 14, &rdst->resource, &rsrc->resource);

			radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
					CIK_SDMA_COPY_SUB_OPCODE_TILED_SUB_WINDOW, 0) |
				    direction);
			radeon_emit(cs, tiled_address);
			radeon_emit(cs, tiled_address >> 32);
			radeon_emit(cs, tiled_x | (tiled_y << 16));
			radeon_emit(cs, tiled_z | (pitch_tile_max << 16));
			radeon_emit(cs, slice_tile_max);
			radeon_emit(cs, encode_tile_info(sctx, tiled, tiled_level, true));
			radeon_emit(cs, linear_address);
			radeon_emit(cs, linear_address >> 32);
			radeon_emit(cs, linear_x | (linear_y << 16));
			radeon_emit(cs, linear_z | ((linear_pitch - 1) << 16));
			radeon_emit(cs, linear_slice_pitch - 1);
			if (sctx->b.chip_class == CIK) {
				radeon_emit(cs, copy_width_aligned | (copy_height << 16));
				radeon_emit(cs, copy_depth);
			} else {
				radeon_emit(cs, (copy_width_aligned - 1) | ((copy_height - 1) << 16));
				radeon_emit(cs, (copy_depth - 1));
			}
			return true;
		}
	}

	/* Tiled -> Tiled sub-window copy. */
	if (dst_mode >= RADEON_SURF_MODE_1D &&
	    src_mode >= RADEON_SURF_MODE_1D &&
	    /* check if these fit into the bitfields */
	    src_address % 256 == 0 &&
	    dst_address % 256 == 0 &&
	    rsrc->surface.u.legacy.tile_split <= 4096 &&
	    rdst->surface.u.legacy.tile_split <= 4096 &&
	    dstx % 8 == 0 &&
	    dsty % 8 == 0 &&
	    srcx % 8 == 0 &&
	    srcy % 8 == 0 &&
	    /* this can either be equal, or display->rotated (VI+ only) */
	    (src_micro_mode == dst_micro_mode ||
	     (sctx->b.chip_class >= VI &&
	      src_micro_mode == V_009910_ADDR_SURF_DISPLAY_MICRO_TILING &&
	      dst_micro_mode == V_009910_ADDR_SURF_ROTATED_MICRO_TILING))) {
		assert(src_pitch % 8 == 0);
		assert(dst_pitch % 8 == 0);
		assert(src_slice_pitch % 64 == 0);
		assert(dst_slice_pitch % 64 == 0);
		unsigned src_pitch_tile_max = src_pitch / 8 - 1;
		unsigned dst_pitch_tile_max = dst_pitch / 8 - 1;
		unsigned src_slice_tile_max = src_slice_pitch / 64 - 1;
		unsigned dst_slice_tile_max = dst_slice_pitch / 64 - 1;
		unsigned copy_width_aligned = copy_width;
		unsigned copy_height_aligned = copy_height;

		/* If the region ends at the last pixel and is unaligned, we
		 * can copy the remainder of the tile that is not visible to
		 * make it aligned.
		 */
		if (copy_width % 8 != 0 &&
		    srcx + copy_width == src_width &&
		    dstx + copy_width == dst_width)
			copy_width_aligned = align(copy_width, 8);

		if (copy_height % 8 != 0 &&
		    srcy + copy_height == src_height &&
		    dsty + copy_height == dst_height)
			copy_height_aligned = align(copy_height, 8);
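		/* T2T copies appear to operate on whole 8x8 micro tiles,
		 * which is why x/y are required to be 8-aligned above and
		 * why the pitch/slice maxima below are expressed in units
		 * of 8 texels and 8x8 tiles (an assumption based on this
		 * code, not a spec quote). */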
		/* check if these fit into the bitfields */
		if (src_pitch_tile_max < (1 << 11) &&
		    dst_pitch_tile_max < (1 << 11) &&
		    src_slice_tile_max < (1 << 22) &&
		    dst_slice_tile_max < (1 << 22) &&
		    copy_width_aligned <= (1 << 14) &&
		    copy_height_aligned <= (1 << 14) &&
		    copy_depth <= (1 << 11) &&
		    copy_width_aligned % 8 == 0 &&
		    copy_height_aligned % 8 == 0 &&
		    /* HW limitation - CIK: */
		    (sctx->b.chip_class != CIK ||
		     (copy_width_aligned < (1 << 14) &&
		      copy_height_aligned < (1 << 14) &&
		      copy_depth < (1 << 11))) &&
		    /* HW limitation - some CIK parts: */
		    ((sctx->b.family != CHIP_BONAIRE &&
		      sctx->b.family != CHIP_KAVERI &&
		      sctx->b.family != CHIP_KABINI &&
		      sctx->b.family != CHIP_MULLINS) ||
		     (srcx + copy_width_aligned != (1 << 14) &&
		      srcy + copy_height_aligned != (1 << 14) &&
		      dstx + copy_width != (1 << 14)))) {
			struct radeon_winsys_cs *cs = sctx->b.dma.cs;

			si_need_dma_space(&sctx->b, 15, &rdst->resource, &rsrc->resource);

			radeon_emit(cs, CIK_SDMA_PACKET(CIK_SDMA_OPCODE_COPY,
					CIK_SDMA_COPY_SUB_OPCODE_T2T_SUB_WINDOW, 0));
			radeon_emit(cs, src_address);
			radeon_emit(cs, src_address >> 32);
			radeon_emit(cs, srcx | (srcy << 16));
			radeon_emit(cs, srcz | (src_pitch_tile_max << 16));
			radeon_emit(cs, src_slice_tile_max);
			radeon_emit(cs, encode_tile_info(sctx, rsrc, src_level, true));
			radeon_emit(cs, dst_address);
			radeon_emit(cs, dst_address >> 32);
			radeon_emit(cs, dstx | (dsty << 16));
			radeon_emit(cs, dstz | (dst_pitch_tile_max << 16));
			radeon_emit(cs, dst_slice_tile_max);
			radeon_emit(cs, encode_tile_info(sctx, rdst, dst_level, false));
			if (sctx->b.chip_class == CIK) {
				radeon_emit(cs, copy_width_aligned |
					    (copy_height_aligned << 16));
				radeon_emit(cs, copy_depth);
			} else {
				radeon_emit(cs, (copy_width_aligned - 8) |
					    ((copy_height_aligned - 8) << 16));
				radeon_emit(cs, (copy_depth - 1));
			}
			return true;
		}
	}

	return false;
}

static void cik_sdma_copy(struct pipe_context *ctx,
			  struct pipe_resource *dst,
			  unsigned dst_level,
			  unsigned dstx, unsigned dsty, unsigned dstz,
			  struct pipe_resource *src,
			  unsigned src_level,
			  const struct pipe_box *src_box)
{
	struct si_context *sctx = (struct si_context *)ctx;

	if (!sctx->b.dma.cs ||
	    src->flags & PIPE_RESOURCE_FLAG_SPARSE ||
	    dst->flags & PIPE_RESOURCE_FLAG_SPARSE)
		goto fallback;

	if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) {
		cik_sdma_copy_buffer(sctx, dst, src, dstx, src_box->x, src_box->width);
		return;
	}

	if ((sctx->b.chip_class == CIK || sctx->b.chip_class == VI) &&
	    cik_sdma_copy_texture(sctx, dst, dst_level, dstx, dsty, dstz,
				  src, src_level, src_box))
		return;

fallback:
	si_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz,
				src, src_level, src_box);
}

void cik_init_sdma_functions(struct si_context *sctx)
{
	sctx->b.dma_copy = cik_sdma_copy;
	sctx->b.dma_clear_buffer = cik_sdma_clear_buffer;
}