/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "si_pipe.h"
#include "sid.h"
#include "radeon/r600_cs.h"

/* Recommended maximum sizes for optimal performance.
 * Fall back to compute or SDMA if the size is greater.
 */
#define CP_DMA_COPY_PERF_THRESHOLD	(64 * 1024) /* copied from Vulkan */
#define CP_DMA_CLEAR_PERF_THRESHOLD	(32 * 1024) /* guess (clear is much slower) */

/* Set this if you want the ME to wait until CP DMA is done.
 * It should be set on the last CP DMA packet. */
#define CP_DMA_SYNC		(1 << 0)

/* Set this if the source data was used as a destination in a previous CP DMA
 * packet. It's for preventing a read-after-write (RAW) hazard between two
 * CP DMA packets. */
#define CP_DMA_RAW_WAIT		(1 << 1)
#define CP_DMA_USE_L2		(1 << 2) /* CIK+ */
#define CP_DMA_CLEAR		(1 << 3)
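
/* Unless skipped via the SI_CPDMA_SKIP_* user flags, si_cp_dma_prepare adds
 * CP_DMA_RAW_WAIT to the first packet and CP_DMA_SYNC to the last packet of
 * a transfer that is split into multiple packets.
 */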

/* The max number of bytes that can be copied per packet. */
static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
{
	unsigned max = sctx->b.chip_class >= GFX9 ?
			       S_414_BYTE_COUNT_GFX9(~0u) :
			       S_414_BYTE_COUNT_GFX6(~0u);

	/* make it aligned for optimal performance */
	return max & ~(SI_CPDMA_ALIGNMENT - 1);
}

/* Emit a CP DMA packet to do a copy from one buffer to another, or to clear
 * a buffer. The size must fit in bits [20:0]. If CP_DMA_CLEAR is set,
 * src_va is a 32-bit clear value.
 */
static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
			   uint64_t src_va, unsigned size, unsigned flags,
			   enum r600_coherency coher)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint32_t header = 0, command = 0;

	assert(size);
	assert(size <= cp_dma_max_byte_count(sctx));

	if (sctx->b.chip_class >= GFX9)
		command |= S_414_BYTE_COUNT_GFX9(size);
	else
		command |= S_414_BYTE_COUNT_GFX6(size);

	/* Sync flags. */
	if (flags & CP_DMA_SYNC)
		header |= S_411_CP_SYNC(1);
	else {
		if (sctx->b.chip_class >= GFX9)
			command |= S_414_DISABLE_WR_CONFIRM_GFX9(1);
		else
			command |= S_414_DISABLE_WR_CONFIRM_GFX6(1);
	}

	if (flags & CP_DMA_RAW_WAIT)
		command |= S_414_RAW_WAIT(1);

	/* Src and dst flags. */
	if (sctx->b.chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) &&
	    src_va == dst_va)
		header |= S_411_DSL_SEL(V_411_NOWHERE); /* prefetch only */
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_DSL_SEL(V_411_DST_ADDR_TC_L2);

	if (flags & CP_DMA_CLEAR)
		header |= S_411_SRC_SEL(V_411_DATA);
	else if (flags & CP_DMA_USE_L2)
		header |= S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2);

	if (sctx->b.chip_class >= CIK) {
		radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
		radeon_emit(cs, header);
		radeon_emit(cs, src_va);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, src_va >> 32);	/* SRC_ADDR_HI [31:0] */
		radeon_emit(cs, dst_va);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, dst_va >> 32);	/* DST_ADDR_HI [31:0] */
		radeon_emit(cs, command);
	} else {
		header |= S_411_SRC_ADDR_HI(src_va >> 32);

		radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
		radeon_emit(cs, src_va);	/* SRC_ADDR_LO [31:0] */
		radeon_emit(cs, header);	/* SRC_ADDR_HI [15:0] + flags. */
		radeon_emit(cs, dst_va);	/* DST_ADDR_LO [31:0] */
		radeon_emit(cs, (dst_va >> 32) & 0xffff); /* DST_ADDR_HI [15:0] */
		radeon_emit(cs, command);
	}

	/* CP DMA is executed in ME, but index buffers are read by PFP.
	 * This ensures that ME (CP DMA) is idle before PFP starts fetching
	 * indices. If we wanted to execute CP DMA in PFP, this packet
	 * should precede it.
	 */
	if (coher == R600_COHERENCY_SHADER && flags & CP_DMA_SYNC) {
		radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
		radeon_emit(cs, 0);
	}
}

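/* Return the cache-flush flags that callers OR into sctx->b.flags before
 * the CP DMA operation, based on the required coherency.
 */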
static unsigned get_flush_flags(struct si_context *sctx, enum r600_coherency coher)
{
	switch (coher) {
	default:
	case R600_COHERENCY_NONE:
		return 0;
	case R600_COHERENCY_SHADER:
		return SI_CONTEXT_INV_SMEM_L1 |
		       SI_CONTEXT_INV_VMEM_L1 |
		       (sctx->b.chip_class == SI ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
	case R600_COHERENCY_CB_META:
		return SI_CONTEXT_FLUSH_AND_INV_CB;
	}
}

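/* Return CP_DMA_USE_L2 if the transfer should go through the TC L2 cache
 * for the given coherency mode, or 0 if it should bypass it.
 */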
static unsigned get_tc_l2_flag(struct si_context *sctx, enum r600_coherency coher)
{
	if ((sctx->b.chip_class >= GFX9 && coher == R600_COHERENCY_CB_META) ||
	    (sctx->b.chip_class >= CIK && coher == R600_COHERENCY_SHADER))
		return CP_DMA_USE_L2;

	return 0;
}

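/* Common per-packet preparation: update the buffer list and CS space, flush
 * caches before the first packet, and set CP_DMA_RAW_WAIT on the first
 * packet and CP_DMA_SYNC on the last one, unless the user flags say to skip
 * those steps.
 */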
static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
			      struct pipe_resource *src, unsigned byte_count,
			      uint64_t remaining_size, unsigned user_flags,
			      bool *is_first, unsigned *packet_flags)
{
	/* Fast exit for a CPDMA prefetch. */
	if ((user_flags & SI_CPDMA_SKIP_ALL) == SI_CPDMA_SKIP_ALL) {
		*is_first = false;
		return;
	}

	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		/* Count the memory usage so that need_cs_space can take it into account. */
		si_context_add_resource_size(&sctx->b.b, dst);
		if (src)
			si_context_add_resource_size(&sctx->b.b, src);
	}

	if (!(user_flags & SI_CPDMA_SKIP_CHECK_CS_SPACE))
		si_need_cs_space(sctx);

	/* This must be done after need_cs_space. */
	if (!(user_flags & SI_CPDMA_SKIP_BO_LIST_UPDATE)) {
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
					  (struct r600_resource*)dst,
					  RADEON_USAGE_WRITE, RADEON_PRIO_CP_DMA);
		if (src)
			radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
						  (struct r600_resource*)src,
						  RADEON_USAGE_READ, RADEON_PRIO_CP_DMA);
	}

	/* Flush the caches for the first copy only.
	 * Also wait for the previous CP DMA operations.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->b.flags)
		si_emit_cache_flush(sctx);

	if (!(user_flags & SI_CPDMA_SKIP_SYNC_BEFORE) && *is_first)
		*packet_flags |= CP_DMA_RAW_WAIT;

	*is_first = false;

	/* Do the synchronization after the last DMA, so that all data
	 * is written to memory.
	 */
	if (!(user_flags & SI_CPDMA_SKIP_SYNC_AFTER) &&
	    byte_count == remaining_size)
		*packet_flags |= CP_DMA_SYNC;
}

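/**
 * Clear "size" bytes of "dst" starting at "offset" with the 32-bit "value",
 * preferring SDMA for large clears and for buffers not yet referenced by the
 * GFX IB, and falling back to CP DMA otherwise. A trailing chunk smaller
 * than a dword is written with pipe_buffer_write.
 *
 * Example (hypothetical caller): zero a whole buffer for shader access:
 *   si_clear_buffer(ctx, buf, 0, buf->width0, 0, R600_COHERENCY_SHADER);
 */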
void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
		     uint64_t offset, uint64_t size, unsigned value,
		     enum r600_coherency coher)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct radeon_winsys *ws = sctx->b.ws;
	struct r600_resource *rdst = r600_resource(dst);
	unsigned tc_l2_flag = get_tc_l2_flag(sctx, coher);
	unsigned flush_flags = get_flush_flags(sctx, coher);
	uint64_t dma_clear_size;
	bool is_first = true;

	if (!size)
		return;

	dma_clear_size = size & ~3ull;

	/* Mark the buffer range of destination as valid (initialized),
	 * so that transfer_map knows it should wait for the GPU when mapping
	 * that range. */
	util_range_add(&rdst->valid_buffer_range, offset,
		       offset + dma_clear_size);

	/* dma_clear_buffer can use clear_buffer on failure. Make sure that
	 * doesn't happen. We don't want an infinite recursion: */
	if (sctx->b.dma.cs &&
	    !(dst->flags & PIPE_RESOURCE_FLAG_SPARSE) &&
	    (offset % 4 == 0) &&
	    /* CP DMA is very slow. Always use SDMA for big clears. This
	     * alone improves DeusEx:MD performance by 70%. */
	    (size > CP_DMA_CLEAR_PERF_THRESHOLD ||
	     /* Buffers not used by the GFX IB yet will be cleared by SDMA.
	      * This happens to move most buffer clears to SDMA, including
	      * DCC and CMASK clears, because pipe->clear clears them before
	      * si_emit_framebuffer_state (in a draw call) adds them.
	      * For example, DeusEx:MD has 21 buffer clears per frame and all
	      * of them are moved to SDMA thanks to this. */
	     !ws->cs_is_buffer_referenced(sctx->b.gfx.cs, rdst->buf,
					  RADEON_USAGE_READWRITE))) {
		sctx->b.dma_clear_buffer(ctx, dst, offset, dma_clear_size, value);

		offset += dma_clear_size;
		size -= dma_clear_size;
	} else if (dma_clear_size >= 4) {
		uint64_t va = rdst->gpu_address + offset;

		offset += dma_clear_size;
		size -= dma_clear_size;

		/* Flush the caches. */
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

		while (dma_clear_size) {
			unsigned byte_count = MIN2(dma_clear_size, cp_dma_max_byte_count(sctx));
			unsigned dma_flags = tc_l2_flag | CP_DMA_CLEAR;

			si_cp_dma_prepare(sctx, dst, NULL, byte_count, dma_clear_size, 0,
					  &is_first, &dma_flags);

			/* Emit the clear packet. */
			si_emit_cp_dma(sctx, va, value, byte_count, dma_flags, coher);

			dma_clear_size -= byte_count;
			va += byte_count;
		}

		if (tc_l2_flag)
			rdst->TC_L2_dirty = true;

		/* If it's not a framebuffer fast clear... */
		if (coher == R600_COHERENCY_SHADER)
			sctx->b.num_cp_dma_calls++;
	}

	if (size) {
		/* Handle non-dword alignment.
		 *
		 * This function is called for embedded texture metadata clears,
		 * but those should always be properly aligned. */
		assert(dst->target == PIPE_BUFFER);
		assert(size < 4);

		pipe_buffer_write(ctx, dst, offset, size, &value);
	}
}

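/* pipe_context::clear_buffer implementation. Clear values wider than a dword
 * that don't repeat per dword are handled with util_blitter_clear_buffer;
 * everything else is expanded to a dword and passed to si_clear_buffer.
 */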
static void si_pipe_clear_buffer(struct pipe_context *ctx,
				 struct pipe_resource *dst,
				 unsigned offset, unsigned size,
				 const void *clear_value_ptr,
				 int clear_value_size)
{
	struct si_context *sctx = (struct si_context*)ctx;
	uint32_t dword_value;
	unsigned i;

	assert(offset % clear_value_size == 0);
	assert(size % clear_value_size == 0);

	if (clear_value_size > 4) {
		const uint32_t *u32 = clear_value_ptr;
		bool clear_dword_duplicated = true;

		/* See if we can lower large fills to dword fills. */
		for (i = 1; i < clear_value_size / 4; i++)
			if (u32[0] != u32[i]) {
				clear_dword_duplicated = false;
				break;
			}

		if (!clear_dword_duplicated) {
			/* Use transform feedback for 64-bit, 96-bit, and
			 * 128-bit fills.
			 */
			union pipe_color_union clear_value;

			memcpy(&clear_value, clear_value_ptr, clear_value_size);
			si_blitter_begin(ctx, SI_DISABLE_RENDER_COND);
			util_blitter_clear_buffer(sctx->blitter, dst, offset,
						  size, clear_value_size / 4,
						  &clear_value);
			si_blitter_end(ctx);
			return;
		}
	}

	/* Expand the clear value to a dword. */
	switch (clear_value_size) {
	case 1:
		dword_value = *(uint8_t*)clear_value_ptr;
		dword_value |= (dword_value << 8) |
			       (dword_value << 16) |
			       (dword_value << 24);
		break;
	case 2:
		dword_value = *(uint16_t*)clear_value_ptr;
		dword_value |= dword_value << 16;
		break;
	default:
		dword_value = *(uint32_t*)clear_value_ptr;
	}

	si_clear_buffer(ctx, dst, offset, size, dword_value,
			R600_COHERENCY_SHADER);
}

/**
 * Realign the CP DMA engine. This must be done after a copy with an unaligned
 * size.
 *
 * \param size  Remaining size to the CP DMA alignment.
 */
static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size,
				     unsigned user_flags, bool *is_first)
{
	uint64_t va;
	unsigned dma_flags = 0;
	unsigned scratch_size = SI_CPDMA_ALIGNMENT * 2;

	assert(size < SI_CPDMA_ALIGNMENT);

	/* Use the scratch buffer as the dummy buffer. The 3D engine should be
	 * idle at this point.
	 */
	if (!sctx->scratch_buffer ||
	    sctx->scratch_buffer->b.b.width0 < scratch_size) {
		r600_resource_reference(&sctx->scratch_buffer, NULL);
		sctx->scratch_buffer = (struct r600_resource*)
			si_aligned_buffer_create(&sctx->screen->b,
						 R600_RESOURCE_FLAG_UNMAPPABLE,
						 PIPE_USAGE_DEFAULT,
						 scratch_size, 256);
		if (!sctx->scratch_buffer)
			return;

		si_mark_atom_dirty(sctx, &sctx->scratch_state);
	}

	si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
			  &sctx->scratch_buffer->b.b, size, size, user_flags,
			  is_first, &dma_flags);

	va = sctx->scratch_buffer->gpu_address;
	si_emit_cp_dma(sctx, va, va + SI_CPDMA_ALIGNMENT, size, dma_flags,
		       R600_COHERENCY_SHADER);
}

/**
 * Do memcpy between buffers using CP DMA.
 *
 * \param user_flags	bitmask of SI_CPDMA_*
 */
void si_copy_buffer(struct si_context *sctx,
		    struct pipe_resource *dst, struct pipe_resource *src,
		    uint64_t dst_offset, uint64_t src_offset, unsigned size,
		    unsigned user_flags)
{
	uint64_t main_dst_offset, main_src_offset;
	unsigned skipped_size = 0;
	unsigned realign_size = 0;
	unsigned tc_l2_flag = get_tc_l2_flag(sctx, R600_COHERENCY_SHADER);
	unsigned flush_flags = get_flush_flags(sctx, R600_COHERENCY_SHADER);
	bool is_first = true;

	if (!size)
		return;

	if (dst != src || dst_offset != src_offset) {
		/* Mark the buffer range of destination as valid (initialized),
		 * so that transfer_map knows it should wait for the GPU when mapping
		 * that range. */
		util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
			       dst_offset + size);
	}

	dst_offset += r600_resource(dst)->gpu_address;
	src_offset += r600_resource(src)->gpu_address;

	/* The workarounds aren't needed on Fiji and beyond. */
	if (sctx->b.family <= CHIP_CARRIZO ||
	    sctx->b.family == CHIP_STONEY) {
		/* If the size is not aligned, we must add a dummy copy at the end
		 * just to align the internal counter. Otherwise, the DMA engine
		 * would slow down by an order of magnitude for following copies.
		 */
		if (size % SI_CPDMA_ALIGNMENT)
			realign_size = SI_CPDMA_ALIGNMENT - (size % SI_CPDMA_ALIGNMENT);

		/* If the copy begins unaligned, we must start copying from the next
		 * aligned block and the skipped part should be copied after everything
		 * else has been copied. Only the src alignment matters, not dst.
		 */
		if (src_offset % SI_CPDMA_ALIGNMENT) {
			skipped_size = SI_CPDMA_ALIGNMENT - (src_offset % SI_CPDMA_ALIGNMENT);
			/* The main part will be skipped if the size is too small. */
			skipped_size = MIN2(skipped_size, size);
			size -= skipped_size;
		}
	}

	/* Flush the caches. */
	if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC))
		sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
				 SI_CONTEXT_CS_PARTIAL_FLUSH | flush_flags;

	/* This is the main part doing the copying. Src is always aligned. */
	main_dst_offset = dst_offset + skipped_size;
	main_src_offset = src_offset + skipped_size;

	while (size) {
		unsigned dma_flags = tc_l2_flag;
		unsigned byte_count = MIN2(size, cp_dma_max_byte_count(sctx));

		si_cp_dma_prepare(sctx, dst, src, byte_count,
				  size + skipped_size + realign_size,
				  user_flags, &is_first, &dma_flags);

		si_emit_cp_dma(sctx, main_dst_offset, main_src_offset,
			       byte_count, dma_flags, R600_COHERENCY_SHADER);

		size -= byte_count;
		main_src_offset += byte_count;
		main_dst_offset += byte_count;
	}

	/* Copy the part we skipped because src wasn't aligned. */
	if (skipped_size) {
		unsigned dma_flags = tc_l2_flag;

		si_cp_dma_prepare(sctx, dst, src, skipped_size,
				  skipped_size + realign_size, user_flags,
				  &is_first, &dma_flags);

		si_emit_cp_dma(sctx, dst_offset, src_offset, skipped_size,
			       dma_flags, R600_COHERENCY_SHADER);
	}

	/* Finally, realign the engine if the size wasn't aligned. */
	if (realign_size)
		si_cp_dma_realign_engine(sctx, realign_size, user_flags,
					 &is_first);

	if (tc_l2_flag)
		r600_resource(dst)->TC_L2_dirty = true;

	/* If it's not a prefetch... */
	if (dst_offset != src_offset)
		sctx->b.num_cp_dma_calls++;
}

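/* Prefetch "size" bytes of "buf" into the TC L2 cache by copying the range
 * onto itself with CP DMA, skipping all synchronization. On GFX9 the
 * destination is set to NOWHERE, making it a pure prefetch. CIK+ only.
 */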
void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
			      uint64_t offset, unsigned size)
{
	assert(sctx->b.chip_class >= CIK);

	si_copy_buffer(sctx, buf, buf, offset, offset, size, SI_CPDMA_SKIP_ALL);
}

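/* Prefetch the memory of a shader state (a single BO) into the TC L2 cache. */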
static void cik_prefetch_shader_async(struct si_context *sctx,
				      struct si_pm4_state *state)
{
	struct pipe_resource *bo = &state->bo[0]->b.b;
	assert(state->nbo == 1);

	cik_prefetch_TC_L2_async(sctx, bo, 0, bo->width0);
}

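/* Prefetch the vertex buffer descriptor list into the TC L2 cache. */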
static void cik_prefetch_VBO_descriptors(struct si_context *sctx)
{
	if (!sctx->vertex_elements)
		return;

	cik_prefetch_TC_L2_async(sctx, &sctx->vertex_buffers.buffer->b.b,
				 sctx->vertex_buffers.gpu_address -
				 sctx->vertex_buffers.buffer->gpu_address,
				 sctx->vertex_elements->desc_list_byte_size);
}

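/* Prefetch all shaders and the VBO descriptors selected in prefetch_L2_mask
 * into the TC L2 cache, ordering the VBO descriptor prefetch according to
 * which shader stages are enabled, then clear the mask.
 */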
void cik_emit_prefetch_L2(struct si_context *sctx)
{
	/* Prefetch shaders and VBO descriptors to TC L2. */
	if (sctx->b.chip_class >= GFX9) {
		/* Choose the right spot for the VBO prefetch. */
		if (sctx->tes_shader.cso) {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_HS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else if (sctx->gs_shader.cso) {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
		}
	} else {
		/* SI-CI-VI */
		/* Choose the right spot for the VBO prefetch. */
		if (sctx->tes_shader.cso) {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_LS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.ls);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_HS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.hs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_ES)
				cik_prefetch_shader_async(sctx, sctx->queued.named.es);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else if (sctx->gs_shader.cso) {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_ES)
				cik_prefetch_shader_async(sctx, sctx->queued.named.es);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_GS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.gs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
		} else {
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VS)
				cik_prefetch_shader_async(sctx, sctx->queued.named.vs);
			if (sctx->prefetch_L2_mask & SI_PREFETCH_VBO_DESCRIPTORS)
				cik_prefetch_VBO_descriptors(sctx);
		}
	}

	if (sctx->prefetch_L2_mask & SI_PREFETCH_PS)
		cik_prefetch_shader_async(sctx, sctx->queued.named.ps);

	sctx->prefetch_L2_mask = 0;
}

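/* Hook up the pipe_context entry points implemented in this file. */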
void si_init_cp_dma_functions(struct si_context *sctx)
{
	sctx->b.b.clear_buffer = si_pipe_clear_buffer;
}
    595