Lines Matching refs:sctx

51 static void si_emit_cp_dma(struct si_context *sctx, uint64_t dst_va,
55 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
79 if (sctx->b.chip_class >= CIK) {
109 static unsigned get_flush_flags(struct si_context *sctx, enum r600_coherency coher)
118 (sctx->b.chip_class == SI ? SI_CONTEXT_INV_GLOBAL_L2 : 0);
125 static unsigned get_tc_l2_flag(struct si_context *sctx, enum r600_coherency coher)
128 sctx->b.chip_class >= CIK ? CP_DMA_USE_L2 : 0;
131 static void si_cp_dma_prepare(struct si_context *sctx, struct pipe_resource *dst,
144 r600_context_add_resource_size(&sctx->b.b, dst);
146 r600_context_add_resource_size(&sctx->b.b, src);
150 si_need_cs_space(sctx);
154 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
158 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
166 if (!(user_flags & SI_CPDMA_SKIP_GFX_SYNC) && sctx->b.flags)
167 si_emit_cache_flush(sctx);
186 struct si_context *sctx = (struct si_context*)ctx;
187 struct radeon_winsys *ws = sctx->b.ws;
189 unsigned tc_l2_flag = get_tc_l2_flag(sctx, coher);
190 unsigned flush_flags = get_flush_flags(sctx, coher);
204 uint8_t *map = r600_buffer_map_sync_with_rings(&sctx->b, rdst,
216 if (sctx->b.dma.cs &&
226 !ws->cs_is_buffer_referenced(sctx->b.gfx.cs, rdst->buf,
228 sctx->b.dma_clear_buffer(ctx, dst, offset, size, value);
235 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
242 si_cp_dma_prepare(sctx, dst, NULL, byte_count, size, 0,
246 si_emit_cp_dma(sctx, va, value, byte_count, dma_flags, coher);
257 sctx->b.num_cp_dma_calls++;
266 static void si_cp_dma_realign_engine(struct si_context *sctx, unsigned size,
278 if (!sctx->scratch_buffer ||
279 sctx->scratch_buffer->b.b.width0 < scratch_size) {
280 r600_resource_reference(&sctx->scratch_buffer, NULL);
281 sctx->scratch_buffer = (struct r600_resource*)
282 pipe_buffer_create(&sctx->screen->b.b, 0,
284 if (!sctx->scratch_buffer)
286 sctx->emit_scratch_reloc = true;
289 si_cp_dma_prepare(sctx, &sctx->scratch_buffer->b.b,
290 &sctx->scratch_buffer->b.b, size, size, user_flags,
293 va = sctx->scratch_buffer->gpu_address;
294 si_emit_cp_dma(sctx, va, va + CP_DMA_ALIGNMENT, size, dma_flags,
303 void si_copy_buffer(struct si_context *sctx,
311 unsigned tc_l2_flag = get_tc_l2_flag(sctx, R600_COHERENCY_SHADER);
312 unsigned flush_flags = get_flush_flags(sctx, R600_COHERENCY_SHADER);
330 if (sctx->b.family <= CHIP_CARRIZO ||
331 sctx->b.family == CHIP_STONEY) {
353 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
364 si_cp_dma_prepare(sctx, dst, src, byte_count,
368 si_emit_cp_dma(sctx, main_dst_offset, main_src_offset,
380 si_cp_dma_prepare(sctx, dst, src, skipped_size,
384 si_emit_cp_dma(sctx, dst_offset, src_offset, skipped_size,
390 si_cp_dma_realign_engine(sctx, realign_size, user_flags,
398 sctx->b.num_cp_dma_calls++;
401 void cik_prefetch_TC_L2_async(struct si_context *sctx, struct pipe_resource *buf,
404 assert(sctx->b.chip_class >= CIK);
406 si_copy_buffer(sctx, buf, buf, offset, offset, size, SI_CPDMA_SKIP_ALL);
409 void si_init_cp_dma_functions(struct si_context *sctx)
411 sctx->b.clear_buffer = si_clear_buffer;
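
For context, the listing above comes from radeonsi's CP DMA helpers: si_clear_buffer and si_copy_buffer issue work as a series of CP DMA packets, each one prepared with si_cp_dma_prepare and emitted with si_emit_cp_dma, with si_cp_dma_realign_engine handling the unaligned tail. Below is a minimal, standalone sketch of that per-packet chunking pattern only; it is not Mesa code, and MAX_BYTE_COUNT, emit_chunk, and copy_buffer_chunked are hypothetical names and values chosen for illustration.

    /* Standalone model of the chunking pattern suggested by si_copy_buffer:
     * a large copy is emitted as several packets, each limited to a maximum
     * byte count.  The limit and the emit helper are illustrative stand-ins. */
    #include <stdint.h>
    #include <stdio.h>

    #define MAX_BYTE_COUNT (1u << 21)   /* illustrative per-packet limit */

    static void emit_chunk(uint64_t dst_va, uint64_t src_va, unsigned bytes)
    {
        /* In the driver this role is played by si_emit_cp_dma(); here we log. */
        printf("copy %u bytes: 0x%llx -> 0x%llx\n",
               bytes, (unsigned long long)src_va, (unsigned long long)dst_va);
    }

    static void copy_buffer_chunked(uint64_t dst_va, uint64_t src_va, unsigned size)
    {
        while (size) {
            unsigned byte_count = size < MAX_BYTE_COUNT ? size : MAX_BYTE_COUNT;

            emit_chunk(dst_va, src_va, byte_count);

            dst_va += byte_count;
            src_va += byte_count;
            size   -= byte_count;
        }
    }

    int main(void)
    {
        /* A 5 MiB copy splits into three packets under the 2 MiB limit above. */
        copy_buffer_chunked(0x100000, 0x900000, 5u << 20);
        return 0;
    }

The real driver additionally tracks a skipped head and a realigned tail (skipped_size and realign_size in the listing) so that the main loop only ever emits aligned, full-size packets; that bookkeeping is omitted here.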