/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák
 */

#include "r600_cs.h"
#include "util/u_memory.h"
#include "util/u_upload_mgr.h"
#include <inttypes.h>
#include <stdio.h>

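/* Return true if the buffer is referenced by the current GFX command stream,
 * or by the DMA command stream if DMA commands have been emitted. */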
bool r600_rings_is_buffer_referenced(struct r600_common_context *ctx,
				     struct pb_buffer *buf,
				     enum radeon_bo_usage usage)
{
	if (ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs, buf, usage)) {
		return true;
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs, buf, usage)) {
		return true;
	}
	return false;
}

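/* Map a buffer for CPU access, synchronizing with the GFX and DMA rings:
 * any ring that references the buffer is flushed first and the returned
 * mapping is safe to use. With PIPE_TRANSFER_UNSYNCHRONIZED the buffer is
 * mapped directly; with PIPE_TRANSFER_DONTBLOCK, NULL is returned if the
 * buffer is still busy. */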
void *r600_buffer_map_sync_with_rings(struct r600_common_context *ctx,
				      struct r600_resource *resource,
				      unsigned usage)
{
	enum radeon_bo_usage rusage = RADEON_USAGE_READWRITE;
	bool busy = false;

	assert(!(resource->flags & RADEON_FLAG_SPARSE));

	if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
		return ctx->ws->buffer_map(resource->buf, NULL, usage);
	}

	if (!(usage & PIPE_TRANSFER_WRITE)) {
		/* have to wait for the last write */
		rusage = RADEON_USAGE_WRITE;
	}

	if (radeon_emitted(ctx->gfx.cs, ctx->initial_gfx_cs_size) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->gfx.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->gfx.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->gfx.flush(ctx, 0, NULL);
			busy = true;
		}
	}
	if (radeon_emitted(ctx->dma.cs, 0) &&
	    ctx->ws->cs_is_buffer_referenced(ctx->dma.cs,
					     resource->buf, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			ctx->dma.flush(ctx, PIPE_FLUSH_ASYNC, NULL);
			return NULL;
		} else {
			ctx->dma.flush(ctx, 0, NULL);
			busy = true;
		}
	}

	if (busy || !ctx->ws->buffer_wait(resource->buf, 0, rusage)) {
		if (usage & PIPE_TRANSFER_DONTBLOCK) {
			return NULL;
		} else {
			/* We will be waiting for the GPU. Wait for any offloaded
			 * CS flush to complete to avoid busy-waiting in the winsys. */
			ctx->ws->cs_sync_flush(ctx->gfx.cs);
			if (ctx->dma.cs)
				ctx->ws->cs_sync_flush(ctx->dma.cs);
		}
	}

	/* Setting the CS to NULL will prevent doing checks we have done already. */
	return ctx->ws->buffer_map(resource->buf, NULL, usage);
}

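/* Choose the BO size, alignment, domains (VRAM/GTT), and flags of a resource
 * based on its usage, bind flags, target, and the kernel DRM version. */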
void r600_init_resource_fields(struct r600_common_screen *rscreen,
			       struct r600_resource *res,
			       uint64_t size, unsigned alignment)
{
	struct r600_texture *rtex = (struct r600_texture*)res;

	res->bo_size = size;
	res->bo_alignment = alignment;
	res->flags = 0;
	res->texture_handle_allocated = false;
	res->image_handle_allocated = false;

	switch (res->b.b.usage) {
	case PIPE_USAGE_STREAM:
		res->flags = RADEON_FLAG_GTT_WC;
		/* fall through */
	case PIPE_USAGE_STAGING:
		/* Transfers are likely to occur more often with these
		 * resources. */
		res->domains = RADEON_DOMAIN_GTT;
		break;
	case PIPE_USAGE_DYNAMIC:
		/* Older kernels didn't always flush the HDP cache before
		 * CS execution.
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40) {
			res->domains = RADEON_DOMAIN_GTT;
			res->flags |= RADEON_FLAG_GTT_WC;
			break;
		}
		/* fall through */
	case PIPE_USAGE_DEFAULT:
	case PIPE_USAGE_IMMUTABLE:
	default:
		/* Not listing GTT here improves performance in some
		 * apps. */
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_GTT_WC;
		break;
	}

	if (res->b.b.target == PIPE_BUFFER &&
	    res->b.b.flags & (PIPE_RESOURCE_FLAG_MAP_PERSISTENT |
			      PIPE_RESOURCE_FLAG_MAP_COHERENT)) {
		/* Use GTT for all persistent mappings with older
		 * kernels, because they didn't always flush the HDP
		 * cache before CS execution.
		 *
		 * Write-combined CPU mappings are fine, the kernel
		 * ensures all CPU writes finish before the GPU
		 * executes a command stream.
		 */
		if (rscreen->info.drm_major == 2 &&
		    rscreen->info.drm_minor < 40)
			res->domains = RADEON_DOMAIN_GTT;
	}

	/* Tiled textures are unmappable. Always put them in VRAM. */
	if ((res->b.b.target != PIPE_BUFFER && !rtex->surface.is_linear) ||
	    res->flags & R600_RESOURCE_FLAG_UNMAPPABLE) {
		res->domains = RADEON_DOMAIN_VRAM;
		res->flags |= RADEON_FLAG_NO_CPU_ACCESS |
			      RADEON_FLAG_GTT_WC;
	}

	/* Only displayable single-sample textures can be shared between
	 * processes. */
	if (res->b.b.target == PIPE_BUFFER ||
	    res->b.b.nr_samples >= 2 ||
	    (rtex->surface.micro_tile_mode != RADEON_MICRO_MODE_DISPLAY &&
	     /* Raven doesn't use display micro mode for 32bpp, so check this: */
	     !(res->b.b.bind & PIPE_BIND_SCANOUT)))
		res->flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;

	if (rscreen->debug_flags & DBG_NO_WC)
		res->flags &= ~RADEON_FLAG_GTT_WC;

	if (res->b.b.bind & PIPE_BIND_SHARED)
		res->flags |= RADEON_FLAG_NO_SUBALLOC;

	/* Set expected VRAM and GART usage for the buffer. */
	res->vram_usage = 0;
	res->gart_usage = 0;

	if (res->domains & RADEON_DOMAIN_VRAM)
		res->vram_usage = size;
	else if (res->domains & RADEON_DOMAIN_GTT)
		res->gart_usage = size;
}

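/* Allocate (or reallocate) the winsys buffer for a resource using the fields
 * set up by r600_init_resource_fields, and update the GPU address.
 * Returns false on allocation failure. */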
bool r600_alloc_resource(struct r600_common_screen *rscreen,
			 struct r600_resource *res)
{
	struct pb_buffer *old_buf, *new_buf;

	/* Allocate a new resource. */
	new_buf = rscreen->ws->buffer_create(rscreen->ws, res->bo_size,
					     res->bo_alignment,
					     res->domains, res->flags);
	if (!new_buf) {
		return false;
	}

	/* Replace the pointer such that if res->buf wasn't NULL, it won't be
	 * NULL. This should prevent crashes with multiple contexts using
	 * the same buffer where one of the contexts invalidates it while
	 * the others are using it. */
	old_buf = res->buf;
	res->buf = new_buf; /* should be atomic */

	if (rscreen->info.has_virtual_memory)
		res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
	else
		res->gpu_address = 0;

	pb_reference(&old_buf, NULL);

	util_range_set_empty(&res->valid_buffer_range);

	/* Print debug information. */
	if (rscreen->debug_flags & DBG_VM && res->b.b.target == PIPE_BUFFER) {
		fprintf(stderr, "VM start=0x%"PRIX64"  end=0x%"PRIX64" | Buffer %"PRIu64" bytes\n",
			res->gpu_address, res->gpu_address + res->buf->size,
			res->buf->size);
	}
	return true;
}

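/* Destroy a buffer resource and release its winsys buffer. */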
static void r600_buffer_destroy(struct pipe_screen *screen,
				struct pipe_resource *buf)
{
	struct r600_resource *rbuffer = r600_resource(buf);

	threaded_resource_deinit(buf);
	util_range_destroy(&rbuffer->valid_buffer_range);
	pipe_resource_reference((struct pipe_resource**)&rbuffer->immed_buffer, NULL);
	pb_reference(&rbuffer->buf, NULL);
	FREE(rbuffer);
}

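/* Discard the contents of a buffer: if the GPU is still using it, reallocate
 * its storage, otherwise just reset the valid range. Returns false for
 * buffers that cannot be reallocated (shared, sparse, or created from a
 * user pointer). */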
static bool
r600_invalidate_buffer(struct r600_common_context *rctx,
		       struct r600_resource *rbuffer)
{
	/* Shared buffers can't be reallocated. */
	if (rbuffer->b.is_shared)
		return false;

	/* Sparse buffers can't be reallocated. */
	if (rbuffer->flags & RADEON_FLAG_SPARSE)
		return false;

	/* In AMD_pinned_memory, the user pointer association only gets
	 * broken when the buffer is explicitly re-allocated.
	 */
	if (rbuffer->b.is_user_ptr)
		return false;

	/* Check if mapping this buffer would cause waiting for the GPU. */
	if (r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
	    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
		rctx->invalidate_buffer(&rctx->b, &rbuffer->b.b);
	} else {
		util_range_set_empty(&rbuffer->valid_buffer_range);
	}

	return true;
}

/* Replace the storage of dst with src. */
void r600_replace_buffer_storage(struct pipe_context *ctx,
				 struct pipe_resource *dst,
				 struct pipe_resource *src)
{
	struct r600_common_context *rctx = (struct r600_common_context *)ctx;
	struct r600_resource *rdst = r600_resource(dst);
	struct r600_resource *rsrc = r600_resource(src);
	uint64_t old_gpu_address = rdst->gpu_address;

	pb_reference(&rdst->buf, rsrc->buf);
	rdst->gpu_address = rsrc->gpu_address;
	rdst->b.b.bind = rsrc->b.b.bind;
	rdst->flags = rsrc->flags;

	assert(rdst->vram_usage == rsrc->vram_usage);
	assert(rdst->gart_usage == rsrc->gart_usage);
	assert(rdst->bo_size == rsrc->bo_size);
	assert(rdst->bo_alignment == rsrc->bo_alignment);
	assert(rdst->domains == rsrc->domains);

	rctx->rebind_buffer(ctx, dst, old_gpu_address);
}

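/* Discard the contents of a resource. Only buffers are handled here;
 * other resource types are ignored. */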
void r600_invalidate_resource(struct pipe_context *ctx,
			      struct pipe_resource *resource)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(resource);

	/* We currently only do anything here for buffers. */
	if (resource->target == PIPE_BUFFER)
		(void)r600_invalidate_buffer(rctx, rbuffer);
}

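/* Allocate a transfer object from the context's slab pool, fill it in for
 * the given buffer mapping, and return the CPU pointer unchanged. */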
static void *r600_buffer_get_transfer(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer,
				      void *data, struct r600_resource *staging,
				      unsigned offset)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *transfer;

	if (usage & TC_TRANSFER_MAP_THREADED_UNSYNC)
		transfer = slab_alloc(&rctx->pool_transfers_unsync);
	else
		transfer = slab_alloc(&rctx->pool_transfers);

	transfer->b.b.resource = NULL;
	pipe_resource_reference(&transfer->b.b.resource, resource);
	transfer->b.b.level = 0;
	transfer->b.b.usage = usage;
	transfer->b.b.box = *box;
	transfer->b.b.stride = 0;
	transfer->b.b.layer_stride = 0;
	transfer->b.staging = NULL;
	transfer->offset = offset;
	transfer->staging = staging;
	*ptransfer = &transfer->b.b;
	return data;
}

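/* Return true if the given buffer copy can be done on the GPU: CP DMA
 * handles any alignment; the DMA ring and streamout require dword-aligned
 * offsets and size. */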
static bool r600_can_dma_copy_buffer(struct r600_common_context *rctx,
				     unsigned dstx, unsigned srcx, unsigned size)
{
	bool dword_aligned = !(dstx % 4) && !(srcx % 4) && !(size % 4);

	return rctx->screen->has_cp_dma ||
	       (dword_aligned && (rctx->dma.cs ||
				  rctx->screen->has_streamout));
}

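/* Map a range of a buffer for CPU access. Depending on the usage flags and
 * buffer placement, this may map unsynchronized, invalidate the buffer
 * storage, stream writes through a temporary upload buffer, stage reads
 * through a cached GTT buffer filled by DMA, or fall back to a direct
 * synchronized mapping. */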
static void *r600_buffer_transfer_map(struct pipe_context *ctx,
				      struct pipe_resource *resource,
				      unsigned level,
				      unsigned usage,
				      const struct pipe_box *box,
				      struct pipe_transfer **ptransfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_resource *rbuffer = r600_resource(resource);
	uint8_t *data;

	assert(box->x + box->width <= resource->width0);

	/* From GL_AMD_pinned_memory issues:
	 *
	 *     4) Is glMapBuffer on a shared buffer guaranteed to return the
	 *        same system address which was specified at creation time?
	 *
	 *        RESOLVED: NO. The GL implementation might return a different
	 *        virtual mapping of that memory, although the same physical
	 *        page will be used.
	 *
	 * So don't ever use staging buffers.
	 */
	if (rbuffer->b.is_user_ptr)
		usage |= PIPE_TRANSFER_PERSISTENT;

	/* See if the buffer range being mapped has never been initialized,
	 * in which case it can be mapped unsynchronized. */
	if (!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INFER_UNSYNCHRONIZED)) &&
	    usage & PIPE_TRANSFER_WRITE &&
	    !rbuffer->b.is_shared &&
	    !util_ranges_intersect(&rbuffer->valid_buffer_range, box->x, box->x + box->width)) {
		usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
	}

	/* If discarding the entire range, discard the whole resource instead. */
	if (usage & PIPE_TRANSFER_DISCARD_RANGE &&
	    box->x == 0 && box->width == resource->width0) {
		usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
	}

	if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE &&
	    !(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
		       TC_TRANSFER_MAP_NO_INVALIDATE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		if (r600_invalidate_buffer(rctx, rbuffer)) {
			/* At this point, the buffer is always idle. */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		} else {
			/* Fall back to a temporary buffer. */
			usage |= PIPE_TRANSFER_DISCARD_RANGE;
		}
	}

	if ((usage & PIPE_TRANSFER_DISCARD_RANGE) &&
	    !(rscreen->debug_flags & DBG_NO_DISCARD_RANGE) &&
	    ((!(usage & (PIPE_TRANSFER_UNSYNCHRONIZED |
			 PIPE_TRANSFER_PERSISTENT)) &&
	      r600_can_dma_copy_buffer(rctx, box->x, 0, box->width)) ||
	     (rbuffer->flags & RADEON_FLAG_SPARSE))) {
		assert(usage & PIPE_TRANSFER_WRITE);

		/* Check if mapping this buffer would cause waiting for the GPU.
		 */
		if (rbuffer->flags & RADEON_FLAG_SPARSE ||
		    r600_rings_is_buffer_referenced(rctx, rbuffer->buf, RADEON_USAGE_READWRITE) ||
		    !rctx->ws->buffer_wait(rbuffer->buf, 0, RADEON_USAGE_READWRITE)) {
			/* Do a wait-free write-only transfer using a temporary buffer. */
			unsigned offset;
			struct r600_resource *staging = NULL;

			u_upload_alloc(ctx->stream_uploader, 0,
				       box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT),
				       rctx->screen->info.tcc_cache_line_size,
				       &offset, (struct pipe_resource**)&staging,
				       (void**)&data);

			if (staging) {
				data += box->x % R600_MAP_BUFFER_ALIGNMENT;
				return r600_buffer_get_transfer(ctx, resource, usage, box,
								ptransfer, data, staging, offset);
			} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
				return NULL;
			}
		} else {
			/* At this point, the buffer is always idle (we checked it above). */
			usage |= PIPE_TRANSFER_UNSYNCHRONIZED;
		}
	}
	/* Use a staging buffer in cached GTT for reads. */
	else if (((usage & PIPE_TRANSFER_READ) &&
		  !(usage & PIPE_TRANSFER_PERSISTENT) &&
		  (rbuffer->domains & RADEON_DOMAIN_VRAM ||
		   rbuffer->flags & RADEON_FLAG_GTT_WC) &&
		  r600_can_dma_copy_buffer(rctx, 0, box->x, box->width)) ||
		 (rbuffer->flags & RADEON_FLAG_SPARSE)) {
		struct r600_resource *staging;

		assert(!(usage & TC_TRANSFER_MAP_THREADED_UNSYNC));
		staging = (struct r600_resource*) pipe_buffer_create(
				ctx->screen, 0, PIPE_USAGE_STAGING,
				box->width + (box->x % R600_MAP_BUFFER_ALIGNMENT));
		if (staging) {
			/* Copy the VRAM buffer to the staging buffer. */
			rctx->dma_copy(ctx, &staging->b.b, 0,
				       box->x % R600_MAP_BUFFER_ALIGNMENT,
				       0, 0, resource, 0, box);

			data = r600_buffer_map_sync_with_rings(rctx, staging,
							       usage & ~PIPE_TRANSFER_UNSYNCHRONIZED);
			if (!data) {
				r600_resource_reference(&staging, NULL);
				return NULL;
			}
			data += box->x % R600_MAP_BUFFER_ALIGNMENT;

			return r600_buffer_get_transfer(ctx, resource, usage, box,
							ptransfer, data, staging, 0);
		} else if (rbuffer->flags & RADEON_FLAG_SPARSE) {
			return NULL;
		}
	}

	data = r600_buffer_map_sync_with_rings(rctx, rbuffer, usage);
	if (!data) {
		return NULL;
	}
	data += box->x;

	return r600_buffer_get_transfer(ctx, resource, usage, box,
					ptransfer, data, NULL, 0);
}

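/* If the transfer used a staging buffer, copy the flushed region back into
 * the real buffer, then mark the range as initialized. */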
static void r600_buffer_do_flush_region(struct pipe_context *ctx,
					struct pipe_transfer *transfer,
					const struct pipe_box *box)
{
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct r600_resource *rbuffer = r600_resource(transfer->resource);

	if (rtransfer->staging) {
		struct pipe_resource *dst, *src;
		unsigned soffset;
		struct pipe_box dma_box;

		dst = transfer->resource;
		src = &rtransfer->staging->b.b;
		soffset = rtransfer->offset + box->x % R600_MAP_BUFFER_ALIGNMENT;

		u_box_1d(soffset, box->width, &dma_box);

		/* Copy the staging buffer into the original one. */
		ctx->resource_copy_region(ctx, dst, 0, box->x, 0, 0, src, 0, &dma_box);
	}

	util_range_add(&rbuffer->valid_buffer_range, box->x,
		       box->x + box->width);
}

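/* Flush an explicitly specified subrange of a mapping created with
 * PIPE_TRANSFER_FLUSH_EXPLICIT; rel_box is relative to the mapped box. */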
static void r600_buffer_flush_region(struct pipe_context *ctx,
				     struct pipe_transfer *transfer,
				     const struct pipe_box *rel_box)
{
	unsigned required_usage = PIPE_TRANSFER_WRITE |
				  PIPE_TRANSFER_FLUSH_EXPLICIT;

	if ((transfer->usage & required_usage) == required_usage) {
		struct pipe_box box;

		u_box_1d(transfer->box.x + rel_box->x, rel_box->width, &box);
		r600_buffer_do_flush_region(ctx, transfer, &box);
	}
}

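/* Unmap a buffer: implicitly flush the whole mapped range for write
 * mappings without FLUSH_EXPLICIT, release the staging buffer, and free
 * the transfer object. */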
static void r600_buffer_transfer_unmap(struct pipe_context *ctx,
				       struct pipe_transfer *transfer)
{
	struct r600_common_context *rctx = (struct r600_common_context*)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;

	if (transfer->usage & PIPE_TRANSFER_WRITE &&
	    !(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT))
		r600_buffer_do_flush_region(ctx, transfer, &transfer->box);

	r600_resource_reference(&rtransfer->staging, NULL);
	assert(rtransfer->b.staging == NULL); /* for threaded context only */
	pipe_resource_reference(&transfer->resource, NULL);

	/* Don't use pool_transfers_unsync. We are always in the driver
	 * thread. */
	slab_free(&rctx->pool_transfers, transfer);
}

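/* Write 'size' bytes at 'offset' into a buffer by mapping the range with
 * PIPE_TRANSFER_DISCARD_RANGE and copying from the user pointer. */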
void r600_buffer_subdata(struct pipe_context *ctx,
			 struct pipe_resource *buffer,
			 unsigned usage, unsigned offset,
			 unsigned size, const void *data)
{
	struct pipe_transfer *transfer = NULL;
	struct pipe_box box;
	uint8_t *map = NULL;

	u_box_1d(offset, size, &box);
	map = r600_buffer_transfer_map(ctx, buffer, 0,
				       PIPE_TRANSFER_WRITE |
				       PIPE_TRANSFER_DISCARD_RANGE |
				       usage,
				       &box, &transfer);
	if (!map)
		return;

	memcpy(map, data, size);
	r600_buffer_transfer_unmap(ctx, transfer);
}

static const struct u_resource_vtbl r600_buffer_vtbl =
{
	NULL,				/* get_handle */
	r600_buffer_destroy,		/* resource_destroy */
	r600_buffer_transfer_map,	/* transfer_map */
	r600_buffer_flush_region,	/* transfer_flush_region */
	r600_buffer_transfer_unmap,	/* transfer_unmap */
};

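/* Allocate and initialize the r600_resource struct for a buffer; the
 * backing storage is allocated separately. */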
static struct r600_resource *
r600_alloc_buffer_struct(struct pipe_screen *screen,
			 const struct pipe_resource *templ)
{
	struct r600_resource *rbuffer;

	rbuffer = MALLOC_STRUCT(r600_resource);

	rbuffer->b.b = *templ;
	rbuffer->b.b.next = NULL;
	pipe_reference_init(&rbuffer->b.b.reference, 1);
	rbuffer->b.b.screen = screen;

	rbuffer->b.vtbl = &r600_buffer_vtbl;
	threaded_resource_init(&rbuffer->b.b);

	rbuffer->buf = NULL;
	rbuffer->bind_history = 0;
	rbuffer->immed_buffer = NULL;
	util_range_init(&rbuffer->valid_buffer_range);
	return rbuffer;
}

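/* Create a buffer resource: initialize its placement fields and allocate
 * the backing storage. */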
struct pipe_resource *r600_buffer_create(struct pipe_screen *screen,
					 const struct pipe_resource *templ,
					 unsigned alignment)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	r600_init_resource_fields(rscreen, rbuffer, templ->width0, alignment);

	if (templ->flags & PIPE_RESOURCE_FLAG_SPARSE)
		rbuffer->flags |= RADEON_FLAG_SPARSE;

	if (!r600_alloc_resource(rscreen, rbuffer)) {
		FREE(rbuffer);
		return NULL;
	}
	return &rbuffer->b.b;
}

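/* Create a driver-internal buffer with an explicit alignment by filling in
 * a PIPE_BUFFER template and calling r600_buffer_create. */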
struct pipe_resource *r600_aligned_buffer_create(struct pipe_screen *screen,
						 unsigned flags,
						 unsigned usage,
						 unsigned size,
						 unsigned alignment)
{
	struct pipe_resource buffer;

	memset(&buffer, 0, sizeof buffer);
	buffer.target = PIPE_BUFFER;
	buffer.format = PIPE_FORMAT_R8_UNORM;
	buffer.bind = 0;
	buffer.usage = usage;
	buffer.flags = flags;
	buffer.width0 = size;
	buffer.height0 = 1;
	buffer.depth0 = 1;
	buffer.array_size = 1;
	return r600_buffer_create(screen, &buffer, alignment);
}

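/* Wrap application-provided memory in a buffer resource (GL_AMD_pinned_memory).
 * The buffer lives in GTT and its whole range is marked valid. */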
struct pipe_resource *
r600_buffer_from_user_memory(struct pipe_screen *screen,
			     const struct pipe_resource *templ,
			     void *user_memory)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)screen;
	struct radeon_winsys *ws = rscreen->ws;
	struct r600_resource *rbuffer = r600_alloc_buffer_struct(screen, templ);

	rbuffer->domains = RADEON_DOMAIN_GTT;
	rbuffer->flags = 0;
	rbuffer->b.is_user_ptr = true;
	util_range_add(&rbuffer->valid_buffer_range, 0, templ->width0);
	util_range_add(&rbuffer->b.valid_buffer_range, 0, templ->width0);

	/* Convert a user pointer to a buffer. */
	rbuffer->buf = ws->buffer_from_ptr(ws, user_memory, templ->width0);
	if (!rbuffer->buf) {
		FREE(rbuffer);
		return NULL;
	}

	if (rscreen->info.has_virtual_memory)
		rbuffer->gpu_address =
			ws->buffer_get_virtual_address(rbuffer->buf);
	else
		rbuffer->gpu_address = 0;

	rbuffer->vram_usage = 0;
	rbuffer->gart_usage = templ->width0;

	return &rbuffer->b.b;
}