/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

/* This file implements randomized SDMA texture blit tests. */

#include "si_pipe.h"
#include "util/u_surface.h"
#include "util/rand_xor.h"

static uint64_t seed_xorshift128plus[2];

#define RAND_NUM_SIZE 8

/* The GPU blits are emulated on the CPU using these CPU textures. */

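/* A CPU-side reference copy of a texture. Rows are padded to RAND_NUM_SIZE
 * (8 bytes, one xorshift128+ output), so the byte for texel (x, y, layer)
 * lives at ptr + layer * layer_stride + y * stride + x * bpp.
 */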
struct cpu_texture {
	uint8_t *ptr;
	uint64_t size;
	uint64_t layer_stride;
	unsigned stride;
};

static void alloc_cpu_texture(struct cpu_texture *tex,
			      struct pipe_resource *templ, int bpp)
{
	tex->stride = align(templ->width0 * bpp, RAND_NUM_SIZE);
	tex->layer_stride = (uint64_t)tex->stride * templ->height0;
	tex->size = tex->layer_stride * templ->array_size;
	tex->ptr = malloc(tex->size);
	assert(tex->ptr);
}

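/* Fill both copies of the source with identical random data: map the GPU
 * texture for writing and store the same 64-bit xorshift128+ values into the
 * mapped rows and into the CPU reference, RAND_NUM_SIZE bytes at a time.
 * Both strides must therefore be multiples of RAND_NUM_SIZE (asserted below).
 */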
static void set_random_pixels(struct pipe_context *ctx,
			      struct pipe_resource *tex,
			      struct cpu_texture *cpu)
{
	struct pipe_transfer *t;
	uint8_t *map;
	int x, y, z;

	map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_WRITE,
				   0, 0, 0, tex->width0, tex->height0,
				   tex->array_size, &t);
	assert(map);

	for (z = 0; z < tex->array_size; z++) {
		for (y = 0; y < tex->height0; y++) {
			uint64_t *ptr = (uint64_t*)
				(map + t->layer_stride*z + t->stride*y);
			uint64_t *ptr_cpu = (uint64_t*)
				(cpu->ptr + cpu->layer_stride*z + cpu->stride*y);
			unsigned size = cpu->stride / RAND_NUM_SIZE;

			assert(t->stride % RAND_NUM_SIZE == 0);
			assert(cpu->stride % RAND_NUM_SIZE == 0);

			for (x = 0; x < size; x++) {
				*ptr++ = *ptr_cpu++ =
					rand_xorshift128plus(seed_xorshift128plus);
			}
		}
	}

	pipe_transfer_unmap(ctx, t);
}

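/* Map the GPU texture for reading and memcmp each row of each layer against
 * the CPU reference. Only width0 * bpp bytes per row are compared, so stride
 * padding is ignored. Returns true if every row matches.
 */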
static bool compare_textures(struct pipe_context *ctx,
			     struct pipe_resource *tex,
			     struct cpu_texture *cpu, int bpp)
{
	struct pipe_transfer *t;
	uint8_t *map;
	int y, z;
	bool pass = true;

	map = pipe_transfer_map_3d(ctx, tex, 0, PIPE_TRANSFER_READ,
				   0, 0, 0, tex->width0, tex->height0,
				   tex->array_size, &t);
	assert(map);

	for (z = 0; z < tex->array_size; z++) {
		for (y = 0; y < tex->height0; y++) {
			uint8_t *ptr = map + t->layer_stride*z + t->stride*y;
			uint8_t *cpu_ptr = cpu->ptr +
					   cpu->layer_stride*z + cpu->stride*y;

			if (memcmp(ptr, cpu_ptr, tex->width0 * bpp)) {
				pass = false;
				goto done;
			}
		}
	}
done:
	pipe_transfer_unmap(ctx, t);
	return pass;
}

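/* Map a bytes-per-pixel value to an R*_UINT format of the same texel size.
 * The test only moves and compares raw bytes, so the channel layout itself
 * does not matter.
 */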
static enum pipe_format get_format_from_bpp(int bpp)
{
	switch (bpp) {
	case 1:
		return PIPE_FORMAT_R8_UINT;
	case 2:
		return PIPE_FORMAT_R16_UINT;
	case 4:
		return PIPE_FORMAT_R32_UINT;
	case 8:
		return PIPE_FORMAT_R32G32_UINT;
	case 16:
		return PIPE_FORMAT_R32G32B32A32_UINT;
	default:
		assert(0);
		return PIPE_FORMAT_NONE;
	}
}

static const char *array_mode_to_string(struct si_screen *sscreen,
					struct radeon_surf *surf)
{
	if (sscreen->info.chip_class >= GFX9) {
		/* TODO */
		return "       UNKNOWN";
	} else {
		switch (surf->u.legacy.level[0].mode) {
		case RADEON_SURF_MODE_LINEAR_ALIGNED:
			return "LINEAR_ALIGNED";
		case RADEON_SURF_MODE_1D:
			return "1D_TILED_THIN1";
		case RADEON_SURF_MODE_2D:
			return "2D_TILED_THIN1";
		default:
			assert(0);
			return "       UNKNOWN";
		}
	}
}

static unsigned generate_max_tex_side(unsigned max_tex_side)
{
	switch (rand() % 4) {
	case 0:
		/* Try to hit large sizes in 1/4 of the cases. */
		return max_tex_side;
	case 1:
		/* Try to hit 1D tiling in 1/4 of the cases. */
		return 128;
	default:
		/* Try to hit common sizes in 2/4 of the cases. */
		return 2048;
	}
}

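/* Entry point of the randomized blit test. It effectively never returns:
 * it runs an (almost) unbounded number of iterations, printing one pass/fail
 * line per iteration, and calls exit(0) at the end. It is presumably invoked
 * from the driver's debug-option handling (e.g. something like
 * R600_DEBUG=testdma); the exact flag is not visible in this file.
 */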
void si_test_dma(struct si_screen *sscreen)
{
	struct pipe_screen *screen = &sscreen->b;
	struct pipe_context *ctx = screen->context_create(screen, NULL, 0);
	struct si_context *sctx = (struct si_context*)ctx;
	uint64_t max_alloc_size;
	unsigned i, iterations, num_partial_copies, max_levels, max_tex_side;
	unsigned num_pass = 0, num_fail = 0;

	max_levels = screen->get_param(screen, PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
	max_tex_side = 1 << (max_levels - 1);

	/* Max 128 MB allowed for both textures. */
	max_alloc_size = 128 * 1024 * 1024;

	/* the seed for random test parameters */
	srand(0x9b47d95b);
	/* the seed for random pixel data */
	s_rand_xorshift128plus(seed_xorshift128plus, false);

	iterations = 1000000000; /* just kill it when you are bored */
	num_partial_copies = 30;

	/* These parameters are randomly generated per test:
	 * - whether to do one whole-surface copy or N partial copies per test
	 * - which tiling modes to use (LINEAR_ALIGNED, 1D, 2D)
	 * - which texture dimensions to use
	 * - whether to use VRAM (all tiling modes) and GTT (staging, linear
	 *   only) allocations
	 * - random initial pixels in src
	 * - generate random subrectangle copies for partial blits
	 */
	for (i = 0; i < iterations; i++) {
		struct pipe_resource tsrc = {}, tdst = {}, *src, *dst;
		struct r600_texture *rdst;
		struct r600_texture *rsrc;
		struct cpu_texture src_cpu, dst_cpu;
		unsigned bpp, max_width, max_height, max_depth, j, num;
		unsigned gfx_blits = 0, dma_blits = 0, max_tex_side_gen;
		unsigned max_tex_layers;
		bool pass;
		bool do_partial_copies = rand() & 1;

		/* generate a random test case */
		tsrc.target = tdst.target = PIPE_TEXTURE_2D_ARRAY;
		tsrc.depth0 = tdst.depth0 = 1;

		bpp = 1 << (rand() % 5);
		tsrc.format = tdst.format = get_format_from_bpp(bpp);

		max_tex_side_gen = generate_max_tex_side(max_tex_side);
		max_tex_layers = rand() % 4 ? 1 : 5;

		tsrc.width0 = (rand() % max_tex_side_gen) + 1;
		tsrc.height0 = (rand() % max_tex_side_gen) + 1;
		tsrc.array_size = (rand() % max_tex_layers) + 1;

		/* Have a 1/4 chance of getting power-of-two dimensions. */
		if (rand() % 4 == 0) {
			tsrc.width0 = util_next_power_of_two(tsrc.width0);
			tsrc.height0 = util_next_power_of_two(tsrc.height0);
		}

		if (!do_partial_copies) {
			/* whole-surface copies only, same dimensions */
			tdst = tsrc;
		} else {
			max_tex_side_gen = generate_max_tex_side(max_tex_side);
			max_tex_layers = rand() % 4 ? 1 : 5;

			/* many partial copies, dimensions can be different */
			tdst.width0 = (rand() % max_tex_side_gen) + 1;
			tdst.height0 = (rand() % max_tex_side_gen) + 1;
			tdst.array_size = (rand() % max_tex_layers) + 1;

			/* Have a 1/4 chance of getting power-of-two dimensions. */
			if (rand() % 4 == 0) {
				tdst.width0 = util_next_power_of_two(tdst.width0);
				tdst.height0 = util_next_power_of_two(tdst.height0);
			}
		}

		/* check texture sizes */
		if ((uint64_t)tsrc.width0 * tsrc.height0 * tsrc.array_size * bpp +
		    (uint64_t)tdst.width0 * tdst.height0 * tdst.array_size * bpp >
		    max_alloc_size) {
			/* too large, try again */
			i--;
			continue;
		}

		/* VRAM + the tiling mode depends on dimensions (3/4 of cases),
		 * or GTT + linear only (1/4 of cases)
		 */
		tsrc.usage = rand() % 4 ? PIPE_USAGE_DEFAULT : PIPE_USAGE_STAGING;
		tdst.usage = rand() % 4 ? PIPE_USAGE_DEFAULT : PIPE_USAGE_STAGING;

		/* Allocate textures (both the GPU and CPU copies).
		 * The CPU will emulate what the GPU should be doing.
		 */
		src = screen->resource_create(screen, &tsrc);
		dst = screen->resource_create(screen, &tdst);
		assert(src);
		assert(dst);
		rdst = (struct r600_texture*)dst;
		rsrc = (struct r600_texture*)src;
		alloc_cpu_texture(&src_cpu, &tsrc, bpp);
		alloc_cpu_texture(&dst_cpu, &tdst, bpp);

		printf("%4u: dst = (%5u x %5u x %u, %s), "
		       " src = (%5u x %5u x %u, %s), bpp = %2u, ",
		       i, tdst.width0, tdst.height0, tdst.array_size,
		       array_mode_to_string(sscreen, &rdst->surface),
		       tsrc.width0, tsrc.height0, tsrc.array_size,
		       array_mode_to_string(sscreen, &rsrc->surface), bpp);
		fflush(stdout);

		/* set src pixels */
		set_random_pixels(ctx, src, &src_cpu);

		/* clear dst pixels */
		si_clear_buffer(ctx, dst, 0, rdst->surface.surf_size, 0, true);
		memset(dst_cpu.ptr, 0, dst_cpu.layer_stride * tdst.array_size);

		/* preparation */
		max_width = MIN2(tsrc.width0, tdst.width0);
		max_height = MIN2(tsrc.height0, tdst.height0);
		max_depth = MIN2(tsrc.array_size, tdst.array_size);

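		/* Each iteration of the loop below performs one GPU copy through
		 * sctx->b.dma_copy() (which may end up on the SDMA engine or fall
		 * back to a gfx blit) and the equivalent CPU copy through
		 * util_copy_box(). The per-context draw/DMA call counters are
		 * sampled before and after to report which engine handled it.
		 */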
		num = do_partial_copies ? num_partial_copies : 1;
		for (j = 0; j < num; j++) {
			int width, height, depth;
			int srcx, srcy, srcz, dstx, dsty, dstz;
			struct pipe_box box;
			unsigned old_num_draw_calls = sctx->b.num_draw_calls;
			unsigned old_num_dma_calls = sctx->b.num_dma_calls;

			if (!do_partial_copies) {
				/* copy whole src to dst */
				width = max_width;
				height = max_height;
				depth = max_depth;

				srcx = srcy = srcz = dstx = dsty = dstz = 0;
			} else {
				/* random sub-rectangle copies from src to dst */
				depth = (rand() % max_depth) + 1;
				srcz = rand() % (tsrc.array_size - depth + 1);
				dstz = rand() % (tdst.array_size - depth + 1);

				/* special code path to hit the tiled partial copies */
				if (!rsrc->surface.is_linear &&
				    !rdst->surface.is_linear &&
				    rand() & 1) {
					if (max_width < 8 || max_height < 8)
						continue;
					width = ((rand() % (max_width / 8)) + 1) * 8;
					height = ((rand() % (max_height / 8)) + 1) * 8;

					srcx = rand() % (tsrc.width0 - width + 1) & ~0x7;
					srcy = rand() % (tsrc.height0 - height + 1) & ~0x7;

					dstx = rand() % (tdst.width0 - width + 1) & ~0x7;
					dsty = rand() % (tdst.height0 - height + 1) & ~0x7;
				} else {
					/* just make sure that it doesn't divide by zero */
					assert(max_width > 0 && max_height > 0);

					width = (rand() % max_width) + 1;
					height = (rand() % max_height) + 1;

					srcx = rand() % (tsrc.width0 - width + 1);
					srcy = rand() % (tsrc.height0 - height + 1);

					dstx = rand() % (tdst.width0 - width + 1);
					dsty = rand() % (tdst.height0 - height + 1);
				}

				/* special code path to hit out-of-bounds reads in L2T */
				if (rsrc->surface.is_linear &&
				    !rdst->surface.is_linear &&
				    rand() % 4 == 0) {
					srcx = 0;
					srcy = 0;
					srcz = 0;
				}
			}

			/* GPU copy */
			u_box_3d(srcx, srcy, srcz, width, height, depth, &box);
			sctx->b.dma_copy(ctx, dst, 0, dstx, dsty, dstz, src, 0, &box);

			/* See which engine was used. */
			gfx_blits += sctx->b.num_draw_calls > old_num_draw_calls;
			dma_blits += sctx->b.num_dma_calls > old_num_dma_calls;

			/* CPU copy */
			util_copy_box(dst_cpu.ptr, tdst.format, dst_cpu.stride,
				      dst_cpu.layer_stride,
				      dstx, dsty, dstz, width, height, depth,
				      src_cpu.ptr, src_cpu.stride,
				      src_cpu.layer_stride,
				      srcx, srcy, srcz);
		}

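		/* After all copies, the destination texture in GPU memory must
		 * match the CPU reference (width0 * bpp bytes per row). */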
		pass = compare_textures(ctx, dst, &dst_cpu, bpp);
		if (pass)
			num_pass++;
		else
			num_fail++;

		printf("BLITs: GFX = %2u, DMA = %2u, %s [%u/%u]\n",
		       gfx_blits, dma_blits, pass ? "pass" : "fail",
		       num_pass, num_pass+num_fail);

		/* cleanup */
		pipe_resource_reference(&src, NULL);
		pipe_resource_reference(&dst, NULL);
		free(src_cpu.ptr);
		free(dst_cpu.ptr);
	}

	ctx->destroy(ctx);
	exit(0);
}
    399