/* Texture management for the Mesa r600 Gallium driver. */
      1 /*
      2  * Copyright 2010 Jerome Glisse <glisse (at) freedesktop.org>
      3  *
      4  * Permission is hereby granted, free of charge, to any person obtaining a
      5  * copy of this software and associated documentation files (the "Software"),
      6  * to deal in the Software without restriction, including without limitation
      7  * on the rights to use, copy, modify, merge, publish, distribute, sub
      8  * license, and/or sell copies of the Software, and to permit persons to whom
      9  * the Software is furnished to do so, subject to the following conditions:
     10  *
     11  * The above copyright notice and this permission notice (including the next
     12  * paragraph) shall be included in all copies or substantial portions of the
     13  * Software.
     14  *
     15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
     16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
     17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
     18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
     19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
     20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
     21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
     22  *
     23  * Authors:
     24  *      Jerome Glisse
     25  *      Corbin Simpson
     26  */
     27 #include "r600_formats.h"
     28 #include "r600d.h"
     29 
     30 #include <errno.h>
     31 #include "util/u_format_s3tc.h"
     32 #include "util/u_memory.h"
     33 
     34 /* Copy from a full GPU texture to a transfer's staging one. */
     35 static void r600_copy_to_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
     36 {
     37 	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
     38 	struct pipe_resource *texture = transfer->resource;
     39 
     40 	ctx->resource_copy_region(ctx, &rtransfer->staging->b.b,
     41 				0, 0, 0, 0, texture, transfer->level,
     42 				&transfer->box);
     43 }
     44 
     45 
     46 /* Copy from a transfer's staging texture to a full GPU one. */
     47 static void r600_copy_from_staging_texture(struct pipe_context *ctx, struct r600_transfer *rtransfer)
     48 {
     49 	struct pipe_transfer *transfer = (struct pipe_transfer*)rtransfer;
     50 	struct pipe_resource *texture = transfer->resource;
     51 	struct pipe_box sbox;
     52 
     53 	u_box_origin_2d(transfer->box.width, transfer->box.height, &sbox);
     54 
     55 	ctx->resource_copy_region(ctx, texture, transfer->level,
     56 				  transfer->box.x, transfer->box.y, transfer->box.z,
     57 				  &rtransfer->staging->b.b,
     58 				  0, &sbox);
     59 }
     60 
     61 unsigned r600_texture_get_offset(struct r600_texture *rtex,
     62 					unsigned level, unsigned layer)
     63 {
     64 	return rtex->surface.level[level].offset +
     65 	       layer * rtex->surface.level[level].slice_size;
     66 }
     67 
/* Fill in a radeon_surface description from a pipe_resource template.
 *
 * Translates gallium texture parameters (dimensions, format, target,
 * requested array/tiling mode) into the winsys surface structure that
 * surface_init() will later lay out.
 *
 * Returns 0 on success, or -EINVAL for unsupported targets
 * (PIPE_BUFFER and anything unrecognized).
 */
static int r600_init_surface(struct r600_screen *rscreen,
			     struct radeon_surface *surface,
			     const struct pipe_resource *ptex,
			     unsigned array_mode,
			     bool is_transfer, bool is_flushed_depth)
{
	const struct util_format_description *desc =
		util_format_description(ptex->format);
	bool is_depth, is_stencil;

	is_depth = util_format_has_depth(desc);
	is_stencil = util_format_has_stencil(desc);

	/* Dimensions in pixels and in format blocks. */
	surface->npix_x = ptex->width0;
	surface->npix_y = ptex->height0;
	surface->npix_z = ptex->depth0;
	surface->blk_w = util_format_get_blockwidth(ptex->format);
	surface->blk_h = util_format_get_blockheight(ptex->format);
	surface->blk_d = 1;
	surface->array_size = 1;
	surface->last_level = ptex->last_level;

	if (rscreen->chip_class >= EVERGREEN &&
	    !is_transfer && !is_flushed_depth &&
	    ptex->format == PIPE_FORMAT_Z32_FLOAT_S8X24_UINT) {
		surface->bpe = 4; /* stencil is allocated separately on evergreen */
	} else {
		surface->bpe = util_format_get_blocksize(ptex->format);
		/* align byte per element on dword */
		if (surface->bpe == 3) {
			surface->bpe = 4;
		}
	}

	surface->nsamples = ptex->nr_samples ? ptex->nr_samples : 1;
	surface->flags = 0;

	/* Requested hw array mode -> winsys surface mode. */
	switch (array_mode) {
	case V_038000_ARRAY_1D_TILED_THIN1:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_1D, MODE);
		break;
	case V_038000_ARRAY_2D_TILED_THIN1:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_2D, MODE);
		break;
	case V_038000_ARRAY_LINEAR_ALIGNED:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR_ALIGNED, MODE);
		break;
	case V_038000_ARRAY_LINEAR_GENERAL:
	default:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_MODE_LINEAR, MODE);
		break;
	}
	/* Gallium texture target -> winsys surface type. */
	switch (ptex->target) {
	case PIPE_TEXTURE_1D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D, TYPE);
		break;
	case PIPE_TEXTURE_RECT:
	case PIPE_TEXTURE_2D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D, TYPE);
		break;
	case PIPE_TEXTURE_3D:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_3D, TYPE);
		break;
	case PIPE_TEXTURE_1D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_1D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_2D_ARRAY:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_2D_ARRAY, TYPE);
		surface->array_size = ptex->array_size;
		break;
	case PIPE_TEXTURE_CUBE:
		surface->flags |= RADEON_SURF_SET(RADEON_SURF_TYPE_CUBEMAP, TYPE);
		break;
	case PIPE_BUFFER:
	default:
		return -EINVAL;
	}
	if (ptex->bind & PIPE_BIND_SCANOUT) {
		surface->flags |= RADEON_SURF_SCANOUT;
	}

	/* Real depth buffers (not transfer/flushed copies) get the
	 * Z/stencil layout flags. */
	if (!is_transfer && !is_flushed_depth && is_depth) {
		surface->flags |= RADEON_SURF_ZBUFFER;

		if (is_stencil) {
			surface->flags |= RADEON_SURF_SBUFFER;
		}
	}
	return 0;
}
    159 
    160 static int r600_setup_surface(struct pipe_screen *screen,
    161 			      struct r600_texture *rtex,
    162 			      unsigned pitch_in_bytes_override)
    163 {
    164 	struct pipe_resource *ptex = &rtex->resource.b.b;
    165 	struct r600_screen *rscreen = (struct r600_screen*)screen;
    166 	unsigned i;
    167 	int r;
    168 
    169 	r = rscreen->ws->surface_init(rscreen->ws, &rtex->surface);
    170 	if (r) {
    171 		return r;
    172 	}
    173 	rtex->size = rtex->surface.bo_size;
    174 	if (pitch_in_bytes_override && pitch_in_bytes_override != rtex->surface.level[0].pitch_bytes) {
    175 		/* old ddx on evergreen over estimate alignment for 1d, only 1 level
    176 		 * for those
    177 		 */
    178 		rtex->surface.level[0].nblk_x = pitch_in_bytes_override / rtex->surface.bpe;
    179 		rtex->surface.level[0].pitch_bytes = pitch_in_bytes_override;
    180 		rtex->surface.level[0].slice_size = pitch_in_bytes_override * rtex->surface.level[0].nblk_y;
    181 		if (rtex->surface.flags & RADEON_SURF_SBUFFER) {
    182 			rtex->surface.stencil_offset = rtex->surface.level[0].slice_size;
    183 		}
    184 	}
    185 	for (i = 0; i <= ptex->last_level; i++) {
    186 		switch (rtex->surface.level[i].mode) {
    187 		case RADEON_SURF_MODE_LINEAR_ALIGNED:
    188 			rtex->array_mode[i] = V_038000_ARRAY_LINEAR_ALIGNED;
    189 			break;
    190 		case RADEON_SURF_MODE_1D:
    191 			rtex->array_mode[i] = V_038000_ARRAY_1D_TILED_THIN1;
    192 			break;
    193 		case RADEON_SURF_MODE_2D:
    194 			rtex->array_mode[i] = V_038000_ARRAY_2D_TILED_THIN1;
    195 			break;
    196 		default:
    197 		case RADEON_SURF_MODE_LINEAR:
    198 			rtex->array_mode[i] = 0;
    199 			break;
    200 		}
    201 	}
    202 	return 0;
    203 }
    204 
    205 static boolean r600_texture_get_handle(struct pipe_screen* screen,
    206 					struct pipe_resource *ptex,
    207 					struct winsys_handle *whandle)
    208 {
    209 	struct r600_texture *rtex = (struct r600_texture*)ptex;
    210 	struct r600_resource *resource = &rtex->resource;
    211 	struct radeon_surface *surface = &rtex->surface;
    212 	struct r600_screen *rscreen = (struct r600_screen*)screen;
    213 
    214 	rscreen->ws->buffer_set_tiling(resource->buf,
    215 				       NULL,
    216 				       surface->level[0].mode >= RADEON_SURF_MODE_1D ?
    217 				       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
    218 				       surface->level[0].mode >= RADEON_SURF_MODE_2D ?
    219 				       RADEON_LAYOUT_TILED : RADEON_LAYOUT_LINEAR,
    220 				       surface->bankw, surface->bankh,
    221 				       surface->tile_split,
    222 				       surface->stencil_tile_split,
    223 				       surface->mtilea,
    224 				       rtex->surface.level[0].pitch_bytes);
    225 
    226 	return rscreen->ws->buffer_get_handle(resource->buf,
    227 					      rtex->surface.level[0].pitch_bytes, whandle);
    228 }
    229 
    230 static void r600_texture_destroy(struct pipe_screen *screen,
    231 				 struct pipe_resource *ptex)
    232 {
    233 	struct r600_texture *rtex = (struct r600_texture*)ptex;
    234 	struct r600_resource *resource = &rtex->resource;
    235 
    236 	if (rtex->flushed_depth_texture)
    237 		pipe_resource_reference((struct pipe_resource **)&rtex->flushed_depth_texture, NULL);
    238 
    239 	pb_reference(&resource->buf, NULL);
    240 	FREE(rtex);
    241 }
    242 
/* Dispatch table plugged into the generic u_resource machinery; routes
 * the resource entry points to the texture implementations in this file.
 * NULL entries fall back to u_resource's defaults. */
static const struct u_resource_vtbl r600_texture_vtbl =
{
	r600_texture_get_handle,	/* get_handle */
	r600_texture_destroy,		/* resource_destroy */
	r600_texture_get_transfer,	/* get_transfer */
	r600_texture_transfer_destroy,	/* transfer_destroy */
	r600_texture_transfer_map,	/* transfer_map */
	NULL,				/* transfer_flush_region */
	r600_texture_transfer_unmap,	/* transfer_unmap */
	NULL				/* transfer_inline_write */
};
    254 
/* The number of samples can be specified independently of the texture.
 *
 * Compute allocation parameters (byte size, alignment, bank height) for
 * an FMASK buffer matching rtex at the given sample count.
 * NOTE(review): on the error paths *out is left unwritten — callers
 * must pass an initialized struct or validate nr_samples first. */
void r600_texture_get_fmask_info(struct r600_screen *rscreen,
				 struct r600_texture *rtex,
				 unsigned nr_samples,
				 struct r600_fmask_info *out)
{
	/* FMASK is allocated pretty much like an ordinary texture.
	 * Here we use bpe in the units of bits, not bytes. */
	struct radeon_surface fmask = rtex->surface;

	switch (nr_samples) {
	case 2:
		/* This should be 8,1, but we should set nsamples > 1
		 * for the allocator to treat it as a multisample surface.
		 * Let's set 4,2 then. */
		/* fallthrough */
	case 4:
		fmask.bpe = 4;
		fmask.nsamples = 2;
		break;
	case 8:
		fmask.bpe = 8;
		fmask.nsamples = 4;
		break;
	case 16:
		fmask.bpe = 16;
		fmask.nsamples = 4;
		break;
	default:
		R600_ERR("Invalid sample count for FMASK allocation.\n");
		return;
	}

	/* R600-R700 errata? Anyway, this fixes colorbuffer corruption. */
	if (rscreen->chip_class <= R700) {
		fmask.bpe *= 2;
	}

	if (rscreen->chip_class >= EVERGREEN) {
		fmask.bankh = nr_samples <= 4 ? 4 : 1;
	}

	if (rscreen->ws->surface_init(rscreen->ws, &fmask)) {
		R600_ERR("Got error in surface_init while allocating FMASK.\n");
		return;
	}
	assert(fmask.level[0].mode == RADEON_SURF_MODE_2D);

	out->bank_height = fmask.bankh;
	out->alignment = MAX2(256, fmask.bo_alignment);
	/* bpe above was in bits, so convert the BO size back to bytes. */
	out->size = (fmask.bo_size + 7) / 8;
}
    306 
    307 static void r600_texture_allocate_fmask(struct r600_screen *rscreen,
    308 					struct r600_texture *rtex)
    309 {
    310 	struct r600_fmask_info fmask;
    311 
    312 	r600_texture_get_fmask_info(rscreen, rtex,
    313 				    rtex->resource.b.b.nr_samples, &fmask);
    314 
    315 	/* Reserve space for FMASK while converting bits back to bytes. */
    316 	rtex->fmask_bank_height = fmask.bank_height;
    317 	rtex->fmask_offset = align(rtex->size, fmask.alignment);
    318 	rtex->fmask_size = fmask.size;
    319 	rtex->size = rtex->fmask_offset + rtex->fmask_size;
    320 #if 0
    321 	printf("FMASK width=%u, height=%i, bits=%u, size=%u\n",
    322 	       fmask.npix_x, fmask.npix_y, fmask.bpe * fmask.nsamples, rtex->fmask_size);
    323 #endif
    324 }
    325 
/* Compute size, alignment and slice_tile_max for the CMASK metadata
 * buffer of rtex.  CMASK stores one 4-bit element per 8x8 pixel tile;
 * the layout below mirrors the hw macro-tile arrangement derived from
 * the pipe count and cache-line size. */
void r600_texture_get_cmask_info(struct r600_screen *rscreen,
				 struct r600_texture *rtex,
				 struct r600_cmask_info *out)
{
	unsigned cmask_tile_width = 8;
	unsigned cmask_tile_height = 8;
	unsigned cmask_tile_elements = cmask_tile_width * cmask_tile_height;
	unsigned element_bits = 4;
	unsigned cmask_cache_bits = 1024;
	unsigned num_pipes = rscreen->tiling_info.num_channels;
	unsigned pipe_interleave_bytes = rscreen->tiling_info.group_bytes;

	/* One macro tile covers one CMASK cache line per pipe; derive a
	 * roughly square power-of-two footprint from that element count. */
	unsigned elements_per_macro_tile = (cmask_cache_bits / element_bits) * num_pipes;
	unsigned pixels_per_macro_tile = elements_per_macro_tile * cmask_tile_elements;
	unsigned sqrt_pixels_per_macro_tile = sqrt(pixels_per_macro_tile);
	unsigned macro_tile_width = util_next_power_of_two(sqrt_pixels_per_macro_tile);
	unsigned macro_tile_height = pixels_per_macro_tile / macro_tile_width;

	unsigned pitch_elements = align(rtex->surface.npix_x, macro_tile_width);
	unsigned height = align(rtex->surface.npix_y, macro_tile_height);

	unsigned base_align = num_pipes * pipe_interleave_bytes;
	unsigned slice_bytes =
		((pitch_elements * height * element_bits + 7) / 8) / cmask_tile_elements;

	/* slice_tile_max below is programmed in units of 128x128 pixel
	 * blocks, so the macro tile dimensions must be multiples of 128. */
	assert(macro_tile_width % 128 == 0);
	assert(macro_tile_height % 128 == 0);

	out->slice_tile_max = ((pitch_elements * height) / (128*128)) - 1;
	out->alignment = MAX2(256, base_align);
	out->size = rtex->surface.array_size * align(slice_bytes, base_align);
}
    358 
    359 static void r600_texture_allocate_cmask(struct r600_screen *rscreen,
    360 					struct r600_texture *rtex)
    361 {
    362 	struct r600_cmask_info cmask;
    363 
    364 	r600_texture_get_cmask_info(rscreen, rtex, &cmask);
    365 
    366 	rtex->cmask_slice_tile_max = cmask.slice_tile_max;
    367 	rtex->cmask_offset = align(rtex->size, cmask.alignment);
    368 	rtex->cmask_size = cmask.size;
    369 	rtex->size = rtex->cmask_offset + rtex->cmask_size;
    370 #if 0
    371 	printf("CMASK: macro tile width = %u, macro tile height = %u, "
    372 	       "pitch elements = %u, height = %u, slice tile max = %u\n",
    373 	       macro_tile_width, macro_tile_height, pitch_elements, height,
    374 	       rtex->cmask_slice_tile_max);
    375 #endif
    376 }
    377 
    378 static struct r600_texture *
    379 r600_texture_create_object(struct pipe_screen *screen,
    380 			   const struct pipe_resource *base,
    381 			   unsigned pitch_in_bytes_override,
    382 			   struct pb_buffer *buf,
    383 			   boolean alloc_bo,
    384 			   struct radeon_surface *surface)
    385 {
    386 	struct r600_texture *rtex;
    387 	struct r600_resource *resource;
    388 	struct r600_screen *rscreen = (struct r600_screen*)screen;
    389 	int r;
    390 
    391 	rtex = CALLOC_STRUCT(r600_texture);
    392 	if (rtex == NULL)
    393 		return NULL;
    394 
    395 	resource = &rtex->resource;
    396 	resource->b.b = *base;
    397 	resource->b.vtbl = &r600_texture_vtbl;
    398 	pipe_reference_init(&resource->b.b.reference, 1);
    399 	resource->b.b.screen = screen;
    400 	rtex->pitch_override = pitch_in_bytes_override;
    401 
    402 	/* don't include stencil-only formats which we don't support for rendering */
    403 	rtex->is_depth = util_format_has_depth(util_format_description(rtex->resource.b.b.format));
    404 
    405 	rtex->surface = *surface;
    406 	r = r600_setup_surface(screen, rtex,
    407 			       pitch_in_bytes_override);
    408 	if (r) {
    409 		FREE(rtex);
    410 		return NULL;
    411 	}
    412 
    413 	if (base->nr_samples > 1 && !rtex->is_depth && alloc_bo) {
    414 		r600_texture_allocate_cmask(rscreen, rtex);
    415 		r600_texture_allocate_fmask(rscreen, rtex);
    416 	}
    417 
    418 	if (!rtex->is_depth && base->nr_samples > 1 &&
    419 	    (!rtex->fmask_size || !rtex->cmask_size)) {
    420 		FREE(rtex);
    421 		return NULL;
    422 	}
    423 
    424 	/* Now create the backing buffer. */
    425 	if (!buf && alloc_bo) {
    426 		unsigned base_align = rtex->surface.bo_alignment;
    427 		unsigned usage = R600_TEX_IS_TILED(rtex, 0) ? PIPE_USAGE_STATIC : base->usage;
    428 
    429 		if (!r600_init_resource(rscreen, resource, rtex->size, base_align, base->bind, usage)) {
    430 			FREE(rtex);
    431 			return NULL;
    432 		}
    433 	} else if (buf) {
    434 		resource->buf = buf;
    435 		resource->cs_buf = rscreen->ws->buffer_get_cs_handle(buf);
    436 		resource->domains = RADEON_DOMAIN_GTT | RADEON_DOMAIN_VRAM;
    437 	}
    438 
    439 	if (rtex->cmask_size) {
    440 		/* Initialize the cmask to 0xCC (= compressed state). */
    441 		char *ptr = rscreen->ws->buffer_map(resource->cs_buf, NULL, PIPE_TRANSFER_WRITE);
    442 		memset(ptr + rtex->cmask_offset, 0xCC, rtex->cmask_size);
    443 		rscreen->ws->buffer_unmap(resource->cs_buf);
    444 	}
    445 	return rtex;
    446 }
    447 
/* Screen entry point: create a texture from a gallium template.
 *
 * Picks an array (tiling) mode heuristically, asks the winsys to
 * compute/refine the surface layout, then builds the texture object
 * with a freshly allocated backing buffer. */
struct pipe_resource *r600_texture_create(struct pipe_screen *screen,
						const struct pipe_resource *templ)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct radeon_surface surface;
	unsigned array_mode = 0;
	int r;

	/* Transfer (staging) textures stay linear; everything else gets
	 * 2D tiling unless it is scanout/staging/stream usage, in which
	 * case compressed formats still get 1D tiling. */
	if (!(templ->flags & R600_RESOURCE_FLAG_TRANSFER)) {
		if (templ->flags & R600_RESOURCE_FLAG_FORCE_TILING) {
			array_mode = V_038000_ARRAY_2D_TILED_THIN1;
		} else if (!(templ->bind & PIPE_BIND_SCANOUT) &&
		    templ->usage != PIPE_USAGE_STAGING &&
		    templ->usage != PIPE_USAGE_STREAM) {
			array_mode = V_038000_ARRAY_2D_TILED_THIN1;
		} else if (util_format_is_compressed(templ->format)) {
			array_mode = V_038000_ARRAY_1D_TILED_THIN1;
		}
	}

	/* XXX tiling is broken for the 422 formats */
	if (util_format_description(templ->format)->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED)
		array_mode = V_038000_ARRAY_LINEAR_ALIGNED;

	r = r600_init_surface(rscreen, &surface, templ, array_mode,
			      templ->flags & R600_RESOURCE_FLAG_TRANSFER,
			      templ->flags & R600_RESOURCE_FLAG_FLUSHED_DEPTH);
	if (r) {
		return NULL;
	}
	/* Let the winsys adjust the layout to what the kernel prefers. */
	r = rscreen->ws->surface_best(rscreen->ws, &surface);
	if (r) {
		return NULL;
	}
	return (struct pipe_resource *)r600_texture_create_object(screen, templ,
								  0, NULL, TRUE, &surface);
}
    485 
    486 static struct pipe_surface *r600_create_surface(struct pipe_context *pipe,
    487 						struct pipe_resource *texture,
    488 						const struct pipe_surface *templ)
    489 {
    490 	struct r600_texture *rtex = (struct r600_texture*)texture;
    491 	struct r600_surface *surface = CALLOC_STRUCT(r600_surface);
    492 	unsigned level = templ->u.tex.level;
    493 
    494 	assert(templ->u.tex.first_layer == templ->u.tex.last_layer);
    495 	if (surface == NULL)
    496 		return NULL;
    497 	pipe_reference_init(&surface->base.reference, 1);
    498 	pipe_resource_reference(&surface->base.texture, texture);
    499 	surface->base.context = pipe;
    500 	surface->base.format = templ->format;
    501 	surface->base.width = rtex->surface.level[level].npix_x;
    502 	surface->base.height = rtex->surface.level[level].npix_y;
    503 	surface->base.usage = templ->usage;
    504 	surface->base.u = templ->u;
    505 	return &surface->base;
    506 }
    507 
/* Destroy a pipe_surface: release the FMASK/CMASK buffer references
 * held by the surface, then the texture reference, then the struct. */
static void r600_surface_destroy(struct pipe_context *pipe,
				 struct pipe_surface *surface)
{
	struct r600_surface *surf = (struct r600_surface*)surface;
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_fmask, NULL);
	pipe_resource_reference((struct pipe_resource**)&surf->cb_buffer_cmask, NULL);
	pipe_resource_reference(&surface->texture, NULL);
	FREE(surface);
}
    517 
/* Import a texture from a winsys handle (e.g. a DRI shared buffer).
 *
 * Queries the kernel's tiling state for the buffer and reconstructs a
 * matching surface description.  Returns NULL for anything but simple
 * 2D, non-mipmapped, non-array textures. */
struct pipe_resource *r600_texture_from_handle(struct pipe_screen *screen,
					       const struct pipe_resource *templ,
					       struct winsys_handle *whandle)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	struct pb_buffer *buf = NULL;
	unsigned stride = 0;
	unsigned array_mode = 0;
	enum radeon_bo_layout micro, macro;
	struct radeon_surface surface;
	int r;

	/* Support only 2D textures without mipmaps */
	if ((templ->target != PIPE_TEXTURE_2D && templ->target != PIPE_TEXTURE_RECT) ||
	      templ->depth0 != 1 || templ->last_level != 0)
		return NULL;

	buf = rscreen->ws->buffer_from_handle(rscreen->ws, whandle, &stride);
	if (!buf)
		return NULL;

	/* Read back the tiling parameters the exporter programmed. */
	rscreen->ws->buffer_get_tiling(buf, &micro, &macro,
				       &surface.bankw, &surface.bankh,
				       &surface.tile_split,
				       &surface.stencil_tile_split,
				       &surface.mtilea);

	/* NOTE(review): these use V_0280A0_* constants while
	 * r600_texture_create uses V_038000_* — presumably the register
	 * field values are identical; verify against r600d.h. */
	if (macro == RADEON_LAYOUT_TILED)
		array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
	else if (micro == RADEON_LAYOUT_TILED)
		array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
	else
		array_mode = 0;

	r = r600_init_surface(rscreen, &surface, templ, array_mode, false, false);
	if (r) {
		return NULL;
	}
	/* alloc_bo=FALSE: adopt 'buf' instead of allocating storage. */
	return (struct pipe_resource *)r600_texture_create_object(screen, templ,
								  stride, buf, FALSE, &surface);
}
    559 
/* Create (or reuse) a texture that will hold a flushed, sampleable
 * copy of a depth texture.
 *
 * If 'staging' is non-NULL the new texture is returned there (and is
 * created with transfer/staging usage); otherwise it is cached in
 * rtex->flushed_depth_texture and reused on later calls.
 * Returns false if the temporary texture could not be created. */
bool r600_init_flushed_depth_texture(struct pipe_context *ctx,
				     struct pipe_resource *texture,
				     struct r600_texture **staging)
{
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_texture **flushed_depth_texture = staging ?
			staging : &rtex->flushed_depth_texture;

	if (!staging && rtex->flushed_depth_texture)
		return true; /* it's ready */

	/* Clone the source's shape but strip the depth/stencil binding.
	 * NOTE(review): 'resource' is a stack template and only the
	 * fields below are assigned — presumably resource_create ignores
	 * the rest; confirm before relying on other fields. */
	resource.target = texture->target;
	resource.format = texture->format;
	resource.width0 = texture->width0;
	resource.height0 = texture->height0;
	resource.depth0 = texture->depth0;
	resource.array_size = texture->array_size;
	resource.last_level = texture->last_level;
	resource.nr_samples = texture->nr_samples;
	resource.usage = staging ? PIPE_USAGE_STAGING : PIPE_USAGE_STATIC;
	resource.bind = texture->bind & ~PIPE_BIND_DEPTH_STENCIL;
	resource.flags = texture->flags | R600_RESOURCE_FLAG_FLUSHED_DEPTH;

	if (staging)
		resource.flags |= R600_RESOURCE_FLAG_TRANSFER;

	*flushed_depth_texture = (struct r600_texture *)ctx->screen->resource_create(ctx->screen, &resource);
	if (*flushed_depth_texture == NULL) {
		R600_ERR("failed to create temporary texture to hold flushed depth\n");
		return false;
	}

	(*flushed_depth_texture)->is_flushing_texture = TRUE;
	return true;
}
    596 
/* Set up a transfer (CPU map request) for a texture.
 *
 * Decides between mapping the texture directly and going through a
 * staging copy (tiled textures, busy buffers, depth readback), and
 * prepares whatever staging resource is needed.  Returns NULL on
 * failure or when a direct map was demanded but is impossible. */
struct pipe_transfer* r600_texture_get_transfer(struct pipe_context *ctx,
						struct pipe_resource *texture,
						unsigned level,
						unsigned usage,
						const struct pipe_box *box)
{
	struct r600_context *rctx = (struct r600_context*)ctx;
	struct r600_texture *rtex = (struct r600_texture*)texture;
	struct pipe_resource resource;
	struct r600_transfer *trans;
	boolean use_staging_texture = FALSE;

	/* We cannot map a tiled texture directly because the data is
	 * in a different order, therefore we do detiling using a blit.
	 *
	 * Also, use a temporary in GTT memory for read transfers, as
	 * the CPU is much happier reading out of cached system memory
	 * than uncached VRAM.
	 */
	if (R600_TEX_IS_TILED(rtex, level)) {
		use_staging_texture = TRUE;
	}

	/* Use a staging texture for uploads if the underlying BO is busy. */
	if (!(usage & PIPE_TRANSFER_READ) &&
	    (rctx->ws->cs_is_buffer_referenced(rctx->cs, rtex->resource.cs_buf, RADEON_USAGE_READWRITE) ||
	     rctx->ws->buffer_is_busy(rtex->resource.buf, RADEON_USAGE_READWRITE))) {
		use_staging_texture = TRUE;
	}

	/* A transfer texture is already CPU-friendly; never stage it. */
	if (texture->flags & R600_RESOURCE_FLAG_TRANSFER) {
		use_staging_texture = FALSE;
	}

	/* Caller demanded a direct map but we must stage: refuse. */
	if (use_staging_texture && (usage & PIPE_TRANSFER_MAP_DIRECTLY)) {
		return NULL;
	}

	trans = CALLOC_STRUCT(r600_transfer);
	if (trans == NULL)
		return NULL;
	pipe_resource_reference(&trans->transfer.resource, texture);
	trans->transfer.level = level;
	trans->transfer.usage = usage;
	trans->transfer.box = *box;
	if (rtex->is_depth) {
		/* XXX: only readback the rectangle which is being mapped?
		*/
		/* XXX: when discard is true, no need to read back from depth texture
		*/
		struct r600_texture *staging_depth;

		if (!r600_init_flushed_depth_texture(ctx, texture, &staging_depth)) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}

		/* Decompress the requested layers into the staging depth
		 * texture so the CPU sees plain values. */
		r600_blit_decompress_depth(ctx, rtex, staging_depth,
					   level, level,
					   box->z, box->z + box->depth - 1,
					   0, 0);

		trans->transfer.stride = staging_depth->surface.level[level].pitch_bytes;
		trans->offset = r600_texture_get_offset(staging_depth, level, box->z);
		trans->staging = (struct r600_resource*)staging_depth;
		return &trans->transfer;
	} else if (use_staging_texture) {
		/* Box-sized linear staging texture template. */
		resource.target = PIPE_TEXTURE_2D;
		resource.format = texture->format;
		resource.width0 = box->width;
		resource.height0 = box->height;
		resource.depth0 = 1;
		resource.array_size = 1;
		resource.last_level = 0;
		resource.nr_samples = 0;
		resource.usage = PIPE_USAGE_STAGING;
		resource.bind = 0;
		resource.flags = R600_RESOURCE_FLAG_TRANSFER;
		/* For texture reading, the temporary (detiled) texture is used as
		 * a render target when blitting from a tiled texture. */
		if (usage & PIPE_TRANSFER_READ) {
			resource.bind |= PIPE_BIND_RENDER_TARGET;
		}
		/* For texture writing, the temporary texture is used as a sampler
		 * when blitting into a tiled texture. */
		if (usage & PIPE_TRANSFER_WRITE) {
			resource.bind |= PIPE_BIND_SAMPLER_VIEW;
		}
		/* Create the temporary texture. */
		trans->staging = (struct r600_resource*)ctx->screen->resource_create(ctx->screen, &resource);
		if (trans->staging == NULL) {
			R600_ERR("failed to create temporary texture to hold untiled copy\n");
			pipe_resource_reference(&trans->transfer.resource, NULL);
			FREE(trans);
			return NULL;
		}

		trans->transfer.stride =
			((struct r600_texture *)trans->staging)->surface.level[0].pitch_bytes;
		if (usage & PIPE_TRANSFER_READ) {
			r600_copy_to_staging_texture(ctx, trans);
			/* Always referenced in the blit. */
			r600_flush(ctx, NULL, 0);
		}
		return &trans->transfer;
	}
	/* Direct map: no staging; point the transfer at the real level. */
	trans->transfer.stride = rtex->surface.level[level].pitch_bytes;
	trans->transfer.layer_stride = rtex->surface.level[level].slice_size;
	trans->offset = r600_texture_get_offset(rtex, level, box->z);
	return &trans->transfer;
}
    710 
    711 void r600_texture_transfer_destroy(struct pipe_context *ctx,
    712 				   struct pipe_transfer *transfer)
    713 {
    714 	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
    715 	struct pipe_resource *texture = transfer->resource;
    716 	struct r600_texture *rtex = (struct r600_texture*)texture;
    717 
    718 	if ((transfer->usage & PIPE_TRANSFER_WRITE) && rtransfer->staging) {
    719 		if (rtex->is_depth) {
    720 			ctx->resource_copy_region(ctx, texture, transfer->level,
    721 						  transfer->box.x, transfer->box.y, transfer->box.z,
    722 						  &rtransfer->staging->b.b, transfer->level,
    723 						  &transfer->box);
    724 		} else {
    725 			r600_copy_from_staging_texture(ctx, rtransfer);
    726 		}
    727 	}
    728 
    729 	if (rtransfer->staging)
    730 		pipe_resource_reference((struct pipe_resource**)&rtransfer->staging, NULL);
    731 
    732 	pipe_resource_reference(&transfer->resource, NULL);
    733 	FREE(transfer);
    734 }
    735 
/* Map a transfer previously set up by r600_texture_get_transfer.
 *
 * Maps the staging resource when one exists, otherwise the texture
 * itself, and returns a pointer adjusted to the transfer's box.
 * Returns NULL if the winsys map fails. */
void* r600_texture_transfer_map(struct pipe_context *ctx,
				struct pipe_transfer* transfer)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
	struct radeon_winsys_cs_handle *buf;
	struct r600_texture *rtex =
			(struct r600_texture*)transfer->resource;
	enum pipe_format format = transfer->resource->format;
	unsigned offset = 0;
	char *map;

	/* Compute global buffers have their own mapping path. */
	if ((transfer->resource->bind & PIPE_BIND_GLOBAL) && transfer->resource->target == PIPE_BUFFER) {
		return r600_compute_global_transfer_map(ctx, transfer);
	}

	if (rtransfer->staging) {
		buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
	} else {
		buf = ((struct r600_resource *)transfer->resource)->cs_buf;
	}

	/* Non-depth staging textures are sized to the mapped box and so
	 * are addressed from offset 0; otherwise step to the requested
	 * block using the format's block dimensions. */
	if (rtex->is_depth || !rtransfer->staging)
		offset = rtransfer->offset +
			transfer->box.y / util_format_get_blockheight(format) * transfer->stride +
			transfer->box.x / util_format_get_blockwidth(format) * util_format_get_blocksize(format);

	if (!(map = rctx->ws->buffer_map(buf, rctx->cs, transfer->usage))) {
		return NULL;
	}

	return map + offset;
}
    769 
    770 void r600_texture_transfer_unmap(struct pipe_context *ctx,
    771 				 struct pipe_transfer* transfer)
    772 {
    773 	struct r600_transfer *rtransfer = (struct r600_transfer*)transfer;
    774 	struct r600_context *rctx = (struct r600_context*)ctx;
    775 	struct radeon_winsys_cs_handle *buf;
    776 
    777 	if ((transfer->resource->bind & PIPE_BIND_GLOBAL) && transfer->resource->target == PIPE_BUFFER) {
    778 		return r600_compute_global_transfer_unmap(ctx, transfer);
    779 	}
    780 
    781 	if (rtransfer->staging) {
    782 		buf = ((struct r600_resource *)rtransfer->staging)->cs_buf;
    783 	} else {
    784 		buf = ((struct r600_resource *)transfer->resource)->cs_buf;
    785 	}
    786 	rctx->ws->buffer_unmap(buf);
    787 }
    788 
    789 void r600_init_surface_functions(struct r600_context *r600)
    790 {
    791 	r600->context.create_surface = r600_create_surface;
    792 	r600->context.surface_destroy = r600_surface_destroy;
    793 }
    794 
    795 static unsigned r600_get_swizzle_combined(const unsigned char *swizzle_format,
    796 		const unsigned char *swizzle_view)
    797 {
    798 	unsigned i;
    799 	unsigned char swizzle[4];
    800 	unsigned result = 0;
    801 	const uint32_t swizzle_shift[4] = {
    802 		16, 19, 22, 25,
    803 	};
    804 	const uint32_t swizzle_bit[4] = {
    805 		0, 1, 2, 3,
    806 	};
    807 
    808 	if (swizzle_view) {
    809 		util_format_compose_swizzles(swizzle_format, swizzle_view, swizzle);
    810 	} else {
    811 		memcpy(swizzle, swizzle_format, 4);
    812 	}
    813 
    814 	/* Get swizzle. */
    815 	for (i = 0; i < 4; i++) {
    816 		switch (swizzle[i]) {
    817 		case UTIL_FORMAT_SWIZZLE_Y:
    818 			result |= swizzle_bit[1] << swizzle_shift[i];
    819 			break;
    820 		case UTIL_FORMAT_SWIZZLE_Z:
    821 			result |= swizzle_bit[2] << swizzle_shift[i];
    822 			break;
    823 		case UTIL_FORMAT_SWIZZLE_W:
    824 			result |= swizzle_bit[3] << swizzle_shift[i];
    825 			break;
    826 		case UTIL_FORMAT_SWIZZLE_0:
    827 			result |= V_038010_SQ_SEL_0 << swizzle_shift[i];
    828 			break;
    829 		case UTIL_FORMAT_SWIZZLE_1:
    830 			result |= V_038010_SQ_SEL_1 << swizzle_shift[i];
    831 			break;
    832 		default: /* UTIL_FORMAT_SWIZZLE_X */
    833 			result |= swizzle_bit[0] << swizzle_shift[i];
    834 		}
    835 	}
    836 	return result;
    837 }
    838 
/* Texture format translation: map a gallium pipe_format onto the R600
 * hardware texture format (FMT_*) for SQ_TEX_RESOURCE_WORD1.
 *
 * swizzle_view: optional sampler-view swizzle, composed with the format's
 *               own swizzle and encoded into *word4_p.
 * word4_p:      if non-NULL, receives the swizzle / sign / number-format /
 *               degamma bits for SQ_TEX_RESOURCE_WORD4.
 * yuv_format_p: if non-NULL, receives the YUV flag word (only bit 30 is
 *               ever set here; the YUV path itself is still unsupported).
 *
 * Returns the FMT_* value, or ~0 if the format cannot be handled.
 */
uint32_t r600_translate_texformat(struct pipe_screen *screen,
				  enum pipe_format format,
				  const unsigned char *swizzle_view,
				  uint32_t *word4_p, uint32_t *yuv_format_p)
{
	uint32_t result = 0, word4 = 0, yuv_format = 0;
	const struct util_format_description *desc;
	boolean uniform = TRUE;
	/* Lazily initialized on first call; NOTE(review): the write to this
	 * static is not synchronized, though all threads compute the same
	 * value — confirm single-threaded screen init if this matters. */
	static int r600_enable_s3tc = -1;
	bool is_srgb_valid = FALSE;

	int i;
	/* Per-channel FORMAT_COMP "signed" bits for word4. */
	const uint32_t sign_bit[4] = {
		S_038010_FORMAT_COMP_X(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Y(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_Z(V_038010_SQ_FORMAT_COMP_SIGNED),
		S_038010_FORMAT_COMP_W(V_038010_SQ_FORMAT_COMP_SIGNED)
	};
	desc = util_format_description(format);

	/* Swizzle fields always go into word4, whatever the format. */
	word4 |= r600_get_swizzle_combined(desc->swizzle, swizzle_view);

	/* Colorspace (return non-RGB formats directly). */
	switch (desc->colorspace) {
		/* Depth stencil formats */
	case UTIL_FORMAT_COLORSPACE_ZS:
		switch (format) {
		case PIPE_FORMAT_Z16_UNORM:
			result = FMT_16;
			goto out_word4;
		case PIPE_FORMAT_X24S8_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through: shares FMT_8_24 with the Z24 variants */
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			result = FMT_8_24;
			goto out_word4;
		case PIPE_FORMAT_S8X24_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through: shares FMT_24_8 with the X8Z24 variants */
		case PIPE_FORMAT_X8Z24_UNORM:
		case PIPE_FORMAT_S8_UINT_Z24_UNORM:
			result = FMT_24_8;
			goto out_word4;
		case PIPE_FORMAT_S8_UINT:
			result = FMT_8;
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			goto out_word4;
		case PIPE_FORMAT_Z32_FLOAT:
			result = FMT_32_FLOAT;
			goto out_word4;
		case PIPE_FORMAT_X32_S8X24_UINT:
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
			/* fall through: shares FMT_X24_8_32_FLOAT */
		case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
			result = FMT_X24_8_32_FLOAT;
			goto out_word4;
		default:
			goto out_unknown;
		}

	case UTIL_FORMAT_COLORSPACE_YUV:
		/* Presumably the hardware YUV enable bit; unused while the
		 * YUV path below is a stub — TODO confirm against r600d.h. */
		yuv_format |= (1 << 30);
		switch (format) {
		case PIPE_FORMAT_UYVY:
		case PIPE_FORMAT_YUYV:
		default:
			break;
		}
		goto out_unknown; /* XXX */

	case UTIL_FORMAT_COLORSPACE_SRGB:
		/* sRGB sampling: let the hardware degamma; whether the base
		 * format supports it is checked via is_srgb_valid at the end. */
		word4 |= S_038010_FORCE_DEGAMMA(1);
		break;

	default:
		break;
	}

	/* S3TC availability: always on with new enough DRM, otherwise only
	 * behind the R600_ENABLE_S3TC debug option. */
	if (r600_enable_s3tc == -1) {
		struct r600_screen *rscreen = (struct r600_screen *)screen;
		if (rscreen->info.drm_minor >= 9)
			r600_enable_s3tc = 1;
		else
			r600_enable_s3tc = debug_get_bool_option("R600_ENABLE_S3TC", FALSE);
	}

	/* RGTC / LATC compressed formats map to BC4/BC5. */
	if (desc->layout == UTIL_FORMAT_LAYOUT_RGTC) {
		if (!r600_enable_s3tc)
			goto out_unknown;

		switch (format) {
		case PIPE_FORMAT_RGTC1_SNORM:
		case PIPE_FORMAT_LATC1_SNORM:
			word4 |= sign_bit[0];
			/* fall through: SNORM is UNORM + the sign bit */
		case PIPE_FORMAT_RGTC1_UNORM:
		case PIPE_FORMAT_LATC1_UNORM:
			result = FMT_BC4;
			goto out_word4;
		case PIPE_FORMAT_RGTC2_SNORM:
		case PIPE_FORMAT_LATC2_SNORM:
			word4 |= sign_bit[0] | sign_bit[1];
			/* fall through: SNORM is UNORM + the sign bits */
		case PIPE_FORMAT_RGTC2_UNORM:
		case PIPE_FORMAT_LATC2_UNORM:
			result = FMT_BC5;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	/* DXT (S3TC) compressed formats map to BC1/BC2/BC3. */
	if (desc->layout == UTIL_FORMAT_LAYOUT_S3TC) {

		if (!r600_enable_s3tc)
			goto out_unknown;

		/* Software decoder must also be available. */
		if (!util_format_s3tc_enabled) {
			goto out_unknown;
		}

		switch (format) {
		case PIPE_FORMAT_DXT1_RGB:
		case PIPE_FORMAT_DXT1_RGBA:
		case PIPE_FORMAT_DXT1_SRGB:
		case PIPE_FORMAT_DXT1_SRGBA:
			result = FMT_BC1;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT3_RGBA:
		case PIPE_FORMAT_DXT3_SRGBA:
			result = FMT_BC2;
			is_srgb_valid = TRUE;
			goto out_word4;
		case PIPE_FORMAT_DXT5_RGBA:
		case PIPE_FORMAT_DXT5_SRGBA:
			result = FMT_BC3;
			is_srgb_valid = TRUE;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	/* Subsampled (packed pair) RGB formats. */
	if (desc->layout == UTIL_FORMAT_LAYOUT_SUBSAMPLED) {
		switch (format) {
		case PIPE_FORMAT_R8G8_B8G8_UNORM:
		case PIPE_FORMAT_G8R8_B8R8_UNORM:
			result = FMT_GB_GR;
			goto out_word4;
		case PIPE_FORMAT_G8R8_G8B8_UNORM:
		case PIPE_FORMAT_R8G8_R8B8_UNORM:
			result = FMT_BG_RG;
			goto out_word4;
		default:
			goto out_unknown;
		}
	}

	/* Packed float formats with no per-channel description. */
	if (format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
		result = FMT_5_9_9_9_SHAREDEXP;
		goto out_word4;
	} else if (format == PIPE_FORMAT_R11G11B10_FLOAT) {
		result = FMT_10_11_11_FLOAT;
		goto out_word4;
	}


	/* Accumulate per-channel sign bits for plain formats. */
	for (i = 0; i < desc->nr_channels; i++) {
		if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
			word4 |= sign_bit[i];
		}
	}

	/* R8G8Bx_SNORM - XXX CxV8U8 */

	/* See whether the components are of the same size. */
	for (i = 1; i < desc->nr_channels; i++) {
		uniform = uniform && desc->channel[0].size == desc->channel[i].size;
	}

	/* Non-uniform formats. */
	if (!uniform) {
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[0].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);
		switch(desc->nr_channels) {
		case 3:
			/* 5:6:5 */
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 6 &&
			    desc->channel[2].size == 5) {
				result = FMT_5_6_5;
				goto out_word4;
			}
			goto out_unknown;
		case 4:
			/* 5:5:5:1 */
			if (desc->channel[0].size == 5 &&
			    desc->channel[1].size == 5 &&
			    desc->channel[2].size == 5 &&
			    desc->channel[3].size == 1) {
				result = FMT_1_5_5_5;
				goto out_word4;
			}
			/* 10:10:10:2 */
			if (desc->channel[0].size == 10 &&
			    desc->channel[1].size == 10 &&
			    desc->channel[2].size == 10 &&
			    desc->channel[3].size == 2) {
				result = FMT_2_10_10_10;
				goto out_word4;
			}
			goto out_unknown;
		}
		goto out_unknown;
	}

	/* Find the first non-VOID channel. */
	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	if (i == 4)
		goto out_unknown;

	/* uniform formats */
	switch (desc->channel[i].type) {
	case UTIL_FORMAT_TYPE_UNSIGNED:
	case UTIL_FORMAT_TYPE_SIGNED:
#if 0
		if (!desc->channel[i].normalized &&
		    desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB) {
			goto out_unknown;
		}
#endif
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_SRGB &&
		    desc->channel[i].pure_integer)
			word4 |= S_038010_NUM_FORMAT_ALL(V_038010_SQ_NUM_FORMAT_INT);

		/* Pick the FMT_* by channel width and count. */
		switch (desc->channel[i].size) {
		case 4:
			switch (desc->nr_channels) {
			case 2:
				result = FMT_4_4;
				goto out_word4;
			case 4:
				result = FMT_4_4_4_4;
				goto out_word4;
			}
			goto out_unknown;
		case 8:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_8;
				goto out_word4;
			case 2:
				result = FMT_8_8;
				goto out_word4;
			case 4:
				result = FMT_8_8_8_8;
				/* Only 8:8:8:8 supports degamma among plain formats. */
				is_srgb_valid = TRUE;
				goto out_word4;
			}
			goto out_unknown;
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16;
				goto out_word4;
			case 2:
				result = FMT_16_16;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32;
				goto out_word4;
			case 2:
				result = FMT_32_32;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32;
				goto out_word4;
			}
		}
		goto out_unknown;

	case UTIL_FORMAT_TYPE_FLOAT:
		switch (desc->channel[i].size) {
		case 16:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_16_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_16_16_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_16_16_16_16_FLOAT;
				goto out_word4;
			}
			goto out_unknown;
		case 32:
			switch (desc->nr_channels) {
			case 1:
				result = FMT_32_FLOAT;
				goto out_word4;
			case 2:
				result = FMT_32_32_FLOAT;
				goto out_word4;
			case 4:
				result = FMT_32_32_32_32_FLOAT;
				goto out_word4;
			}
		}
		goto out_unknown;
	}

out_word4:

	/* Reject sRGB requests on formats the hardware can't degamma. */
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB && !is_srgb_valid)
		return ~0;
	if (word4_p)
		*word4_p = word4;
	if (yuv_format_p)
		*yuv_format_p = yuv_format;
	return result;
out_unknown:
	/* R600_ERR("Unable to handle texformat %d %s\n", format, util_format_name(format)); */
	return ~0;
}
   1172