/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "pipe/p_state.h"
#include "util/u_dual_blend.h"
#include "util/u_string.h"
#include "util/u_memory.h"
#include "util/u_helpers.h"

#include "freedreno_state.h"
#include "freedreno_context.h"
#include "freedreno_resource.h"
#include "freedreno_texture.h"
#include "freedreno_gmem.h"
#include "freedreno_query_hw.h"
#include "freedreno_util.h"

/* All the generic state handling.  For CSOs that are specific to the
 * GPU generation, the bind and delete hooks can still live here when
 * they are common across generations.
 */

static void
fd_set_blend_color(struct pipe_context *pctx,
		const struct pipe_blend_color *blend_color)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->blend_color = *blend_color;
	ctx->dirty |= FD_DIRTY_BLEND_COLOR;
}

static void
fd_set_stencil_ref(struct pipe_context *pctx,
		const struct pipe_stencil_ref *stencil_ref)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stencil_ref = *stencil_ref;
	ctx->dirty |= FD_DIRTY_STENCIL_REF;
}

static void
fd_set_clip_state(struct pipe_context *pctx,
		const struct pipe_clip_state *clip)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->ucp = *clip;
	ctx->dirty |= FD_DIRTY_UCP;
}

static void
fd_set_sample_mask(struct pipe_context *pctx, unsigned sample_mask)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->sample_mask = (uint16_t)sample_mask;
	ctx->dirty |= FD_DIRTY_SAMPLE_MASK;
}

/* notes from calim on #dri-devel:
 * index==0 will be non-UBO (ie. glUniformXYZ()) all packed together padded
 * out to vec4's
 * I should be able to consider that I own the user_ptr until the next
 * set_constant_buffer() call, at which point I don't really care about the
 * previous values.
 * index>0 will be UBO's.. well, I'll worry about that later
 */
static void
fd_set_constant_buffer(struct pipe_context *pctx, uint shader, uint index,
		const struct pipe_constant_buffer *cb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_constbuf_stateobj *so = &ctx->constbuf[shader];

	util_copy_constant_buffer(&so->cb[index], cb);

	/* Note that the state tracker can unbind constant buffers by
	 * passing NULL here.
	 */
	if (unlikely(!cb)) {
		so->enabled_mask &= ~(1 << index);
		so->dirty_mask &= ~(1 << index);
		return;
	}

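	/* flag the slot as bound and in need of (re)emit; the generation
	 * specific emit code consumes enabled_mask/dirty_mask when writing
	 * const state into the cmdstream:
	 */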
	so->enabled_mask |= 1 << index;
	so->dirty_mask |= 1 << index;
	ctx->dirty |= FD_DIRTY_CONSTBUF;
}

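/* With reorder enabled, each framebuffer state maps to a batch looked up
 * (or created) in the batch cache, so switching render targets switches
 * batches instead of forcing a flush.  Without reorder there is a single
 * batch, which must be flushed before the render target changes.
 */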
static void
fd_set_framebuffer_state(struct pipe_context *pctx,
		const struct pipe_framebuffer_state *framebuffer)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_framebuffer_state *cso;

	if (ctx->screen->reorder) {
		struct fd_batch *batch, *old_batch = NULL;

		fd_batch_reference(&old_batch, ctx->batch);

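		/* pause any active hw queries on the outgoing batch before we
		 * stop appending to it:
		 */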
		if (likely(old_batch))
			fd_hw_query_set_stage(old_batch, old_batch->draw, FD_STAGE_NULL);

		batch = fd_batch_from_fb(&ctx->screen->batch_cache, ctx, framebuffer);
		fd_batch_reference(&ctx->batch, NULL);
		fd_reset_wfi(batch);
		ctx->batch = batch;
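		/* after switching batches we can no longer assume any state has
		 * already been emitted, so mark everything dirty:
		 */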
		ctx->dirty = ~0;

		if (old_batch && old_batch->blit && !old_batch->back_blit) {
			/* for blits, there is not really much point in hanging on
			 * to the uncommitted batch (ie. you probably don't blit
			 * multiple times to the same surface), so we might as
			 * well go ahead and flush this one:
			 */
			fd_batch_flush(old_batch, false);
		}

		fd_batch_reference(&old_batch, NULL);
	} else {
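		/* in the non-reorder case there is only one batch, so any
		 * in-flight rendering must be flushed before pointing it at a
		 * new render target:
		 */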
		DBG("%d: cbufs[0]=%p, zsbuf=%p", ctx->batch->needs_flush,
				framebuffer->cbufs[0], framebuffer->zsbuf);
		fd_batch_flush(ctx->batch, false);
	}

	cso = &ctx->batch->framebuffer;

	util_copy_framebuffer_state(cso, framebuffer);

	ctx->dirty |= FD_DIRTY_FRAMEBUFFER;

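	/* keep the fallback scissor in sync with the new fb dimensions; it
	 * is what fd_context_get_scissor() returns when the rasterizer has
	 * the scissor test disabled:
	 */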
	ctx->disabled_scissor.minx = 0;
	ctx->disabled_scissor.miny = 0;
	ctx->disabled_scissor.maxx = cso->width;
	ctx->disabled_scissor.maxy = cso->height;

	ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_polygon_stipple(struct pipe_context *pctx,
		const struct pipe_poly_stipple *stipple)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->stipple = *stipple;
	ctx->dirty |= FD_DIRTY_STIPPLE;
}

static void
fd_set_scissor_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_scissors,
		const struct pipe_scissor_state *scissor)
{
	struct fd_context *ctx = fd_context(pctx);

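	/* only a single scissor (and viewport, below) is supported, so
	 * start_slot/num_scissors are ignored and slot 0 is assumed:
	 */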
	ctx->scissor = *scissor;
	ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_set_viewport_states(struct pipe_context *pctx,
		unsigned start_slot,
		unsigned num_viewports,
		const struct pipe_viewport_state *viewport)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->viewport = *viewport;
	ctx->dirty |= FD_DIRTY_VIEWPORT;
}

static void
fd_set_vertex_buffers(struct pipe_context *pctx,
		unsigned start_slot, unsigned count,
		const struct pipe_vertex_buffer *vb)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_vertexbuf_stateobj *so = &ctx->vtx.vertexbuf;
	int i;

	/* on a2xx, pitch is encoded in the vtx fetch instruction, so
	 * we need to mark VTXSTATE as dirty as well to trigger patching
	 * and re-emitting the vtx shader:
	 */
	if (ctx->screen->gpu_id < 300) {
		for (i = 0; i < count; i++) {
			bool new_enabled = vb && (vb[i].buffer || vb[i].user_buffer);
			bool old_enabled = so->vb[i].buffer || so->vb[i].user_buffer;
			uint32_t new_stride = vb ? vb[i].stride : 0;
			uint32_t old_stride = so->vb[i].stride;
			if ((new_enabled != old_enabled) || (new_stride != old_stride)) {
				ctx->dirty |= FD_DIRTY_VTXSTATE;
				break;
			}
		}
	}

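	/* util_set_vertex_buffers_mask() takes references on the incoming
	 * buffers (or clears the slots when vb is NULL) and updates
	 * enabled_mask accordingly:
	 */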
	util_set_vertex_buffers_mask(so->vb, &so->enabled_mask, vb, start_slot, count);
	so->count = util_last_bit(so->enabled_mask);

	ctx->dirty |= FD_DIRTY_VTXBUF;
}

static void
fd_set_index_buffer(struct pipe_context *pctx,
		const struct pipe_index_buffer *ib)
{
	struct fd_context *ctx = fd_context(pctx);

	if (ib) {
		pipe_resource_reference(&ctx->indexbuf.buffer, ib->buffer);
		ctx->indexbuf.index_size = ib->index_size;
		ctx->indexbuf.offset = ib->offset;
		ctx->indexbuf.user_buffer = ib->user_buffer;
	} else {
		pipe_resource_reference(&ctx->indexbuf.buffer, NULL);
	}

	ctx->dirty |= FD_DIRTY_INDEXBUF;
}

static void
fd_blend_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_blend_state *cso = hwcso;
	bool old_is_dual = ctx->blend ?
		ctx->blend->rt[0].blend_enable && util_blend_state_is_dual(ctx->blend, 0) :
		false;
	bool new_is_dual = cso ?
		cso->rt[0].blend_enable && util_blend_state_is_dual(cso, 0) :
		false;
	ctx->blend = hwcso;
	ctx->dirty |= FD_DIRTY_BLEND;
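	/* dual-source blend enable/disable gets its own dirty bit, since it
	 * affects more than the regular blend registers (presumably the
	 * gen-specific backend reconfigures its color outputs/MRTs):
	 */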
	if (old_is_dual != new_is_dual)
		ctx->dirty |= FD_DIRTY_BLEND_DUAL;
}

static void
fd_blend_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_rasterizer_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	struct pipe_scissor_state *old_scissor = fd_context_get_scissor(ctx);

	ctx->rasterizer = hwcso;
	ctx->dirty |= FD_DIRTY_RASTERIZER;

	/* if the scissor enable bit changed we need to mark the scissor
	 * state as dirty as well:
	 * NOTE: a pointer compare is enough, since we only care whether
	 * it changed to/from &ctx->disabled_scissor
	 */
	if (old_scissor != fd_context_get_scissor(ctx))
		ctx->dirty |= FD_DIRTY_SCISSOR;
}

static void
fd_rasterizer_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_zsa_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->zsa = hwcso;
	ctx->dirty |= FD_DIRTY_ZSA;
}

static void
fd_zsa_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void *
fd_vertex_state_create(struct pipe_context *pctx, unsigned num_elements,
		const struct pipe_vertex_element *elements)
{
	struct fd_vertex_stateobj *so = CALLOC_STRUCT(fd_vertex_stateobj);

	if (!so)
		return NULL;

	memcpy(so->pipe, elements, sizeof(*elements) * num_elements);
	so->num_elements = num_elements;

	return so;
}

static void
fd_vertex_state_delete(struct pipe_context *pctx, void *hwcso)
{
	FREE(hwcso);
}

static void
fd_vertex_state_bind(struct pipe_context *pctx, void *hwcso)
{
	struct fd_context *ctx = fd_context(pctx);
	ctx->vtx.vtx = hwcso;
	ctx->dirty |= FD_DIRTY_VTXSTATE;
}

static struct pipe_stream_output_target *
fd_create_stream_output_target(struct pipe_context *pctx,
		struct pipe_resource *prsc, unsigned buffer_offset,
		unsigned buffer_size)
{
	struct pipe_stream_output_target *target;
	struct fd_resource *rsc = fd_resource(prsc);

	target = CALLOC_STRUCT(pipe_stream_output_target);
	if (!target)
		return NULL;

	pipe_reference_init(&target->reference, 1);
	pipe_resource_reference(&target->buffer, prsc);

	target->context = pctx;
	target->buffer_offset = buffer_offset;
	target->buffer_size = buffer_size;

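	/* streamout will write into this range of the buffer, so add it to
	 * valid_buffer_range, which transfer_map uses to decide whether CPU
	 * access needs to synchronize against GPU writes:
	 */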
	assert(rsc->base.b.target == PIPE_BUFFER);
	util_range_add(&rsc->valid_buffer_range,
		buffer_offset, buffer_offset + buffer_size);

	return target;
}

static void
fd_stream_output_target_destroy(struct pipe_context *pctx,
		struct pipe_stream_output_target *target)
{
	pipe_resource_reference(&target->buffer, NULL);
	FREE(target);
}

static void
fd_set_stream_output_targets(struct pipe_context *pctx,
		unsigned num_targets, struct pipe_stream_output_target **targets,
		const unsigned *offsets)
{
	struct fd_context *ctx = fd_context(pctx);
	struct fd_streamout_stateobj *so = &ctx->streamout;
	unsigned i;

	debug_assert(num_targets <= ARRAY_SIZE(so->targets));

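	/* per the gallium interface, an offset of -1 means "append": keep
	 * using the target's current offset rather than starting over:
	 */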
	for (i = 0; i < num_targets; i++) {
		boolean changed = targets[i] != so->targets[i];
		boolean append = (offsets[i] == (unsigned)-1);

		if (!changed && append)
			continue;

		if (!append)
			so->offsets[i] = offsets[i];

		pipe_so_target_reference(&so->targets[i], targets[i]);
	}

	for (; i < so->num_targets; i++) {
		pipe_so_target_reference(&so->targets[i], NULL);
	}

	so->num_targets = num_targets;

	ctx->dirty |= FD_DIRTY_STREAMOUT;
}

void
fd_state_init(struct pipe_context *pctx)
{
	pctx->set_blend_color = fd_set_blend_color;
	pctx->set_stencil_ref = fd_set_stencil_ref;
	pctx->set_clip_state = fd_set_clip_state;
	pctx->set_sample_mask = fd_set_sample_mask;
	pctx->set_constant_buffer = fd_set_constant_buffer;
	pctx->set_framebuffer_state = fd_set_framebuffer_state;
	pctx->set_polygon_stipple = fd_set_polygon_stipple;
	pctx->set_scissor_states = fd_set_scissor_states;
	pctx->set_viewport_states = fd_set_viewport_states;

	pctx->set_vertex_buffers = fd_set_vertex_buffers;
	pctx->set_index_buffer = fd_set_index_buffer;

	pctx->bind_blend_state = fd_blend_state_bind;
	pctx->delete_blend_state = fd_blend_state_delete;

	pctx->bind_rasterizer_state = fd_rasterizer_state_bind;
	pctx->delete_rasterizer_state = fd_rasterizer_state_delete;

	pctx->bind_depth_stencil_alpha_state = fd_zsa_state_bind;
	pctx->delete_depth_stencil_alpha_state = fd_zsa_state_delete;

	pctx->create_vertex_elements_state = fd_vertex_state_create;
	pctx->delete_vertex_elements_state = fd_vertex_state_delete;
	pctx->bind_vertex_elements_state = fd_vertex_state_bind;

	pctx->create_stream_output_target = fd_create_stream_output_target;
	pctx->stream_output_target_destroy = fd_stream_output_target_destroy;
	pctx->set_stream_output_targets = fd_set_stream_output_targets;
}
    437