/* Mesa classic radeon DRI driver — common context / cmdbuf code. */
      1 /**************************************************************************
      2 
      3 Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
      4 
      5 The Weather Channel (TM) funded Tungsten Graphics to develop the
      6 initial release of the Radeon 8500 driver under the XFree86 license.
      7 This notice must be preserved.
      8 
      9 Permission is hereby granted, free of charge, to any person obtaining
     10 a copy of this software and associated documentation files (the
     11 "Software"), to deal in the Software without restriction, including
     12 without limitation the rights to use, copy, modify, merge, publish,
     13 distribute, sublicense, and/or sell copies of the Software, and to
     14 permit persons to whom the Software is furnished to do so, subject to
     15 the following conditions:
     16 
     17 The above copyright notice and this permission notice (including the
     18 next paragraph) shall be included in all copies or substantial
     19 portions of the Software.
     20 
     21 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
     22 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
     23 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
     24 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
     25 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
     26 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
     27 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
     28 
     29 **************************************************************************/
     30 
     31 /*
     32  * Authors:
     33  *   Keith Whitwell <keith (at) tungstengraphics.com>
     34  */
     35 
     36 /*
     37    - Scissor implementation
     38    - buffer swap/copy ioctls
     39    - finish/flush
     40    - state emission
     41    - cmdbuffer management
     42 */
     43 
     44 #include <errno.h>
     45 #include "main/glheader.h"
     46 #include "main/imports.h"
     47 #include "main/context.h"
     48 #include "main/enums.h"
     49 #include "main/fbobject.h"
     50 #include "main/framebuffer.h"
     51 #include "main/renderbuffer.h"
     52 #include "drivers/common/meta.h"
     53 
     54 #include "radeon_common.h"
     55 #include "radeon_drm.h"
     56 #include "radeon_queryobj.h"
     57 
     58 /**
     59  * Enable verbose debug output for emit code.
     60  * 0 no output
     61  * 1 most output
 * 2 also print state values
     63  */
     64 #define RADEON_CMDBUF         0
     65 
     66 /* =============================================================
     67  * Scissoring
     68  */
     69 
     70 /**
     71  * Update cliprects and scissors.
     72  */
     73 void radeonSetCliprects(radeonContextPtr radeon)
     74 {
     75 	__DRIdrawable *const drawable = radeon_get_drawable(radeon);
     76 	__DRIdrawable *const readable = radeon_get_readable(radeon);
     77 
     78 	if(drawable == NULL && readable == NULL)
     79 		return;
     80 
     81 	struct radeon_framebuffer *const draw_rfb = drawable->driverPrivate;
     82 	struct radeon_framebuffer *const read_rfb = readable->driverPrivate;
     83 
     84 	if ((draw_rfb->base.Width != drawable->w) ||
     85 	    (draw_rfb->base.Height != drawable->h)) {
     86 		_mesa_resize_framebuffer(radeon->glCtx, &draw_rfb->base,
     87 					 drawable->w, drawable->h);
     88 		draw_rfb->base.Initialized = GL_TRUE;
     89 	}
     90 
     91 	if (drawable != readable) {
     92 		if ((read_rfb->base.Width != readable->w) ||
     93 		    (read_rfb->base.Height != readable->h)) {
     94 			_mesa_resize_framebuffer(radeon->glCtx, &read_rfb->base,
     95 						 readable->w, readable->h);
     96 			read_rfb->base.Initialized = GL_TRUE;
     97 		}
     98 	}
     99 
    100 	if (radeon->state.scissor.enabled)
    101 		radeonUpdateScissor(radeon->glCtx);
    102 
    103 }
    104 
    105 
    106 
    107 void radeonUpdateScissor( struct gl_context *ctx )
    108 {
    109 	radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
    110 	GLint x = ctx->Scissor.X, y = ctx->Scissor.Y;
    111 	GLsizei w = ctx->Scissor.Width, h = ctx->Scissor.Height;
    112 	int x1, y1, x2, y2;
    113 	int min_x, min_y, max_x, max_y;
    114 
    115 	if (!ctx->DrawBuffer)
    116 	    return;
    117 	min_x = min_y = 0;
    118 	max_x = ctx->DrawBuffer->Width - 1;
    119 	max_y = ctx->DrawBuffer->Height - 1;
    120 
    121 	if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
    122 		x1 = x;
    123 		y1 = ctx->DrawBuffer->Height - (y + h);
    124 		x2 = x + w - 1;
    125 		y2 = y1 + h - 1;
    126 	} else {
    127 		x1 = x;
    128 		y1 = y;
    129 		x2 = x + w - 1;
    130 		y2 = y + h - 1;
    131 
    132 	}
    133 
    134 	rmesa->state.scissor.rect.x1 = CLAMP(x1,  min_x, max_x);
    135 	rmesa->state.scissor.rect.y1 = CLAMP(y1,  min_y, max_y);
    136 	rmesa->state.scissor.rect.x2 = CLAMP(x2,  min_x, max_x);
    137 	rmesa->state.scissor.rect.y2 = CLAMP(y2,  min_y, max_y);
    138 
    139 	if (rmesa->vtbl.update_scissor)
    140 	   rmesa->vtbl.update_scissor(ctx);
    141 }
    142 
/* =============================================================
 * Scissor state callback
 */
    146 
    147 void radeonScissor(struct gl_context* ctx, GLint x, GLint y, GLsizei w, GLsizei h)
    148 {
    149 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    150 	if (ctx->Scissor.Enabled) {
    151 		/* We don't pipeline cliprect changes */
    152 		radeon_firevertices(radeon);
    153 		radeonUpdateScissor(ctx);
    154 	}
    155 }
    156 
    157 /* ================================================================
    158  * SwapBuffers with client-side throttling
    159  */
    160 
    161 uint32_t radeonGetAge(radeonContextPtr radeon)
    162 {
    163 	drm_radeon_getparam_t gp;
    164 	int ret;
    165 	uint32_t age;
    166 
    167 	gp.param = RADEON_PARAM_LAST_CLEAR;
    168 	gp.value = (int *)&age;
    169 	ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
    170 				  &gp, sizeof(gp));
    171 	if (ret) {
    172 		fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
    173 			ret);
    174 		exit(1);
    175 	}
    176 
    177 	return age;
    178 }
    179 
    180 /**
    181  * Check if we're about to draw into the front color buffer.
    182  * If so, set the intel->front_buffer_dirty field to true.
    183  */
    184 void
    185 radeon_check_front_buffer_rendering(struct gl_context *ctx)
    186 {
    187 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    188 	const struct gl_framebuffer *fb = ctx->DrawBuffer;
    189 
    190 	if (fb->Name == 0) {
    191 		/* drawing to window system buffer */
    192 		if (fb->_NumColorDrawBuffers > 0) {
    193 			if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
    194 				radeon->front_buffer_dirty = GL_TRUE;
    195 			}
    196 		}
    197 	}
    198 }
    199 
    200 
    201 void radeon_draw_buffer(struct gl_context *ctx, struct gl_framebuffer *fb)
    202 {
    203 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    204 	struct radeon_renderbuffer *rrbDepth = NULL, *rrbStencil = NULL,
    205 		*rrbColor = NULL;
    206 	uint32_t offset = 0;
    207 
    208 
    209 	if (!fb) {
    210 		/* this can happen during the initial context initialization */
    211 		return;
    212 	}
    213 
    214 	/* radeons only handle 1 color draw so far */
    215 	if (fb->_NumColorDrawBuffers != 1) {
    216 		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
    217 		return;
    218 	}
    219 
    220 	/* Do this here, note core Mesa, since this function is called from
    221 	 * many places within the driver.
    222 	 */
    223 	if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
    224 		/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
    225 		_mesa_update_framebuffer(ctx);
    226 		/* this updates the DrawBuffer's Width/Height if it's a FBO */
    227 		_mesa_update_draw_buffer_bounds(ctx);
    228 	}
    229 
    230 	if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
    231 		/* this may occur when we're called by glBindFrameBuffer() during
    232 		 * the process of someone setting up renderbuffers, etc.
    233 		 */
    234 		/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
    235 		return;
    236 	}
    237 
    238 	if (fb->Name)
    239 		;/* do something depthy/stencily TODO */
    240 
    241 
    242 		/* none */
    243 	if (fb->Name == 0) {
    244 		if (fb->_ColorDrawBufferIndexes[0] == BUFFER_FRONT_LEFT) {
    245 			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_FRONT_LEFT].Renderbuffer);
    246 			radeon->front_cliprects = GL_TRUE;
    247 		} else {
    248 			rrbColor = radeon_renderbuffer(fb->Attachment[BUFFER_BACK_LEFT].Renderbuffer);
    249 			radeon->front_cliprects = GL_FALSE;
    250 		}
    251 	} else {
    252 		/* user FBO in theory */
    253 		struct radeon_renderbuffer *rrb;
    254 		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[0]);
    255 		if (rrb) {
    256 			offset = rrb->draw_offset;
    257 			rrbColor = rrb;
    258 		}
    259 	}
    260 
    261 	if (rrbColor == NULL)
    262 		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_TRUE);
    263 	else
    264 		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DRAW_BUFFER, GL_FALSE);
    265 
    266 
    267 	if (fb->Attachment[BUFFER_DEPTH].Renderbuffer) {
    268 		rrbDepth = radeon_renderbuffer(fb->Attachment[BUFFER_DEPTH].Renderbuffer);
    269 		if (rrbDepth && rrbDepth->bo) {
    270 			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
    271 		} else {
    272 			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_TRUE);
    273 		}
    274 	} else {
    275 		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_DEPTH_BUFFER, GL_FALSE);
    276 		rrbDepth = NULL;
    277 	}
    278 
    279 	if (fb->Attachment[BUFFER_STENCIL].Renderbuffer) {
    280 		rrbStencil = radeon_renderbuffer(fb->Attachment[BUFFER_STENCIL].Renderbuffer);
    281 		if (rrbStencil && rrbStencil->bo) {
    282 			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
    283 			/* need to re-compute stencil hw state */
    284 			if (!rrbDepth)
    285 				rrbDepth = rrbStencil;
    286 		} else {
    287 			radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_TRUE);
    288 		}
    289 	} else {
    290 		radeon->vtbl.fallback(ctx, RADEON_FALLBACK_STENCIL_BUFFER, GL_FALSE);
    291 		if (ctx->Driver.Enable != NULL)
    292 			ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
    293 		else
    294 			ctx->NewState |= _NEW_STENCIL;
    295 	}
    296 
    297 	/* Update culling direction which changes depending on the
    298 	 * orientation of the buffer:
    299 	 */
    300 	if (ctx->Driver.FrontFace)
    301 		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
    302 	else
    303 		ctx->NewState |= _NEW_POLYGON;
    304 
    305 	/*
    306 	 * Update depth test state
    307 	 */
    308 	if (ctx->Driver.Enable) {
    309 		ctx->Driver.Enable(ctx, GL_DEPTH_TEST,
    310 				   (ctx->Depth.Test && fb->Visual.depthBits > 0));
    311 		/* Need to update the derived ctx->Stencil._Enabled first */
    312 		ctx->Driver.Enable(ctx, GL_STENCIL_TEST,
    313 				   (ctx->Stencil.Enabled && fb->Visual.stencilBits > 0));
    314 	} else {
    315 		ctx->NewState |= (_NEW_DEPTH | _NEW_STENCIL);
    316 	}
    317 
    318 	_mesa_reference_renderbuffer(&radeon->state.depth.rb, &rrbDepth->base.Base);
    319 	_mesa_reference_renderbuffer(&radeon->state.color.rb, &rrbColor->base.Base);
    320 	radeon->state.color.draw_offset = offset;
    321 
    322 #if 0
    323 	/* update viewport since it depends on window size */
    324 	if (ctx->Driver.Viewport) {
    325 		ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
    326 				     ctx->Viewport.Width, ctx->Viewport.Height);
    327 	} else {
    328 
    329 	}
    330 #endif
    331 	ctx->NewState |= _NEW_VIEWPORT;
    332 
    333 	/* Set state we know depends on drawable parameters:
    334 	 */
    335 	radeonUpdateScissor(ctx);
    336 	radeon->NewGLState |= _NEW_SCISSOR;
    337 
    338 	if (ctx->Driver.DepthRange)
    339 		ctx->Driver.DepthRange(ctx,
    340 				       ctx->Viewport.Near,
    341 				       ctx->Viewport.Far);
    342 
    343 	/* Update culling direction which changes depending on the
    344 	 * orientation of the buffer:
    345 	 */
    346 	if (ctx->Driver.FrontFace)
    347 		ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
    348 	else
    349 		ctx->NewState |= _NEW_POLYGON;
    350 }
    351 
    352 /**
    353  * Called via glDrawBuffer.
    354  */
    355 void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
    356 {
    357 	if (RADEON_DEBUG & RADEON_DRI)
    358 		fprintf(stderr, "%s %s\n", __FUNCTION__,
    359 			_mesa_lookup_enum_by_nr( mode ));
    360 
    361 	if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
    362 		radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    363 
    364 		const GLboolean was_front_buffer_rendering =
    365 			radeon->is_front_buffer_rendering;
    366 
    367 		radeon->is_front_buffer_rendering = (mode == GL_FRONT_LEFT) ||
    368                                             (mode == GL_FRONT);
    369 
    370       /* If we weren't front-buffer rendering before but we are now, make sure
    371        * that the front-buffer has actually been allocated.
    372        */
    373 		if (!was_front_buffer_rendering && radeon->is_front_buffer_rendering) {
    374 			radeon_update_renderbuffers(radeon->dri.context,
    375 				radeon->dri.context->driDrawablePriv, GL_FALSE);
    376       }
    377 	}
    378 
    379 	radeon_draw_buffer(ctx, ctx->DrawBuffer);
    380 }
    381 
    382 void radeonReadBuffer( struct gl_context *ctx, GLenum mode )
    383 {
    384 	if (ctx->DrawBuffer && _mesa_is_winsys_fbo(ctx->DrawBuffer)) {
    385 		struct radeon_context *const rmesa = RADEON_CONTEXT(ctx);
    386 		const GLboolean was_front_buffer_reading = rmesa->is_front_buffer_reading;
    387 		rmesa->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
    388 					|| (mode == GL_FRONT);
    389 
    390 		if (!was_front_buffer_reading && rmesa->is_front_buffer_reading) {
    391 			radeon_update_renderbuffers(rmesa->dri.context,
    392 						    rmesa->dri.context->driReadablePriv, GL_FALSE);
    393 	 	}
    394 	}
    395 	/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
    396 	if (ctx->ReadBuffer == ctx->DrawBuffer) {
    397 		/* This will update FBO completeness status.
    398 		 * A framebuffer will be incomplete if the GL_READ_BUFFER setting
    399 		 * refers to a missing renderbuffer.  Calling glReadBuffer can set
    400 		 * that straight and can make the drawing buffer complete.
    401 		 */
    402 		radeon_draw_buffer(ctx, ctx->DrawBuffer);
    403 	}
    404 }
    405 
    406 void radeon_window_moved(radeonContextPtr radeon)
    407 {
    408 	/* Cliprects has to be updated before doing anything else */
    409 	radeonSetCliprects(radeon);
    410 }
    411 
    412 void radeon_viewport(struct gl_context *ctx, GLint x, GLint y, GLsizei width, GLsizei height)
    413 {
    414 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    415 	__DRIcontext *driContext = radeon->dri.context;
    416 	void (*old_viewport)(struct gl_context *ctx, GLint x, GLint y,
    417 			     GLsizei w, GLsizei h);
    418 
    419 	if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
    420 		if (radeon->is_front_buffer_rendering) {
    421 			ctx->Driver.Flush(ctx);
    422 		}
    423 		radeon_update_renderbuffers(driContext, driContext->driDrawablePriv, GL_FALSE);
    424 		if (driContext->driDrawablePriv != driContext->driReadablePriv)
    425 			radeon_update_renderbuffers(driContext, driContext->driReadablePriv, GL_FALSE);
    426 	}
    427 
    428 	old_viewport = ctx->Driver.Viewport;
    429 	ctx->Driver.Viewport = NULL;
    430 	radeon_window_moved(radeon);
    431 	radeon_draw_buffer(ctx, radeon->glCtx->DrawBuffer);
    432 	ctx->Driver.Viewport = old_viewport;
    433 }
    434 
    435 static void radeon_print_state_atom(radeonContextPtr radeon, struct radeon_state_atom *state)
    436 {
    437 	int i, j, reg, count;
    438 	int dwords;
    439 	uint32_t packet0;
    440 	if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
    441 		return;
    442 
    443 	dwords = (*state->check) (radeon->glCtx, state);
    444 
    445 	fprintf(stderr, "  emit %s %d/%d\n", state->name, dwords, state->cmd_size);
    446 
    447 	if (state->cmd && radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
    448 		if (dwords > state->cmd_size)
    449 			dwords = state->cmd_size;
    450 		for (i = 0; i < dwords;) {
    451 			packet0 = state->cmd[i];
    452 			reg = (packet0 & 0x1FFF) << 2;
    453 			count = ((packet0 & 0x3FFF0000) >> 16) + 1;
    454 			fprintf(stderr, "      %s[%d]: cmdpacket0 (first reg=0x%04x, count=%d)\n",
    455 					state->name, i, reg, count);
    456 			++i;
    457 			for (j = 0; j < count && i < dwords; j++) {
    458 				fprintf(stderr, "      %s[%d]: 0x%04x = %08x\n",
    459 						state->name, i, reg, state->cmd[i]);
    460 				reg += 4;
    461 				++i;
    462 			}
    463 		}
    464 	}
    465 }
    466 
    467 /**
    468  * Count total size for next state emit.
    469  **/
    470 GLuint radeonCountStateEmitSize(radeonContextPtr radeon)
    471 {
    472 	struct radeon_state_atom *atom;
    473 	GLuint dwords = 0;
    474 	/* check if we are going to emit full state */
    475 
    476 	if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
    477 		if (!radeon->hw.is_dirty)
    478 			goto out;
    479 		foreach(atom, &radeon->hw.atomlist) {
    480 			if (atom->dirty) {
    481 				const GLuint atom_size = atom->check(radeon->glCtx, atom);
    482 				dwords += atom_size;
    483 				if (RADEON_CMDBUF && atom_size) {
    484 					radeon_print_state_atom(radeon, atom);
    485 				}
    486 			}
    487 		}
    488 	} else {
    489 		foreach(atom, &radeon->hw.atomlist) {
    490 			const GLuint atom_size = atom->check(radeon->glCtx, atom);
    491 			dwords += atom_size;
    492 			if (RADEON_CMDBUF && atom_size) {
    493 				radeon_print_state_atom(radeon, atom);
    494 			}
    495 
    496 		}
    497 	}
    498 out:
    499 	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
    500 	return dwords;
    501 }
    502 
/*
 * Emit a single state atom into the command stream.
 *
 * The atom's check() callback reports how many dwords it currently
 * needs; zero means the atom is inactive and is skipped.  Atoms that
 * provide an emit() callback generate their own commands; otherwise
 * the pre-built cmd[] table is copied verbatim into the batch.
 */
static INLINE void radeon_emit_atom(radeonContextPtr radeon, struct radeon_state_atom *atom)
{
	BATCH_LOCALS(radeon);
	int dwords;

	/* Ask the atom how many dwords it would emit right now. */
	dwords = (*atom->check) (radeon->glCtx, atom);
	if (dwords) {

		radeon_print_state_atom(radeon, atom);

		if (atom->emit) {
			/* Atom builds its own commands. */
			(*atom->emit)(radeon->glCtx, atom);
		} else {
			/* Copy the prepared command table into the batch. */
			BEGIN_BATCH_NO_AUTOSTATE(dwords);
			OUT_BATCH_TABLE(atom->cmd, dwords);
			END_BATCH();
		}
		/* Atom contents are now in the stream. */
		atom->dirty = GL_FALSE;

	} else {
		radeon_print(RADEON_STATE, RADEON_VERBOSE, "  skip state %s\n", atom->name);
	}

}
    527 
    528 static INLINE void radeonEmitAtoms(radeonContextPtr radeon, GLboolean emitAll)
    529 {
    530 	struct radeon_state_atom *atom;
    531 
    532 	if (radeon->vtbl.pre_emit_atoms)
    533 		radeon->vtbl.pre_emit_atoms(radeon);
    534 
    535 	/* Emit actual atoms */
    536 	if (radeon->hw.all_dirty || emitAll) {
    537 		foreach(atom, &radeon->hw.atomlist)
    538 			radeon_emit_atom( radeon, atom );
    539 	} else {
    540 		foreach(atom, &radeon->hw.atomlist) {
    541 			if ( atom->dirty )
    542 				radeon_emit_atom( radeon, atom );
    543 		}
    544 	}
    545 
    546 	COMMIT_BATCH();
    547 }
    548 
    549 static GLboolean radeon_revalidate_bos(struct gl_context *ctx)
    550 {
    551 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    552 	int ret;
    553 
    554 	ret = radeon_cs_space_check(radeon->cmdbuf.cs);
    555 	if (ret == RADEON_CS_SPACE_FLUSH)
    556 		return GL_FALSE;
    557 	return GL_TRUE;
    558 }
    559 
    560 void radeonEmitState(radeonContextPtr radeon)
    561 {
    562 	radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
    563 
    564 	if (radeon->vtbl.pre_emit_state)
    565 		radeon->vtbl.pre_emit_state(radeon);
    566 
    567 	/* this code used to return here but now it emits zbs */
    568 	if (radeon->cmdbuf.cs->cdw && !radeon->hw.is_dirty && !radeon->hw.all_dirty)
    569 		return;
    570 
    571 	if (!radeon->cmdbuf.cs->cdw) {
    572 		if (RADEON_DEBUG & RADEON_STATE)
    573 			fprintf(stderr, "Begin reemit state\n");
    574 
    575 		radeonEmitAtoms(radeon, GL_TRUE);
    576 	} else {
    577 
    578 		if (RADEON_DEBUG & RADEON_STATE)
    579 			fprintf(stderr, "Begin dirty state\n");
    580 
    581 		radeonEmitAtoms(radeon, GL_FALSE);
    582 	}
    583 
    584 	radeon->hw.is_dirty = GL_FALSE;
    585 	radeon->hw.all_dirty = GL_FALSE;
    586 }
    587 
    588 
/**
 * glFlush hook: push any pending DMA and command-stream work to the
 * kernel, then report front-buffer damage to the DRI2 loader when we
 * have been rendering to the front buffer.
 */
void radeonFlush(struct gl_context *ctx)
{
	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
	if (RADEON_DEBUG & RADEON_IOCTL)
		fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);

	/* okay if we have no cmds in the buffer &&
	   we have no DMA flush &&
	   we have no DMA buffer allocated.
	   then no point flushing anything at all.
	*/
	if (!radeon->dma.flush && !radeon->cmdbuf.cs->cdw && is_empty_list(&radeon->dma.reserved))
		goto flush_front;

	/* Flush queued DMA uploads first; this may append to the CS. */
	if (radeon->dma.flush)
		radeon->dma.flush( ctx );

	if (radeon->cmdbuf.cs->cdw)
		rcommonFlushCmdBuf(radeon, __FUNCTION__);

flush_front:
	/* Notify the loader of front-buffer damage (DRI2 loaders >= v2
	 * that implement flushFrontBuffer only).
	 */
	if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
		__DRIscreen *const screen = radeon->radeonScreen->driScreen;

		if (screen->dri2.loader && (screen->dri2.loader->base.version >= 2)
			&& (screen->dri2.loader->flushFrontBuffer != NULL)) {
			__DRIdrawable * drawable = radeon_get_drawable(radeon);

			/* We set the dirty bit in radeon_prepare_render() if we're
			 * front buffer rendering once we get there.
			 */
			radeon->front_buffer_dirty = GL_FALSE;

			(*screen->dri2.loader->flushFrontBuffer)(drawable, drawable->loaderPrivate);
		}
	}
}
    626 
    627 /* Make sure all commands have been sent to the hardware and have
    628  * completed processing.
    629  */
    630 void radeonFinish(struct gl_context * ctx)
    631 {
    632 	radeonContextPtr radeon = RADEON_CONTEXT(ctx);
    633 	struct gl_framebuffer *fb = ctx->DrawBuffer;
    634 	struct radeon_renderbuffer *rrb;
    635 	int i;
    636 
    637 	if (ctx->Driver.Flush)
    638 		ctx->Driver.Flush(ctx); /* +r6/r7 */
    639 
    640 	for (i = 0; i < fb->_NumColorDrawBuffers; i++) {
    641 		struct radeon_renderbuffer *rrb;
    642 		rrb = radeon_renderbuffer(fb->_ColorDrawBuffers[i]);
    643 		if (rrb && rrb->bo)
    644 			radeon_bo_wait(rrb->bo);
    645 	}
    646 	rrb = radeon_get_depthbuffer(radeon);
    647 	if (rrb && rrb->bo)
    648 		radeon_bo_wait(rrb->bo);
    649 }
    650 
    651 /* cmdbuffer */
    652 /**
    653  * Send the current command buffer via ioctl to the hardware.
    654  */
    655 int rcommonFlushCmdBufLocked(radeonContextPtr rmesa, const char *caller)
    656 {
    657 	int ret = 0;
    658 
    659 	if (rmesa->cmdbuf.flushing) {
    660 		fprintf(stderr, "Recursive call into r300FlushCmdBufLocked!\n");
    661 		exit(-1);
    662 	}
    663 	rmesa->cmdbuf.flushing = 1;
    664 
    665 	if (RADEON_DEBUG & RADEON_IOCTL) {
    666 		fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
    667 	}
    668 
    669 	radeonEmitQueryEnd(rmesa->glCtx);
    670 
    671 	if (rmesa->cmdbuf.cs->cdw) {
    672 		ret = radeon_cs_emit(rmesa->cmdbuf.cs);
    673 		rmesa->hw.all_dirty = GL_TRUE;
    674 	}
    675 	radeon_cs_erase(rmesa->cmdbuf.cs);
    676 	rmesa->cmdbuf.flushing = 0;
    677 
    678 	if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE) {
    679 		fprintf(stderr,"failed to revalidate buffers\n");
    680 	}
    681 
    682 	return ret;
    683 }
    684 
    685 int rcommonFlushCmdBuf(radeonContextPtr rmesa, const char *caller)
    686 {
    687 	int ret;
    688 
    689 	radeonReleaseDmaRegions(rmesa);
    690 
    691 	ret = rcommonFlushCmdBufLocked(rmesa, caller);
    692 
    693 	if (ret) {
    694 		fprintf(stderr, "drmRadeonCmdBuffer: %d. Kernel failed to "
    695 				"parse or rejected command stream. See dmesg "
    696 				"for more info.\n", ret);
    697 		exit(ret);
    698 	}
    699 
    700 	return ret;
    701 }
    702 
    703 /**
    704  * Make sure that enough space is available in the command buffer
    705  * by flushing if necessary.
    706  *
    707  * \param dwords The number of dwords we need to be free on the command buffer
    708  */
    709 GLboolean rcommonEnsureCmdBufSpace(radeonContextPtr rmesa, int dwords, const char *caller)
    710 {
    711    if ((rmesa->cmdbuf.cs->cdw + dwords + 128) > rmesa->cmdbuf.size
    712 	 || radeon_cs_need_flush(rmesa->cmdbuf.cs)) {
    713       /* If we try to flush empty buffer there is too big rendering operation. */
    714       assert(rmesa->cmdbuf.cs->cdw);
    715       rcommonFlushCmdBuf(rmesa, caller);
    716       return GL_TRUE;
    717    }
    718    return GL_FALSE;
    719 }
    720 
/**
 * Create and size the command stream for a context.
 *
 * The size comes from the "command_buffer_size" driconf option (in
 * 256-dword units), is raised to hold at least two full state emits,
 * then clamped to 16384 dwords.
 */
void rcommonInitCmdBuf(radeonContextPtr rmesa)
{
	GLuint size;
	struct drm_radeon_gem_info mminfo = { 0 };

	/* Initialize command buffer */
	size = 256 * driQueryOptioni(&rmesa->optionCache,
				     "command_buffer_size");
	if (size < 2 * rmesa->hw.max_state_size) {
		size = 2 * rmesa->hw.max_state_size + 65535;
	}
	/* NOTE(review): this upper clamp can undo the 2*max_state_size
	 * minimum applied just above — looks historical/intentional;
	 * confirm before changing.
	 */
	if (size > 64 * 256)
		size = 64 * 256;

	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
	radeon_print(RADEON_CS, RADEON_VERBOSE,
			"Allocating %d bytes command buffer (max state is %d bytes)\n",
			size * 4, rmesa->hw.max_state_size * 4);

	rmesa->cmdbuf.csm =
		radeon_cs_manager_gem_ctor(rmesa->radeonScreen->driScreen->fd);
	if (rmesa->cmdbuf.csm == NULL) {
		/* FIXME: fatal error */
		return;
	}
	rmesa->cmdbuf.cs = radeon_cs_create(rmesa->cmdbuf.csm, size);
	assert(rmesa->cmdbuf.cs != NULL);
	rmesa->cmdbuf.size = size;

	/* Flush callback invoked when the space checker needs room. */
	radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
				  (void (*)(void *))rmesa->glCtx->Driver.Flush, rmesa->glCtx);


	/* Teach the CS about available VRAM/GTT so space checks work. */
	if (!drmCommandWriteRead(rmesa->dri.fd, DRM_RADEON_GEM_INFO,
				 &mminfo, sizeof(mminfo))) {
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM,
				    mminfo.vram_visible);
		radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT,
				    mminfo.gart_size);
	}
}
    765 
/**
 * Destroy the command buffer and its GEM buffer manager.
 * Counterpart of rcommonInitCmdBuf(); call once at context teardown.
 */
void rcommonDestroyCmdBuf(radeonContextPtr rmesa)
{
	/* The stream goes first, then the manager created for it. */
	radeon_cs_destroy(rmesa->cmdbuf.cs);
	radeon_cs_manager_gem_dtor(rmesa->cmdbuf.csm);
}
    774 
    775 void rcommonBeginBatch(radeonContextPtr rmesa, int n,
    776 		       int dostate,
    777 		       const char *file,
    778 		       const char *function,
    779 		       int line)
    780 {
    781 	radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
    782 
    783     radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
    784                         n, rmesa->cmdbuf.cs->cdw, function, line);
    785 
    786 }
    787 
/* glClear fallback: hand the clear to the shared meta code, which
 * implements it with ordinary rendering.
 */
void radeonUserClear(struct gl_context *ctx, GLuint mask)
{
   _mesa_meta_Clear(ctx, mask);
}
    792