/*
 * Copyright 2010 Jerome Glisse <glisse@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "r600_formats.h"
#include "r600_shader.h"
#include "r600d.h"

#include "pipe/p_shader_tokens.h"
#include "util/u_pack_color.h"
#include "util/u_memory.h"
#include "util/u_framebuffer.h"
#include "util/u_dual_blend.h"

static uint32_t r600_translate_blend_function(int blend_func)
{
	switch (blend_func) {
	case PIPE_BLEND_ADD:
		return V_028804_COMB_DST_PLUS_SRC;
	case PIPE_BLEND_SUBTRACT:
		return V_028804_COMB_SRC_MINUS_DST;
	case PIPE_BLEND_REVERSE_SUBTRACT:
		return V_028804_COMB_DST_MINUS_SRC;
	case PIPE_BLEND_MIN:
		return V_028804_COMB_MIN_DST_SRC;
	case PIPE_BLEND_MAX:
		return V_028804_COMB_MAX_DST_SRC;
	default:
		R600_ERR("Unknown blend function %d\n", blend_func);
		assert(0);
		break;
	}
	return 0;
}

static uint32_t r600_translate_blend_factor(int blend_fact)
{
	switch (blend_fact) {
	case PIPE_BLENDFACTOR_ONE:
		return V_028804_BLEND_ONE;
	case PIPE_BLENDFACTOR_SRC_COLOR:
		return V_028804_BLEND_SRC_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA:
		return V_028804_BLEND_SRC_ALPHA;
	case PIPE_BLENDFACTOR_DST_ALPHA:
		return V_028804_BLEND_DST_ALPHA;
	case PIPE_BLENDFACTOR_DST_COLOR:
		return V_028804_BLEND_DST_COLOR;
	case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
		return V_028804_BLEND_SRC_ALPHA_SATURATE;
	case PIPE_BLENDFACTOR_CONST_COLOR:
		return V_028804_BLEND_CONST_COLOR;
	case PIPE_BLENDFACTOR_CONST_ALPHA:
		return V_028804_BLEND_CONST_ALPHA;
	case PIPE_BLENDFACTOR_ZERO:
		return V_028804_BLEND_ZERO;
	case PIPE_BLENDFACTOR_INV_SRC_COLOR:
		return V_028804_BLEND_ONE_MINUS_SRC_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC_ALPHA:
		return V_028804_BLEND_ONE_MINUS_SRC_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_ALPHA:
		return V_028804_BLEND_ONE_MINUS_DST_ALPHA;
	case PIPE_BLENDFACTOR_INV_DST_COLOR:
		return V_028804_BLEND_ONE_MINUS_DST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_COLOR:
		return V_028804_BLEND_ONE_MINUS_CONST_COLOR;
	case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
		return V_028804_BLEND_ONE_MINUS_CONST_ALPHA;
	case PIPE_BLENDFACTOR_SRC1_COLOR:
		return V_028804_BLEND_SRC1_COLOR;
	case PIPE_BLENDFACTOR_SRC1_ALPHA:
		return V_028804_BLEND_SRC1_ALPHA;
	case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
		return V_028804_BLEND_INV_SRC1_COLOR;
	case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
		return V_028804_BLEND_INV_SRC1_ALPHA;
	default:
		R600_ERR("Bad blend factor %d not supported!\n", blend_fact);
		assert(0);
		break;
	}
	return 0;
}

static unsigned r600_tex_dim(unsigned dim, unsigned nr_samples)
{
	switch (dim) {
	default:
	case PIPE_TEXTURE_1D:
		return V_038000_SQ_TEX_DIM_1D;
	case PIPE_TEXTURE_1D_ARRAY:
		return V_038000_SQ_TEX_DIM_1D_ARRAY;
	case PIPE_TEXTURE_2D:
	case PIPE_TEXTURE_RECT:
		return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_MSAA :
					V_038000_SQ_TEX_DIM_2D;
	case PIPE_TEXTURE_2D_ARRAY:
		return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA :
					V_038000_SQ_TEX_DIM_2D_ARRAY;
	case PIPE_TEXTURE_3D:
		return V_038000_SQ_TEX_DIM_3D;
	case PIPE_TEXTURE_CUBE:
	case PIPE_TEXTURE_CUBE_ARRAY:
		return V_038000_SQ_TEX_DIM_CUBEMAP;
	}
}

static uint32_t r600_translate_dbformat(enum pipe_format format)
{
	switch (format) {
	case PIPE_FORMAT_Z16_UNORM:
		return V_028010_DEPTH_16;
	case PIPE_FORMAT_Z24X8_UNORM:
		return V_028010_DEPTH_X8_24;
	case PIPE_FORMAT_Z24_UNORM_S8_UINT:
		return V_028010_DEPTH_8_24;
	case PIPE_FORMAT_Z32_FLOAT:
		return V_028010_DEPTH_32_FLOAT;
	case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT:
		return V_028010_DEPTH_X24_8_32_FLOAT;
	default:
		return ~0U;
	}
}

static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format)
{
	return r600_translate_texformat(screen, format, NULL, NULL, NULL,
					FALSE) != ~0U;
}

static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format)
{
	return r600_translate_colorformat(chip, format, FALSE) != ~0U &&
	       r600_translate_colorswap(format, FALSE) != ~0U;
}

static bool r600_is_zs_format_supported(enum pipe_format format)
{
	return r600_translate_dbformat(format) != ~0U;
}
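/* Report which of the requested bind flags this format can actually satisfy;
 * the caller gets TRUE only when every requested usage is supported. */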
boolean r600_is_format_supported(struct pipe_screen *screen,
				 enum pipe_format format,
				 enum pipe_texture_target target,
				 unsigned sample_count,
				 unsigned usage)
{
	struct r600_screen *rscreen = (struct r600_screen*)screen;
	unsigned retval = 0;

	if (target >= PIPE_MAX_TEXTURE_TYPES) {
		R600_ERR("r600: unsupported texture type %d\n", target);
		return FALSE;
	}

	if (!util_format_is_supported(format, usage))
		return FALSE;

	if (sample_count > 1) {
		if (!rscreen->has_msaa)
			return FALSE;

		/* R11G11B10 is broken on R6xx. */
		if (rscreen->b.chip_class == R600 &&
		    format == PIPE_FORMAT_R11G11B10_FLOAT)
			return FALSE;

		/* MSAA integer colorbuffers hang. */
		if (util_format_is_pure_integer(format) &&
		    !util_format_is_depth_or_stencil(format))
			return FALSE;

		switch (sample_count) {
		case 2:
		case 4:
		case 8:
			break;
		default:
			return FALSE;
		}
	}

	if (usage & PIPE_BIND_SAMPLER_VIEW) {
		if (target == PIPE_BUFFER) {
			if (r600_is_vertex_format_supported(format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		} else {
			if (r600_is_sampler_format_supported(screen, format))
				retval |= PIPE_BIND_SAMPLER_VIEW;
		}
	}

	if ((usage & (PIPE_BIND_RENDER_TARGET |
		      PIPE_BIND_DISPLAY_TARGET |
		      PIPE_BIND_SCANOUT |
		      PIPE_BIND_SHARED |
		      PIPE_BIND_BLENDABLE)) &&
	    r600_is_colorbuffer_format_supported(rscreen->b.chip_class, format)) {
		retval |= usage &
			  (PIPE_BIND_RENDER_TARGET |
			   PIPE_BIND_DISPLAY_TARGET |
			   PIPE_BIND_SCANOUT |
			   PIPE_BIND_SHARED);
		if (!util_format_is_pure_integer(format) &&
		    !util_format_is_depth_or_stencil(format))
			retval |= usage & PIPE_BIND_BLENDABLE;
	}

	if ((usage & PIPE_BIND_DEPTH_STENCIL) &&
	    r600_is_zs_format_supported(format)) {
		retval |= PIPE_BIND_DEPTH_STENCIL;
	}

	if ((usage & PIPE_BIND_VERTEX_BUFFER) &&
	    r600_is_vertex_format_supported(format)) {
		retval |= PIPE_BIND_VERTEX_BUFFER;
	}

	if ((usage & PIPE_BIND_LINEAR) &&
	    !util_format_is_compressed(format) &&
	    !(usage & PIPE_BIND_DEPTH_STENCIL))
		retval |= PIPE_BIND_LINEAR;

	return retval == usage;
}

static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a;
	float offset_units = state->offset_units;
	float offset_scale = state->offset_scale;
	uint32_t pa_su_poly_offset_db_fmt_cntl = 0;

	if (!state->offset_units_unscaled) {
		switch (state->zs_format) {
		case PIPE_FORMAT_Z24X8_UNORM:
		case PIPE_FORMAT_Z24_UNORM_S8_UINT:
			offset_units *= 2.0f;
			pa_su_poly_offset_db_fmt_cntl =
				S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24);
			break;
		case PIPE_FORMAT_Z16_UNORM:
			offset_units *= 4.0f;
			pa_su_poly_offset_db_fmt_cntl =
				S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16);
			break;
		default:
			pa_su_poly_offset_db_fmt_cntl =
				S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) |
				S_028DF8_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
		}
	}

	radeon_set_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4);
	radeon_emit(cs, fui(offset_scale));
	radeon_emit(cs, fui(offset_units));
	radeon_emit(cs, fui(offset_scale));
	radeon_emit(cs, fui(offset_units));

	radeon_set_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       pa_su_poly_offset_db_fmt_cntl);
}
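/* Build the CB_BLEND*_CONTROL word for MRT i; when independent blending is
 * disabled, every MRT uses the rt[0] settings. */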
static uint32_t r600_get_blend_control(const struct pipe_blend_state *state, unsigned i)
{
	int j = state->independent_blend_enable ? i : 0;

	unsigned eqRGB = state->rt[j].rgb_func;
	unsigned srcRGB = state->rt[j].rgb_src_factor;
	unsigned dstRGB = state->rt[j].rgb_dst_factor;

	unsigned eqA = state->rt[j].alpha_func;
	unsigned srcA = state->rt[j].alpha_src_factor;
	unsigned dstA = state->rt[j].alpha_dst_factor;
	uint32_t bc = 0;

	if (!state->rt[j].blend_enable)
		return 0;

	bc |= S_028804_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB));
	bc |= S_028804_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB));
	bc |= S_028804_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB));

	if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
		bc |= S_028804_SEPARATE_ALPHA_BLEND(1);
		bc |= S_028804_ALPHA_COMB_FCN(r600_translate_blend_function(eqA));
		bc |= S_028804_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA));
		bc |= S_028804_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA));
	}
	return bc;
}

static void *r600_create_blend_state_mode(struct pipe_context *ctx,
					  const struct pipe_blend_state *state,
					  int mode)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	uint32_t color_control = 0, target_mask = 0;
	struct r600_blend_state *blend = CALLOC_STRUCT(r600_blend_state);

	if (!blend) {
		return NULL;
	}

	r600_init_command_buffer(&blend->buffer, 20);
	r600_init_command_buffer(&blend->buffer_no_blend, 20);

	/* R600 does not support per-MRT blends */
	if (rctx->b.family > CHIP_R600)
		color_control |= S_028808_PER_MRT_BLEND(1);

	if (state->logicop_enable) {
		color_control |= (state->logicop_func << 16) | (state->logicop_func << 20);
	} else {
		color_control |= (0xcc << 16);
	}
	/* We pretend all 8 buffers are used; CB_SHADER_MASK will disable the unused ones. */
	if (state->independent_blend_enable) {
		for (int i = 0; i < 8; i++) {
			if (state->rt[i].blend_enable) {
				color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i);
			}
			target_mask |= (state->rt[i].colormask << (4 * i));
		}
	} else {
		for (int i = 0; i < 8; i++) {
			if (state->rt[0].blend_enable) {
				color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i);
			}
			target_mask |= (state->rt[0].colormask << (4 * i));
		}
	}

	if (target_mask)
		color_control |= S_028808_SPECIAL_OP(mode);
	else
		color_control |= S_028808_SPECIAL_OP(V_028808_DISABLE);

	/* only MRT0 has dual src blend */
	blend->dual_src_blend = util_blend_state_is_dual(state, 0);
	blend->cb_target_mask = target_mask;
	blend->cb_color_control = color_control;
	blend->cb_color_control_no_blend = color_control & C_028808_TARGET_BLEND_ENABLE;
	blend->alpha_to_one = state->alpha_to_one;

	r600_store_context_reg(&blend->buffer, R_028D44_DB_ALPHA_TO_MASK,
			       S_028D44_ALPHA_TO_MASK_ENABLE(state->alpha_to_coverage) |
			       S_028D44_ALPHA_TO_MASK_OFFSET0(2) |
			       S_028D44_ALPHA_TO_MASK_OFFSET1(2) |
			       S_028D44_ALPHA_TO_MASK_OFFSET2(2) |
			       S_028D44_ALPHA_TO_MASK_OFFSET3(2));

	/* Copy over the registers set so far into buffer_no_blend. */
	memcpy(blend->buffer_no_blend.buf, blend->buffer.buf, blend->buffer.num_dw * 4);
	blend->buffer_no_blend.num_dw = blend->buffer.num_dw;

	/* Only add blend registers if blending is enabled. */
	if (!G_028808_TARGET_BLEND_ENABLE(color_control)) {
		return blend;
	}

	/* The first R600 does not support per-MRT blends */
	r600_store_context_reg(&blend->buffer, R_028804_CB_BLEND_CONTROL,
			       r600_get_blend_control(state, 0));

	if (rctx->b.family > CHIP_R600) {
		r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8);
		for (int i = 0; i < 8; i++) {
			r600_store_value(&blend->buffer, r600_get_blend_control(state, i));
		}
	}
	return blend;
}

static void *r600_create_blend_state(struct pipe_context *ctx,
				     const struct pipe_blend_state *state)
{
	return r600_create_blend_state_mode(ctx, state, V_028808_SPECIAL_NORMAL);
}
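/* Translate pipe_depth_stencil_alpha_state into DB_DEPTH_CONTROL and the
 * SX alpha-test fields; the stencil and depth write masks are kept on the
 * CSO for use by other atoms. */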
static void *r600_create_dsa_state(struct pipe_context *ctx,
				   const struct pipe_depth_stencil_alpha_state *state)
{
	unsigned db_depth_control, alpha_test_control, alpha_ref;
	struct r600_dsa_state *dsa = CALLOC_STRUCT(r600_dsa_state);

	if (!dsa) {
		return NULL;
	}

	r600_init_command_buffer(&dsa->buffer, 3);

	dsa->valuemask[0] = state->stencil[0].valuemask;
	dsa->valuemask[1] = state->stencil[1].valuemask;
	dsa->writemask[0] = state->stencil[0].writemask;
	dsa->writemask[1] = state->stencil[1].writemask;
	dsa->zwritemask = state->depth.writemask;

	db_depth_control = S_028800_Z_ENABLE(state->depth.enabled) |
			   S_028800_Z_WRITE_ENABLE(state->depth.writemask) |
			   S_028800_ZFUNC(state->depth.func);

	/* stencil */
	if (state->stencil[0].enabled) {
		db_depth_control |= S_028800_STENCIL_ENABLE(1);
		db_depth_control |= S_028800_STENCILFUNC(state->stencil[0].func); /* translates straight */
		db_depth_control |= S_028800_STENCILFAIL(r600_translate_stencil_op(state->stencil[0].fail_op));
		db_depth_control |= S_028800_STENCILZPASS(r600_translate_stencil_op(state->stencil[0].zpass_op));
		db_depth_control |= S_028800_STENCILZFAIL(r600_translate_stencil_op(state->stencil[0].zfail_op));

		if (state->stencil[1].enabled) {
			db_depth_control |= S_028800_BACKFACE_ENABLE(1);
			db_depth_control |= S_028800_STENCILFUNC_BF(state->stencil[1].func); /* translates straight */
			db_depth_control |= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state->stencil[1].fail_op));
			db_depth_control |= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state->stencil[1].zpass_op));
			db_depth_control |= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state->stencil[1].zfail_op));
		}
	}

	/* alpha */
	alpha_test_control = 0;
	alpha_ref = 0;
	if (state->alpha.enabled) {
		alpha_test_control = S_028410_ALPHA_FUNC(state->alpha.func);
		alpha_test_control |= S_028410_ALPHA_TEST_ENABLE(1);
		alpha_ref = fui(state->alpha.ref_value);
	}
	dsa->sx_alpha_test_control = alpha_test_control & 0xff;
	dsa->alpha_ref = alpha_ref;

	r600_store_context_reg(&dsa->buffer, R_028800_DB_DEPTH_CONTROL, db_depth_control);
	return dsa;
}

static void *r600_create_rs_state(struct pipe_context *ctx,
				  const struct pipe_rasterizer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	unsigned tmp, sc_mode_cntl, spi_interp;
	float psize_min, psize_max;
	struct r600_rasterizer_state *rs = CALLOC_STRUCT(r600_rasterizer_state);

	if (!rs) {
		return NULL;
	}

	r600_init_command_buffer(&rs->buffer, 30);

	rs->scissor_enable = state->scissor;
	rs->clip_halfz = state->clip_halfz;
	rs->flatshade = state->flatshade;
	rs->sprite_coord_enable = state->sprite_coord_enable;
	rs->two_side = state->light_twoside;
	rs->clip_plane_enable = state->clip_plane_enable;
	rs->pa_sc_line_stipple = state->line_stipple_enable ?
				S_028A0C_LINE_PATTERN(state->line_stipple_pattern) |
				S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0;
	rs->pa_cl_clip_cntl =
		S_028810_DX_CLIP_SPACE_DEF(state->clip_halfz) |
		S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip) |
		S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip) |
		S_028810_DX_LINEAR_ATTR_CLIP_ENA(1);
	if (rctx->b.chip_class == R700) {
		rs->pa_cl_clip_cntl |=
			S_028810_DX_RASTERIZATION_KILL(state->rasterizer_discard);
	}
	rs->multisample_enable = state->multisample;

	/* offset */
	rs->offset_units = state->offset_units;
	rs->offset_scale = state->offset_scale * 16.0f;
	rs->offset_enable = state->offset_point || state->offset_line || state->offset_tri;
	rs->offset_units_unscaled = state->offset_units_unscaled;

	if (state->point_size_per_vertex) {
		psize_min = util_get_min_point_size(state);
		psize_max = 8192;
	} else {
		/* Force the point size to be as if the vertex output was disabled. */
		psize_min = state->point_size;
		psize_max = state->point_size;
	}

	sc_mode_cntl = S_028A4C_MSAA_ENABLE(state->multisample) |
		       S_028A4C_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
		       S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) |
		       S_028A4C_PS_ITER_SAMPLE(state->multisample && rctx->ps_iter_samples > 1);
	if (rctx->b.family == CHIP_RV770) {
		/* workaround possible rendering corruption on RV770 with hyperz together with sample shading */
		sc_mode_cntl |= S_028A4C_TILE_COVER_DISABLE(state->multisample && rctx->ps_iter_samples > 1);
	}
	if (rctx->b.chip_class >= R700) {
		sc_mode_cntl |= S_028A4C_FORCE_EOV_REZ_ENABLE(1) |
				S_028A4C_R700_ZMM_LINE_OFFSET(1) |
				S_028A4C_R700_VPORT_SCISSOR_ENABLE(1);
	} else {
		sc_mode_cntl |= S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1);
	}

	spi_interp = S_0286D4_FLAT_SHADE_ENA(1);
	if (state->sprite_coord_enable) {
		spi_interp |= S_0286D4_PNT_SPRITE_ENA(1) |
			      S_0286D4_PNT_SPRITE_OVRD_X(2) |
			      S_0286D4_PNT_SPRITE_OVRD_Y(3) |
			      S_0286D4_PNT_SPRITE_OVRD_Z(0) |
			      S_0286D4_PNT_SPRITE_OVRD_W(1);
		if (state->sprite_coord_mode != PIPE_SPRITE_COORD_UPPER_LEFT) {
			spi_interp |= S_0286D4_PNT_SPRITE_TOP_1(1);
		}
	}

	r600_store_context_reg_seq(&rs->buffer, R_028A00_PA_SU_POINT_SIZE, 3);
	/* point size 12.4 fixed point (divide by two, because 0.5 = 1 pixel). */
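	/* For example, a 2.0-pixel point gives a half-size of 1.0, which
	 * packs to 0x10 in 12.4 fixed point. */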
	tmp = r600_pack_float_12p4(state->point_size/2);
	r600_store_value(&rs->buffer, /* R_028A00_PA_SU_POINT_SIZE */
			 S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp));
	r600_store_value(&rs->buffer, /* R_028A04_PA_SU_POINT_MINMAX */
			 S_028A04_MIN_SIZE(r600_pack_float_12p4(psize_min/2)) |
			 S_028A04_MAX_SIZE(r600_pack_float_12p4(psize_max/2)));
	r600_store_value(&rs->buffer, /* R_028A08_PA_SU_LINE_CNTL */
			 S_028A08_WIDTH(r600_pack_float_12p4(state->line_width/2)));

	r600_store_context_reg(&rs->buffer, R_0286D4_SPI_INTERP_CONTROL_0, spi_interp);
	r600_store_context_reg(&rs->buffer, R_028A4C_PA_SC_MODE_CNTL, sc_mode_cntl);
	r600_store_context_reg(&rs->buffer, R_028C08_PA_SU_VTX_CNTL,
			       S_028C08_PIX_CENTER_HALF(state->half_pixel_center) |
			       S_028C08_QUANT_MODE(V_028C08_X_1_256TH));
	r600_store_context_reg(&rs->buffer, R_028DFC_PA_SU_POLY_OFFSET_CLAMP, fui(state->offset_clamp));

	rs->pa_su_sc_mode_cntl = S_028814_PROVOKING_VTX_LAST(!state->flatshade_first) |
				 S_028814_CULL_FRONT(state->cull_face & PIPE_FACE_FRONT ? 1 : 0) |
				 S_028814_CULL_BACK(state->cull_face & PIPE_FACE_BACK ? 1 : 0) |
				 S_028814_FACE(!state->front_ccw) |
				 S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state, state->fill_front)) |
				 S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state, state->fill_back)) |
				 S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_point || state->offset_line) |
				 S_028814_POLY_MODE(state->fill_front != PIPE_POLYGON_MODE_FILL ||
						    state->fill_back != PIPE_POLYGON_MODE_FILL) |
				 S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state->fill_front)) |
				 S_028814_POLYMODE_BACK_PTYPE(r600_translate_fill(state->fill_back));
	if (rctx->b.chip_class == R700) {
		r600_store_context_reg(&rs->buffer, R_028814_PA_SU_SC_MODE_CNTL, rs->pa_su_sc_mode_cntl);
	}
	if (rctx->b.chip_class == R600) {
		r600_store_context_reg(&rs->buffer, R_028350_SX_MISC,
				       S_028350_MULTIPASS(state->rasterizer_discard));
	}
	return rs;
}

static unsigned r600_tex_filter(unsigned filter, unsigned max_aniso)
{
	if (filter == PIPE_TEX_FILTER_LINEAR)
		return max_aniso > 1 ? V_03C000_SQ_TEX_XY_FILTER_ANISO_BILINEAR
				     : V_03C000_SQ_TEX_XY_FILTER_BILINEAR;
	else
		return max_aniso > 1 ? V_03C000_SQ_TEX_XY_FILTER_ANISO_POINT
				     : V_03C000_SQ_TEX_XY_FILTER_POINT;
}
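/* Build the three SQ_TEX_SAMPLER words from gallium sampler state; LOD values
 * and the LOD bias are converted to fixed point with 6 fractional bits. */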
static void *r600_create_sampler_state(struct pipe_context *ctx,
					const struct pipe_sampler_state *state)
{
	struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen;
	struct r600_pipe_sampler_state *ss = CALLOC_STRUCT(r600_pipe_sampler_state);
	unsigned max_aniso = rscreen->force_aniso >= 0 ? rscreen->force_aniso
						       : state->max_anisotropy;
	unsigned max_aniso_ratio = r600_tex_aniso_filter(max_aniso);

	if (!ss) {
		return NULL;
	}

	ss->seamless_cube_map = state->seamless_cube_map;
	ss->border_color_use = sampler_state_needs_border_color(state);

	/* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */
	ss->tex_sampler_words[0] =
		S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) |
		S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) |
		S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) |
		S_03C000_XY_MAG_FILTER(r600_tex_filter(state->mag_img_filter, max_aniso)) |
		S_03C000_XY_MIN_FILTER(r600_tex_filter(state->min_img_filter, max_aniso)) |
		S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) |
		S_03C000_MAX_ANISO_RATIO(max_aniso_ratio) |
		S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) |
		S_03C000_BORDER_COLOR_TYPE(ss->border_color_use ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0);
	/* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */
	ss->tex_sampler_words[1] =
		S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 6)) |
		S_03C004_MAX_LOD(S_FIXED(CLAMP(state->max_lod, 0, 15), 6)) |
		S_03C004_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 6));
	/* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */
	ss->tex_sampler_words[2] = S_03C008_TYPE(1);

	if (ss->border_color_use) {
		memcpy(&ss->border_color, &state->border_color, sizeof(state->border_color));
	}
	return ss;
}

static struct pipe_sampler_view *
texture_buffer_sampler_view(struct r600_pipe_sampler_view *view,
			    unsigned width0, unsigned height0)

{
	struct r600_texture *tmp = (struct r600_texture*)view->base.texture;
	int stride = util_format_get_blocksize(view->base.format);
	unsigned format, num_format, format_comp, endian;
	uint64_t offset = view->base.u.buf.offset;
	unsigned size = view->base.u.buf.size;

	r600_vertex_data_type(view->base.format,
			      &format, &num_format, &format_comp,
			      &endian);

	view->tex_resource = &tmp->resource;
	view->skip_mip_address_reloc = true;

	view->tex_resource_words[0] = offset;
	view->tex_resource_words[1] = size - 1;
	view->tex_resource_words[2] = S_038008_BASE_ADDRESS_HI(offset >> 32UL) |
		S_038008_STRIDE(stride) |
		S_038008_DATA_FORMAT(format) |
		S_038008_NUM_FORMAT_ALL(num_format) |
		S_038008_FORMAT_COMP_ALL(format_comp) |
		S_038008_ENDIAN_SWAP(endian);
	view->tex_resource_words[3] = 0;
	/*
	 * In theory dword 4 holds the number of elements for use with resinfo,
	 * but it does not appear to work; the AMD GPU shader analyser uses a
	 * constant buffer to store the element sizes for buffer txq instead.
	 */
	view->tex_resource_words[4] = 0;
	view->tex_resource_words[5] = 0;
	view->tex_resource_words[6] = S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_BUFFER);
	return &view->base;
}

struct pipe_sampler_view *
r600_create_sampler_view_custom(struct pipe_context *ctx,
				struct pipe_resource *texture,
				const struct pipe_sampler_view *state,
				unsigned width_first_level, unsigned height_first_level)
{
	struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view);
	struct r600_texture *tmp = (struct r600_texture*)texture;
	unsigned format, endian;
	uint32_t word4 = 0, yuv_format = 0, pitch = 0;
	unsigned char swizzle[4], array_mode = 0;
	unsigned width, height, depth, offset_level, last_level;
	bool do_endian_swap = FALSE;

	if (!view)
		return NULL;

	/* initialize base object */
	view->base = *state;
	view->base.texture = NULL;
	pipe_reference(NULL, &texture->reference);
	view->base.texture = texture;
	view->base.reference.count = 1;
	view->base.context = ctx;

	if (texture->target == PIPE_BUFFER)
		return texture_buffer_sampler_view(view, texture->width0, 1);

	swizzle[0] = state->swizzle_r;
	swizzle[1] = state->swizzle_g;
	swizzle[2] = state->swizzle_b;
	swizzle[3] = state->swizzle_a;

	if (R600_BIG_ENDIAN)
		do_endian_swap = !tmp->db_compatible;

	format = r600_translate_texformat(ctx->screen, state->format,
					  swizzle,
					  &word4, &yuv_format, do_endian_swap);
	assert(format != ~0);
	if (format == ~0) {
		FREE(view);
		return NULL;
	}

	if (state->format == PIPE_FORMAT_X24S8_UINT ||
	    state->format == PIPE_FORMAT_S8X24_UINT ||
	    state->format == PIPE_FORMAT_X32_S8X24_UINT ||
	    state->format == PIPE_FORMAT_S8_UINT)
		view->is_stencil_sampler = true;

	if (tmp->is_depth && !r600_can_sample_zs(tmp, view->is_stencil_sampler)) {
		if (!r600_init_flushed_depth_texture(ctx, texture, NULL)) {
			FREE(view);
			return NULL;
		}
		tmp = tmp->flushed_depth_texture;
	}

	endian = r600_colorformat_endian_swap(format, do_endian_swap);

	offset_level = state->u.tex.first_level;
	last_level = state->u.tex.last_level - offset_level;
	width = width_first_level;
	height = height_first_level;
	depth = u_minify(texture->depth0, offset_level);
	pitch = tmp->surface.level[offset_level].nblk_x * util_format_get_blockwidth(state->format);

	if (texture->target == PIPE_TEXTURE_1D_ARRAY) {
		height = 1;
		depth = texture->array_size;
	} else if (texture->target == PIPE_TEXTURE_2D_ARRAY) {
		depth = texture->array_size;
	} else if (texture->target == PIPE_TEXTURE_CUBE_ARRAY)
		depth = texture->array_size / 6;

	switch (tmp->surface.level[offset_level].mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		array_mode = V_038000_ARRAY_LINEAR_ALIGNED;
		break;
	case RADEON_SURF_MODE_1D:
		array_mode = V_038000_ARRAY_1D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_2D:
		array_mode = V_038000_ARRAY_2D_TILED_THIN1;
		break;
	}

	view->tex_resource = &tmp->resource;
	view->tex_resource_words[0] = (S_038000_DIM(r600_tex_dim(texture->target, texture->nr_samples)) |
				       S_038000_TILE_MODE(array_mode) |
				       S_038000_TILE_TYPE(tmp->non_disp_tiling) |
				       S_038000_PITCH((pitch / 8) - 1) |
				       S_038000_TEX_WIDTH(width - 1));
	view->tex_resource_words[1] = (S_038004_TEX_HEIGHT(height - 1) |
				       S_038004_TEX_DEPTH(depth - 1) |
				       S_038004_DATA_FORMAT(format));
	view->tex_resource_words[2] = tmp->surface.level[offset_level].offset >> 8;
	if (offset_level >= tmp->resource.b.b.last_level) {
		view->tex_resource_words[3] = tmp->surface.level[offset_level].offset >> 8;
	} else {
		view->tex_resource_words[3] = tmp->surface.level[offset_level + 1].offset >> 8;
	}
	view->tex_resource_words[4] = (word4 |
				       S_038010_REQUEST_SIZE(1) |
				       S_038010_ENDIAN_SWAP(endian) |
				       S_038010_BASE_LEVEL(0));
	view->tex_resource_words[5] = (S_038014_BASE_ARRAY(state->u.tex.first_layer) |
				       S_038014_LAST_ARRAY(state->u.tex.last_layer));
	if (texture->nr_samples > 1) {
		/* LAST_LEVEL holds log2(nr_samples) for multisample textures */
		view->tex_resource_words[5] |= S_038014_LAST_LEVEL(util_logbase2(texture->nr_samples));
	} else {
		view->tex_resource_words[5] |= S_038014_LAST_LEVEL(last_level);
	}
	view->tex_resource_words[6] = (S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_TEXTURE) |
				       S_038018_MAX_ANISO(4 /* max 16 samples */));
	return &view->base;
}

static struct pipe_sampler_view *
r600_create_sampler_view(struct pipe_context *ctx,
			 struct pipe_resource *tex,
			 const struct pipe_sampler_view *state)
{
	return r600_create_sampler_view_custom(ctx, tex, state,
					       u_minify(tex->width0, state->u.tex.first_level),
					       u_minify(tex->height0, state->u.tex.first_level));
}

static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct pipe_clip_state *state = &rctx->clip_state.state;

	radeon_set_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4);
	radeon_emit_array(cs, (unsigned*)state, 6*4);
}

static void r600_set_polygon_stipple(struct pipe_context *ctx,
				     const struct pipe_poly_stipple *state)
{
}

static void r600_init_color_surface(struct r600_context *rctx,
				    struct r600_surface *surf,
				    bool force_cmask_fmask)
{
	struct r600_screen *rscreen = rctx->screen;
	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
	unsigned level = surf->base.u.tex.level;
	unsigned pitch, slice;
	unsigned color_info;
	unsigned color_view;
	unsigned format, swap, ntype, endian;
	unsigned offset;
	const struct util_format_description *desc;
	int i;
	bool blend_bypass = 0, blend_clamp = 1, do_endian_swap = FALSE;

	if (rtex->db_compatible && !r600_can_sample_zs(rtex, false)) {
		r600_init_flushed_depth_texture(&rctx->b.b, surf->base.texture, NULL);
		rtex = rtex->flushed_depth_texture;
		assert(rtex);
	}

	offset = rtex->surface.level[level].offset;
	color_view = S_028080_SLICE_START(surf->base.u.tex.first_layer) |
		     S_028080_SLICE_MAX(surf->base.u.tex.last_layer);

	pitch = rtex->surface.level[level].nblk_x / 8 - 1;
	slice = (rtex->surface.level[level].nblk_x * rtex->surface.level[level].nblk_y) / 64;
	if (slice) {
		slice = slice - 1;
	}
	color_info = 0;
	switch (rtex->surface.level[level].mode) {
	default:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_LINEAR_ALIGNED);
		break;
	case RADEON_SURF_MODE_1D:
		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_1D_TILED_THIN1);
		break;
	case RADEON_SURF_MODE_2D:
		color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_2D_TILED_THIN1);
		break;
	}

	desc = util_format_description(surf->base.format);

	for (i = 0; i < 4; i++) {
		if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) {
			break;
		}
	}

	ntype = V_0280A0_NUMBER_UNORM;
	if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB)
		ntype = V_0280A0_NUMBER_SRGB;
	else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_0280A0_NUMBER_SNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_0280A0_NUMBER_SINT;
	} else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) {
		if (desc->channel[i].normalized)
			ntype = V_0280A0_NUMBER_UNORM;
		else if (desc->channel[i].pure_integer)
			ntype = V_0280A0_NUMBER_UINT;
	}

	if (R600_BIG_ENDIAN)
		do_endian_swap = !rtex->db_compatible;

	format = r600_translate_colorformat(rctx->b.chip_class, surf->base.format,
					    do_endian_swap);
	assert(format != ~0);

	swap = r600_translate_colorswap(surf->base.format, do_endian_swap);
	assert(swap != ~0);

	endian = r600_colorformat_endian_swap(format, do_endian_swap);

	/* Set blend bypass according to the docs for SINT/UINT and the
	 * 8/24 COLOR variants. */
	if (ntype == V_0280A0_NUMBER_UINT || ntype == V_0280A0_NUMBER_SINT ||
	    format == V_0280A0_COLOR_8_24 || format == V_0280A0_COLOR_24_8 ||
	    format == V_0280A0_COLOR_X24_8_32_FLOAT) {
		blend_clamp = 0;
		blend_bypass = 1;
	}

	surf->alphatest_bypass = ntype == V_0280A0_NUMBER_UINT || ntype == V_0280A0_NUMBER_SINT;

	color_info |= S_0280A0_FORMAT(format) |
		      S_0280A0_COMP_SWAP(swap) |
		      S_0280A0_BLEND_BYPASS(blend_bypass) |
		      S_0280A0_BLEND_CLAMP(blend_clamp) |
		      S_0280A0_NUMBER_TYPE(ntype) |
		      S_0280A0_ENDIAN(endian);

	/* EXPORT_NORM is an optimization that can be enabled for better
	 * performance in certain cases.
	 */
	if (rctx->b.chip_class == R600) {
		/* EXPORT_NORM can be enabled if:
		 * - 11-bit or smaller UNORM/SNORM/SRGB
		 * - BLEND_CLAMP is enabled
		 * - BLEND_FLOAT32 is disabled
		 */
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
		    (desc->channel[i].size < 12 &&
		     desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
		     ntype != V_0280A0_NUMBER_UINT &&
		     ntype != V_0280A0_NUMBER_SINT) &&
		    G_0280A0_BLEND_CLAMP(color_info) &&
		    !G_0280A0_BLEND_FLOAT32(color_info)) {
			color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM);
			surf->export_16bpc = true;
		}
	} else {
		/* EXPORT_NORM can be enabled if:
		 * - 11-bit or smaller UNORM/SNORM/SRGB
		 * - 16-bit or smaller FLOAT
		 */
		if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS &&
		    ((desc->channel[i].size < 12 &&
		      desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT &&
		      ntype != V_0280A0_NUMBER_UINT && ntype != V_0280A0_NUMBER_SINT) ||
		     (desc->channel[i].size < 17 &&
		      desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT))) {
			color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM);
			surf->export_16bpc = true;
		}
	}

	/* These might not always be initialized to zero. */
	surf->cb_color_base = offset >> 8;
	surf->cb_color_size = S_028060_PITCH_TILE_MAX(pitch) |
			      S_028060_SLICE_TILE_MAX(slice);
	surf->cb_color_fmask = surf->cb_color_base;
	surf->cb_color_cmask = surf->cb_color_base;
	surf->cb_color_mask = 0;

	r600_resource_reference(&surf->cb_buffer_cmask, &rtex->resource);
	r600_resource_reference(&surf->cb_buffer_fmask, &rtex->resource);

	if (rtex->cmask.size) {
		surf->cb_color_cmask = rtex->cmask.offset >> 8;
		surf->cb_color_mask |= S_028100_CMASK_BLOCK_MAX(rtex->cmask.slice_tile_max);

		if (rtex->fmask.size) {
			color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
			surf->cb_color_fmask = rtex->fmask.offset >> 8;
			surf->cb_color_mask |= S_028100_FMASK_TILE_MAX(rtex->fmask.slice_tile_max);
		} else { /* cmask only */
			color_info |= S_0280A0_TILE_MODE(V_0280A0_CLEAR_ENABLE);
		}
	} else if (force_cmask_fmask) {
		/* Allocate dummy FMASK and CMASK if they aren't allocated already.
		 *
		 * R6xx needs FMASK and CMASK for the destination buffer of color resolve,
		 * otherwise it hangs. We don't have FMASK and CMASK pre-allocated,
		 * because it's not an MSAA buffer.
		 */
		struct r600_cmask_info cmask;
		struct r600_fmask_info fmask;

		r600_texture_get_cmask_info(&rscreen->b, rtex, &cmask);
		r600_texture_get_fmask_info(&rscreen->b, rtex, 8, &fmask);

		/* CMASK. */
		if (!rctx->dummy_cmask ||
		    rctx->dummy_cmask->b.b.width0 < cmask.size ||
		    rctx->dummy_cmask->buf->alignment % cmask.alignment != 0) {
			struct pipe_transfer *transfer;
			void *ptr;

			r600_resource_reference(&rctx->dummy_cmask, NULL);
			rctx->dummy_cmask = (struct r600_resource*)
				r600_aligned_buffer_create(&rscreen->b.b, 0,
							   PIPE_USAGE_DEFAULT,
							   cmask.size, cmask.alignment);

			/* Set the contents to 0xCC. */
			ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer);
			memset(ptr, 0xCC, cmask.size);
			pipe_buffer_unmap(&rctx->b.b, transfer);
		}
		r600_resource_reference(&surf->cb_buffer_cmask, rctx->dummy_cmask);

		/* FMASK. */
		if (!rctx->dummy_fmask ||
		    rctx->dummy_fmask->b.b.width0 < fmask.size ||
		    rctx->dummy_fmask->buf->alignment % fmask.alignment != 0) {
			r600_resource_reference(&rctx->dummy_fmask, NULL);
			rctx->dummy_fmask = (struct r600_resource*)
				r600_aligned_buffer_create(&rscreen->b.b, 0,
							   PIPE_USAGE_DEFAULT,
							   fmask.size, fmask.alignment);
		}
		r600_resource_reference(&surf->cb_buffer_fmask, rctx->dummy_fmask);

		/* Init the registers. */
		color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE);
		surf->cb_color_cmask = 0;
		surf->cb_color_fmask = 0;
		surf->cb_color_mask = S_028100_CMASK_BLOCK_MAX(cmask.slice_tile_max) |
				      S_028100_FMASK_TILE_MAX(fmask.slice_tile_max);
	}

	surf->cb_color_info = color_info;
	surf->cb_color_view = color_view;
	surf->color_initialized = true;
}

static void r600_init_depth_surface(struct r600_context *rctx,
				    struct r600_surface *surf)
{
	struct r600_texture *rtex = (struct r600_texture*)surf->base.texture;
	unsigned level, pitch, slice, format, offset, array_mode;

	level = surf->base.u.tex.level;
	offset = rtex->surface.level[level].offset;
	pitch = rtex->surface.level[level].nblk_x / 8 - 1;
	slice = (rtex->surface.level[level].nblk_x * rtex->surface.level[level].nblk_y) / 64;
	if (slice) {
		slice = slice - 1;
	}
	switch (rtex->surface.level[level].mode) {
	case RADEON_SURF_MODE_2D:
		array_mode = V_0280A0_ARRAY_2D_TILED_THIN1;
		break;
	case RADEON_SURF_MODE_1D:
	case RADEON_SURF_MODE_LINEAR_ALIGNED:
	default:
		array_mode = V_0280A0_ARRAY_1D_TILED_THIN1;
		break;
	}

	format = r600_translate_dbformat(surf->base.format);
	assert(format != ~0);

	surf->db_depth_info = S_028010_ARRAY_MODE(array_mode) | S_028010_FORMAT(format);
	surf->db_depth_base = offset >> 8;
	surf->db_depth_view = S_028004_SLICE_START(surf->base.u.tex.first_layer) |
			      S_028004_SLICE_MAX(surf->base.u.tex.last_layer);
	surf->db_depth_size = S_028000_PITCH_TILE_MAX(pitch) | S_028000_SLICE_TILE_MAX(slice);
	surf->db_prefetch_limit = (rtex->surface.level[level].nblk_y / 8) - 1;

	/* use htile only for first level */
	if (rtex->htile_buffer && !level) {
		surf->db_htile_data_base = 0;
		surf->db_htile_surface = S_028D24_HTILE_WIDTH(1) |
					 S_028D24_HTILE_HEIGHT(1) |
					 S_028D24_FULL_CACHE(1);
		/* preload is not working properly on r6xx/r7xx */
		surf->db_depth_info |= S_028010_TILE_SURFACE_ENABLE(1);
	}

	surf->depth_initialized = true;
}
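/* Bind a new framebuffer: initialize any color/depth surfaces that haven't
 * been set up yet, update derived state and mark the dependent atoms dirty. */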
static void r600_set_framebuffer_state(struct pipe_context *ctx,
				       const struct pipe_framebuffer_state *state)
{
	struct r600_context *rctx = (struct r600_context *)ctx;
	struct r600_surface *surf;
	struct r600_texture *rtex;
	unsigned i;

	/* Flush TC when changing the framebuffer state, because the only
	 * client not using TC that can change textures is the framebuffer.
	 * Other places don't typically have to flush TC.
	 */
	rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE |
			 R600_CONTEXT_FLUSH_AND_INV |
			 R600_CONTEXT_FLUSH_AND_INV_CB |
			 R600_CONTEXT_FLUSH_AND_INV_CB_META |
			 R600_CONTEXT_FLUSH_AND_INV_DB |
			 R600_CONTEXT_FLUSH_AND_INV_DB_META |
			 R600_CONTEXT_INV_TEX_CACHE;

	/* Set the new state. */
	util_copy_framebuffer_state(&rctx->framebuffer.state, state);

	rctx->framebuffer.export_16bpc = state->nr_cbufs != 0;
	rctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] &&
					   util_format_is_pure_integer(state->cbufs[0]->format);
	rctx->framebuffer.compressed_cb_mask = 0;
	rctx->framebuffer.is_msaa_resolve = state->nr_cbufs == 2 &&
					    state->cbufs[0] && state->cbufs[1] &&
					    state->cbufs[0]->texture->nr_samples > 1 &&
					    state->cbufs[1]->texture->nr_samples <= 1;
	rctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state);

	/* Colorbuffers. */
	for (i = 0; i < state->nr_cbufs; i++) {
		/* The resolve buffer must have CMASK and FMASK to prevent hardlocks on R6xx. */
		bool force_cmask_fmask = rctx->b.chip_class == R600 &&
					 rctx->framebuffer.is_msaa_resolve &&
					 i == 1;

		surf = (struct r600_surface*)state->cbufs[i];
		if (!surf)
			continue;

		rtex = (struct r600_texture*)surf->base.texture;
		r600_context_add_resource_size(ctx, state->cbufs[i]->texture);

		if (!surf->color_initialized || force_cmask_fmask) {
			r600_init_color_surface(rctx, surf, force_cmask_fmask);
			if (force_cmask_fmask) {
				/* re-initialize later without compression */
				surf->color_initialized = false;
			}
		}

		if (!surf->export_16bpc) {
			rctx->framebuffer.export_16bpc = false;
		}

		if (rtex->fmask.size) {
			rctx->framebuffer.compressed_cb_mask |= 1 << i;
		}
	}

	/* Update alpha-test state dependencies.
	 * Alpha-test is done on the first colorbuffer only. */
	if (state->nr_cbufs) {
		bool alphatest_bypass = false;

		surf = (struct r600_surface*)state->cbufs[0];
		if (surf) {
			alphatest_bypass = surf->alphatest_bypass;
		}

		if (rctx->alphatest_state.bypass != alphatest_bypass) {
			rctx->alphatest_state.bypass = alphatest_bypass;
			r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
		}
	}

	/* ZS buffer. */
	if (state->zsbuf) {
		surf = (struct r600_surface*)state->zsbuf;

		r600_context_add_resource_size(ctx, state->zsbuf->texture);

		if (!surf->depth_initialized) {
			r600_init_depth_surface(rctx, surf);
		}

		if (state->zsbuf->format != rctx->poly_offset_state.zs_format) {
			rctx->poly_offset_state.zs_format = state->zsbuf->format;
			r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom);
		}

		if (rctx->db_state.rsurf != surf) {
			rctx->db_state.rsurf = surf;
			r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
		}
	} else if (rctx->db_state.rsurf) {
		rctx->db_state.rsurf = NULL;
		r600_mark_atom_dirty(rctx, &rctx->db_state.atom);
		r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}

	if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs) {
		rctx->cb_misc_state.nr_cbufs = state->nr_cbufs;
		r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom);
	}

	if (state->nr_cbufs == 0 && rctx->alphatest_state.bypass) {
		rctx->alphatest_state.bypass = false;
		r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom);
	}

	/* Calculate the CS size. */
	rctx->framebuffer.atom.num_dw =
		10 /*COLOR_INFO*/ + 4 /*SCISSOR*/ + 3 /*SHADER_CONTROL*/ + 8 /*MSAA*/;

	if (rctx->framebuffer.state.nr_cbufs) {
		rctx->framebuffer.atom.num_dw += 15 * rctx->framebuffer.state.nr_cbufs;
		rctx->framebuffer.atom.num_dw += 3 * (2 + rctx->framebuffer.state.nr_cbufs);
	}
	if (rctx->framebuffer.state.zsbuf) {
		rctx->framebuffer.atom.num_dw += 16;
	} else if (rctx->screen->b.info.drm_minor >= 18) {
		rctx->framebuffer.atom.num_dw += 3;
	}
	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770) {
		rctx->framebuffer.atom.num_dw += 2;
	}

	r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom);

	r600_set_sample_locations_constant_buffer(rctx);
}
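/* MSAA sample positions: each nibble is a signed 4-bit offset from the pixel
 * center in 1/16th-pixel units, packed for the PA_SC_AA_SAMPLE_LOCS registers;
 * r600_get_sample_position() below decodes a nibble n as (n + 8) / 16. */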
static uint32_t sample_locs_2x[] = {
	FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
	FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4),
};
static unsigned max_dist_2x = 4;

static uint32_t sample_locs_4x[] = {
	FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
	FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6),
};
static unsigned max_dist_4x = 6;
static uint32_t sample_locs_8x[] = {
	FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3),
	FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7),
};
static unsigned max_dist_8x = 7;

static void r600_get_sample_position(struct pipe_context *ctx,
				     unsigned sample_count,
				     unsigned sample_index,
				     float *out_value)
{
	int offset, index;
	struct {
		int idx:4;
	} val;
	switch (sample_count) {
	case 1:
	default:
		out_value[0] = out_value[1] = 0.5;
		break;
	case 2:
		offset = 4 * (sample_index * 2);
		val.idx = (sample_locs_2x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_2x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 4:
		offset = 4 * (sample_index * 2);
		val.idx = (sample_locs_4x[0] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_4x[0] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	case 8:
		offset = 4 * (sample_index % 4 * 2);
		index = (sample_index / 4);
		val.idx = (sample_locs_8x[index] >> offset) & 0xf;
		out_value[0] = (float)(val.idx + 8) / 16.0f;
		val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf;
		out_value[1] = (float)(val.idx + 8) / 16.0f;
		break;
	}
}

static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	unsigned max_dist = 0;

	if (rctx->b.family == CHIP_R600) {
		switch (nr_samples) {
		default:
			nr_samples = 0;
			break;
		case 2:
			radeon_set_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]);
			max_dist = max_dist_2x;
			break;
		case 4:
			radeon_set_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]);
			max_dist = max_dist_4x;
			break;
		case 8:
			radeon_set_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2);
			radeon_emit(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */
			radeon_emit(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */
			max_dist = max_dist_8x;
			break;
		}
	} else {
		switch (nr_samples) {
		default:
			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			nr_samples = 0;
			break;
		case 2:
			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			max_dist = max_dist_2x;
			break;
		case 4:
			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			max_dist = max_dist_4x;
			break;
		case 8:
			radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2);
			radeon_emit(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */
			radeon_emit(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */
			max_dist = max_dist_8x;
			break;
		}
	}

	if (nr_samples > 1) {
		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
		radeon_emit(cs, S_028C00_LAST_PIXEL(1) |
			    S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */
		radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) |
			    S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */
	} else {
		radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2);
		radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */
		radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */
	}
}

static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct pipe_framebuffer_state *state = &rctx->framebuffer.state;
	unsigned nr_cbufs = state->nr_cbufs;
	struct r600_surface **cb = (struct r600_surface**)&state->cbufs[0];
	unsigned i, sbu = 0;

	/* Colorbuffers. */
	radeon_set_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8);
	for (i = 0; i < nr_cbufs; i++) {
		radeon_emit(cs, cb[i] ? cb[i]->cb_color_info : 0);
	}
	/* set CB_COLOR1_INFO for possible dual-src blending */
	if (i == 1 && cb[0]) {
		radeon_emit(cs, cb[0]->cb_color_info);
		i++;
	}
	for (; i < 8; i++) {
		radeon_emit(cs, 0);
	}

	if (nr_cbufs) {
		for (i = 0; i < nr_cbufs; i++) {
			unsigned reloc;

			if (!cb[i])
				continue;

			/* COLOR_BASE */
			radeon_set_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base);

			reloc = radeon_add_to_buffer_list(&rctx->b,
							  &rctx->b.gfx,
							  (struct r600_resource*)cb[i]->base.texture,
							  RADEON_USAGE_READWRITE,
							  cb[i]->base.texture->nr_samples > 1 ?
								  RADEON_PRIO_COLOR_BUFFER_MSAA :
								  RADEON_PRIO_COLOR_BUFFER);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, reloc);

			/* FMASK */
			radeon_set_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask);

			reloc = radeon_add_to_buffer_list(&rctx->b,
							  &rctx->b.gfx,
							  cb[i]->cb_buffer_fmask,
							  RADEON_USAGE_READWRITE,
							  cb[i]->base.texture->nr_samples > 1 ?
								  RADEON_PRIO_COLOR_BUFFER_MSAA :
								  RADEON_PRIO_COLOR_BUFFER);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, reloc);

			/* CMASK */
			radeon_set_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask);

			reloc = radeon_add_to_buffer_list(&rctx->b,
							  &rctx->b.gfx,
							  cb[i]->cb_buffer_cmask,
							  RADEON_USAGE_READWRITE,
							  cb[i]->base.texture->nr_samples > 1 ?
								  RADEON_PRIO_COLOR_BUFFER_MSAA :
								  RADEON_PRIO_COLOR_BUFFER);
			radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
			radeon_emit(cs, reloc);
		}

		radeon_set_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs);
		for (i = 0; i < nr_cbufs; i++) {
			radeon_emit(cs, cb[i] ? cb[i]->cb_color_size : 0);
		}

		radeon_set_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs);
		for (i = 0; i < nr_cbufs; i++) {
			radeon_emit(cs, cb[i] ? cb[i]->cb_color_view : 0);
		}

		radeon_set_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs);
		for (i = 0; i < nr_cbufs; i++) {
			radeon_emit(cs, cb[i] ? cb[i]->cb_color_mask : 0);
		}

		sbu |= SURFACE_BASE_UPDATE_COLOR_NUM(nr_cbufs);
	}

	/* SURFACE_BASE_UPDATE */
	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) {
		radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
		radeon_emit(cs, sbu);
		sbu = 0;
	}

	/* Zbuffer. */
	if (state->zsbuf) {
		struct r600_surface *surf = (struct r600_surface*)state->zsbuf;
		unsigned reloc = radeon_add_to_buffer_list(&rctx->b,
							   &rctx->b.gfx,
							   (struct r600_resource*)state->zsbuf->texture,
							   RADEON_USAGE_READWRITE,
							   surf->base.texture->nr_samples > 1 ?
								   RADEON_PRIO_DEPTH_BUFFER_MSAA :
								   RADEON_PRIO_DEPTH_BUFFER);

		radeon_set_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2);
		radeon_emit(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */
		radeon_emit(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */
		radeon_set_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2);
		radeon_emit(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */
		radeon_emit(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */

		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc);

		radeon_set_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit);

		sbu |= SURFACE_BASE_UPDATE_DEPTH;
	} else if (rctx->screen->b.info.drm_minor >= 18) {
		/* DRM 2.6.18 allows the INVALID format to disable depth/stencil.
		 * Older kernels are out of luck. */
		radeon_set_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID));
	}

	/* SURFACE_BASE_UPDATE */
	if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) {
		radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0));
		radeon_emit(cs, sbu);
		sbu = 0;
	}

	/* Framebuffer dimensions. */
	radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2);
	radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) |
		    S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */
	radeon_emit(cs, S_028244_BR_X(state->width) |
		    S_028244_BR_Y(state->height)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */

	if (rctx->framebuffer.is_msaa_resolve) {
		radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1);
	} else {
		/* Always enable the first colorbuffer in CB_SHADER_CONTROL. This
		 * ensures that the alpha-test works even if there is
		 * no colorbuffer bound. */
		radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL,
				       (1ull << MAX2(nr_cbufs, 1)) - 1);
	}

	r600_emit_msaa_state(rctx, rctx->framebuffer.nr_samples);
}

static void r600_set_min_samples(struct pipe_context *ctx, unsigned min_samples)
{
	struct r600_context *rctx = (struct r600_context *)ctx;

	if (rctx->ps_iter_samples == min_samples)
		return;

	rctx->ps_iter_samples = min_samples;
	if (rctx->framebuffer.nr_samples > 1) {
		r600_mark_atom_dirty(rctx, &rctx->rasterizer_state.atom);
		if (rctx->b.chip_class == R600)
			r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom);
	}
}

static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom;

	if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) {
		radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
		if (rctx->b.chip_class == R600) {
			radeon_emit(cs, 0xff); /* R_028238_CB_TARGET_MASK */
			radeon_emit(cs, 0xff); /* R_02823C_CB_SHADER_MASK */
		} else {
			radeon_emit(cs, 0xf); /* R_028238_CB_TARGET_MASK */
			radeon_emit(cs, 0xf); /* R_02823C_CB_SHADER_MASK */
		}
		radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control);
	} else {
		unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1;
		unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1;
		unsigned multiwrite = a->multiwrite && a->nr_cbufs > 1;

		radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2);
		radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */
		/* Always enable the first color output to make sure alpha-test works even without one. */
		radeon_emit(cs, 0xf | (multiwrite ? fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */
		radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL,
				       a->cb_color_control |
				       S_028808_MULTIWRITE_ENABLE(multiwrite));
	}
}

static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_db_state *a = (struct r600_db_state*)atom;

	if (a->rsurf && a->rsurf->db_htile_surface) {
		struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture;
		unsigned reloc_idx;

		radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value));
		radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface);
		radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base);
		reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rtex->htile_buffer,
						      RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE);
		radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
		radeon_emit(cs, reloc_idx);
	} else {
		radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0);
	}
}

static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom)
{
	struct radeon_winsys_cs *cs = rctx->b.gfx.cs;
	struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom;
	unsigned db_render_control = 0;
	unsigned db_render_override =
		S_028D10_FORCE_HIS_ENABLE0(V_028D10_FORCE_DISABLE) |
		S_028D10_FORCE_HIS_ENABLE1(V_028D10_FORCE_DISABLE);

	if (rctx->b.chip_class >= R700) {
		switch (a->ps_conservative_z) {
		default: /* fall through */
		case TGSI_FS_DEPTH_LAYOUT_ANY:
			db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_ANY_Z);
			break;
		case TGSI_FS_DEPTH_LAYOUT_GREATER:
			db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_GREATER_THAN_Z);
			break;
		case TGSI_FS_DEPTH_LAYOUT_LESS:
			db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_LESS_THAN_Z);
			break;
		}
	}

	if (rctx->b.num_occlusion_queries > 0 &&
	    !a->occlusion_queries_disabled) {
		if (rctx->b.chip_class >= R700) {
			db_render_control |= S_028D0C_R700_PERFECT_ZPASS_COUNTS(1);
		}
		db_render_override |= S_028D10_NOOP_CULL_DISABLE(1);
	} else {
		db_render_control |= S_028D0C_ZPASS_INCREMENT_DISABLE(1);
	}

	if (rctx->db_state.rsurf && rctx->db_state.rsurf->db_htile_surface) {
		/* FORCE_OFF means HiZ/HiS are determined by DB_SHADER_CONTROL */
		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_OFF);
		/* This is to fix a lockup when hyperz and alpha test are enabled at
		 * the same time; somehow the GPU gets confused about which order to
		 * use for the Z test.
		 */
		if (rctx->alphatest_state.sx_alpha_test_control) {
			db_render_override |= S_028D10_FORCE_SHADER_Z_ORDER(1);
		}
	} else {
		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE);
	}
	if (rctx->b.chip_class == R600 && rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0) {
		/* Sample shading and hyperz cause lockups on R6xx chips. */
		db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE);
	}
	if (a->flush_depthstencil_through_cb) {
		assert(a->copy_depth || a->copy_stencil);

		db_render_control |= S_028D0C_DEPTH_COPY_ENABLE(a->copy_depth) |
				     S_028D0C_STENCIL_COPY_ENABLE(a->copy_stencil) |
S_028D0C_COPY_CENTROID(1) | 1600 S_028D0C_COPY_SAMPLE(a->copy_sample); 1601 1602 if (rctx->b.chip_class == R600) 1603 db_render_override |= S_028D10_NOOP_CULL_DISABLE(1); 1604 1605 if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 || 1606 rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635) 1607 db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE); 1608 } else if (a->flush_depth_inplace || a->flush_stencil_inplace) { 1609 db_render_control |= S_028D0C_DEPTH_COMPRESS_DISABLE(a->flush_depth_inplace) | 1610 S_028D0C_STENCIL_COMPRESS_DISABLE(a->flush_stencil_inplace); 1611 db_render_override |= S_028D10_NOOP_CULL_DISABLE(1); 1612 } 1613 if (a->htile_clear) { 1614 db_render_control |= S_028D0C_DEPTH_CLEAR_ENABLE(1); 1615 } 1616 1617 /* RV770 workaround for a hang with 8x MSAA. */ 1618 if (rctx->b.family == CHIP_RV770 && a->log_samples == 3) { 1619 db_render_override |= S_028D10_MAX_TILES_IN_DTT(6); 1620 } 1621 1622 radeon_set_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2); 1623 radeon_emit(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */ 1624 radeon_emit(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */ 1625 radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control); 1626 } 1627 1628 static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom) 1629 { 1630 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1631 struct r600_config_state *a = (struct r600_config_state*)atom; 1632 1633 radeon_set_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1); 1634 radeon_set_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2); 1635 } 1636 1637 static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom) 1638 { 1639 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1640 uint32_t dirty_mask = rctx->vertex_buffer_state.dirty_mask; 1641 1642 while (dirty_mask) { 1643 struct pipe_vertex_buffer *vb; 1644 struct r600_resource *rbuffer; 1645 unsigned offset; 1646 unsigned buffer_index = u_bit_scan(&dirty_mask); 1647 1648 vb = &rctx->vertex_buffer_state.vb[buffer_index]; 1649 rbuffer = (struct r600_resource*)vb->buffer; 1650 assert(rbuffer); 1651 1652 offset = vb->buffer_offset; 1653 1654 /* fetch resources start at index 320 (OFFSET_FS) */ 1655 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); 1656 radeon_emit(cs, (R600_FETCH_CONSTANTS_OFFSET_FS + buffer_index) * 7); 1657 radeon_emit(cs, offset); /* RESOURCEi_WORD0 */ 1658 radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */ 1659 radeon_emit(cs, /* RESOURCEi_WORD2 */ 1660 S_038008_ENDIAN_SWAP(r600_endian_swap(32)) | 1661 S_038008_STRIDE(vb->stride)); 1662 radeon_emit(cs, 0); /* RESOURCEi_WORD3 */ 1663 radeon_emit(cs, 0); /* RESOURCEi_WORD4 */ 1664 radeon_emit(cs, 0); /* RESOURCEi_WORD5 */ 1665 radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */ 1666 1667 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1668 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer, 1669 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER)); 1670 } 1671 } 1672 1673 static void r600_emit_constant_buffers(struct r600_context *rctx, 1674 struct r600_constbuf_state *state, 1675 unsigned buffer_id_base, 1676 unsigned reg_alu_constbuf_size, 1677 unsigned reg_alu_const_cache) 1678 { 1679 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1680 uint32_t dirty_mask = state->dirty_mask; 1681 1682 while (dirty_mask) { 1683 struct pipe_constant_buffer *cb; 1684 struct r600_resource *rbuffer; 1685 unsigned offset; 1686 
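		/* Walk the dirty-slot bitmask one constant buffer at a time: ffs()
		 * below yields the 1-based index of the lowest set bit, and that bit
		 * is cleared again at the bottom of the loop.  The buffer size and
		 * cache base are programmed in 256-byte units, hence the
		 * DIV_ROUND_UP by 256 and the offset >> 8. */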
unsigned buffer_index = ffs(dirty_mask) - 1; 1687 unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER); 1688 cb = &state->cb[buffer_index]; 1689 rbuffer = (struct r600_resource*)cb->buffer; 1690 assert(rbuffer); 1691 1692 offset = cb->buffer_offset; 1693 1694 if (!gs_ring_buffer) { 1695 radeon_set_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4, 1696 DIV_ROUND_UP(cb->buffer_size, 256)); 1697 radeon_set_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8); 1698 } 1699 1700 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1701 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer, 1702 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER)); 1703 1704 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); 1705 radeon_emit(cs, (buffer_id_base + buffer_index) * 7); 1706 radeon_emit(cs, offset); /* RESOURCEi_WORD0 */ 1707 radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */ 1708 radeon_emit(cs, /* RESOURCEi_WORD2 */ 1709 S_038008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) | 1710 S_038008_STRIDE(gs_ring_buffer ? 4 : 16)); 1711 radeon_emit(cs, 0); /* RESOURCEi_WORD3 */ 1712 radeon_emit(cs, 0); /* RESOURCEi_WORD4 */ 1713 radeon_emit(cs, 0); /* RESOURCEi_WORD5 */ 1714 radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */ 1715 1716 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1717 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer, 1718 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER)); 1719 1720 dirty_mask &= ~(1 << buffer_index); 1721 } 1722 state->dirty_mask = 0; 1723 } 1724 1725 static void r600_emit_vs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom) 1726 { 1727 r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX], 1728 R600_FETCH_CONSTANTS_OFFSET_VS, 1729 R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 1730 R_028980_ALU_CONST_CACHE_VS_0); 1731 } 1732 1733 static void r600_emit_gs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom) 1734 { 1735 r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY], 1736 R600_FETCH_CONSTANTS_OFFSET_GS, 1737 R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 1738 R_0289C0_ALU_CONST_CACHE_GS_0); 1739 } 1740 1741 static void r600_emit_ps_constant_buffers(struct r600_context *rctx, struct r600_atom *atom) 1742 { 1743 r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT], 1744 R600_FETCH_CONSTANTS_OFFSET_PS, 1745 R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 1746 R_028940_ALU_CONST_CACHE_PS_0); 1747 } 1748 1749 static void r600_emit_sampler_views(struct r600_context *rctx, 1750 struct r600_samplerview_state *state, 1751 unsigned resource_id_base) 1752 { 1753 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1754 uint32_t dirty_mask = state->dirty_mask; 1755 1756 while (dirty_mask) { 1757 struct r600_pipe_sampler_view *rview; 1758 unsigned resource_index = u_bit_scan(&dirty_mask); 1759 unsigned reloc; 1760 1761 rview = state->views[resource_index]; 1762 assert(rview); 1763 1764 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); 1765 radeon_emit(cs, (resource_id_base + resource_index) * 7); 1766 radeon_emit_array(cs, rview->tex_resource_words, 7); 1767 1768 reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rview->tex_resource, 1769 RADEON_USAGE_READ, 1770 r600_get_sampler_view_priority(rview->tex_resource)); 1771 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1772 radeon_emit(cs, reloc); 1773 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1774 radeon_emit(cs, reloc); 1775 } 1776 state->dirty_mask = 0; 1777 } 1778 1779 1780 static 
void r600_emit_vs_sampler_views(struct r600_context *rctx, struct r600_atom *atom) 1781 { 1782 r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views, R600_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS); 1783 } 1784 1785 static void r600_emit_gs_sampler_views(struct r600_context *rctx, struct r600_atom *atom) 1786 { 1787 r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views, R600_FETCH_CONSTANTS_OFFSET_GS + R600_MAX_CONST_BUFFERS); 1788 } 1789 1790 static void r600_emit_ps_sampler_views(struct r600_context *rctx, struct r600_atom *atom) 1791 { 1792 r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views, R600_FETCH_CONSTANTS_OFFSET_PS + R600_MAX_CONST_BUFFERS); 1793 } 1794 1795 static void r600_emit_sampler_states(struct r600_context *rctx, 1796 struct r600_textures_info *texinfo, 1797 unsigned resource_id_base, 1798 unsigned border_color_reg) 1799 { 1800 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1801 uint32_t dirty_mask = texinfo->states.dirty_mask; 1802 1803 while (dirty_mask) { 1804 struct r600_pipe_sampler_state *rstate; 1805 struct r600_pipe_sampler_view *rview; 1806 unsigned i = u_bit_scan(&dirty_mask); 1807 1808 rstate = texinfo->states.states[i]; 1809 assert(rstate); 1810 rview = texinfo->views.views[i]; 1811 1812 /* TEX_ARRAY_OVERRIDE must be set for array textures to disable 1813 * filtering between layers. 1814 * Don't update TEX_ARRAY_OVERRIDE if we don't have the sampler view. 1815 */ 1816 if (rview) { 1817 enum pipe_texture_target target = rview->base.texture->target; 1818 if (target == PIPE_TEXTURE_1D_ARRAY || 1819 target == PIPE_TEXTURE_2D_ARRAY) { 1820 rstate->tex_sampler_words[0] |= S_03C000_TEX_ARRAY_OVERRIDE(1); 1821 texinfo->is_array_sampler[i] = true; 1822 } else { 1823 rstate->tex_sampler_words[0] &= C_03C000_TEX_ARRAY_OVERRIDE; 1824 texinfo->is_array_sampler[i] = false; 1825 } 1826 } 1827 1828 radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0)); 1829 radeon_emit(cs, (resource_id_base + i) * 3); 1830 radeon_emit_array(cs, rstate->tex_sampler_words, 3); 1831 1832 if (rstate->border_color_use) { 1833 unsigned offset; 1834 1835 offset = border_color_reg; 1836 offset += i * 16; 1837 radeon_set_config_reg_seq(cs, offset, 4); 1838 radeon_emit_array(cs, rstate->border_color.ui, 4); 1839 } 1840 } 1841 texinfo->states.dirty_mask = 0; 1842 } 1843 1844 static void r600_emit_vs_sampler_states(struct r600_context *rctx, struct r600_atom *atom) 1845 { 1846 r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 18, R_00A600_TD_VS_SAMPLER0_BORDER_RED); 1847 } 1848 1849 static void r600_emit_gs_sampler_states(struct r600_context *rctx, struct r600_atom *atom) 1850 { 1851 r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY], 36, R_00A800_TD_GS_SAMPLER0_BORDER_RED); 1852 } 1853 1854 static void r600_emit_ps_sampler_states(struct r600_context *rctx, struct r600_atom *atom) 1855 { 1856 r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT], 0, R_00A400_TD_PS_SAMPLER0_BORDER_RED); 1857 } 1858 1859 static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_atom *atom) 1860 { 1861 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1862 unsigned tmp; 1863 1864 tmp = S_009508_DISABLE_CUBE_ANISO(1) | 1865 S_009508_SYNC_GRADIENT(1) | 1866 S_009508_SYNC_WALKER(1) | 1867 S_009508_SYNC_ALIGNER(1); 1868 if (!rctx->seamless_cube_map.enabled) { 1869 tmp |= S_009508_DISABLE_CUBE_WRAP(1); 1870 } 1871 radeon_set_config_reg(cs, R_009508_TA_CNTL_AUX, tmp); 1872 } 1873 1874 static void 
r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a) 1875 { 1876 struct r600_sample_mask *s = (struct r600_sample_mask*)a; 1877 uint8_t mask = s->sample_mask; 1878 1879 radeon_set_context_reg(rctx->b.gfx.cs, R_028C48_PA_SC_AA_MASK, 1880 mask | (mask << 8) | (mask << 16) | (mask << 24)); 1881 } 1882 1883 static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a) 1884 { 1885 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1886 struct r600_cso_state *state = (struct r600_cso_state*)a; 1887 struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso; 1888 1889 radeon_set_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8); 1890 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1891 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer, 1892 RADEON_USAGE_READ, 1893 RADEON_PRIO_SHADER_BINARY)); 1894 } 1895 1896 static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a) 1897 { 1898 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1899 struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a; 1900 1901 uint32_t v2 = 0, primid = 0; 1902 1903 if (rctx->vs_shader->current->shader.vs_as_gs_a) { 1904 v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_A); 1905 primid = 1; 1906 } 1907 1908 if (state->geom_enable) { 1909 uint32_t cut_val; 1910 1911 if (rctx->gs_shader->gs_max_out_vertices <= 128) 1912 cut_val = V_028A40_GS_CUT_128; 1913 else if (rctx->gs_shader->gs_max_out_vertices <= 256) 1914 cut_val = V_028A40_GS_CUT_256; 1915 else if (rctx->gs_shader->gs_max_out_vertices <= 512) 1916 cut_val = V_028A40_GS_CUT_512; 1917 else 1918 cut_val = V_028A40_GS_CUT_1024; 1919 1920 v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) | 1921 S_028A40_CUT_MODE(cut_val); 1922 1923 if (rctx->gs_shader->current->shader.gs_prim_id_input) 1924 primid = 1; 1925 } 1926 1927 radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2); 1928 radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid); 1929 } 1930 1931 static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a) 1932 { 1933 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1934 struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a; 1935 struct r600_resource *rbuffer; 1936 1937 radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); 1938 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 1939 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); 1940 1941 if (state->enable) { 1942 rbuffer =(struct r600_resource*)state->esgs_ring.buffer; 1943 radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0); 1944 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1945 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer, 1946 RADEON_USAGE_READWRITE, 1947 RADEON_PRIO_SHADER_RINGS)); 1948 radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 1949 state->esgs_ring.buffer_size >> 8); 1950 1951 rbuffer =(struct r600_resource*)state->gsvs_ring.buffer; 1952 radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0); 1953 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1954 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer, 1955 RADEON_USAGE_READWRITE, 1956 RADEON_PRIO_SHADER_RINGS)); 1957 radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 1958 state->gsvs_ring.buffer_size >> 8); 1959 } else { 1960 radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0); 1961 radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0); 1962 } 1963 1964 radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); 1965 
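	/* The same 3D-idle wait plus VGT flush that was issued on entry is
	 * repeated here after the ring registers have been reprogrammed,
	 * presumably so no in-flight work observes a half-updated ESGS/GSVS
	 * ring setup. */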
radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 1966 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); 1967 } 1968 1969 /* Adjust GPR allocation on R6xx/R7xx */ 1970 bool r600_adjust_gprs(struct r600_context *rctx) 1971 { 1972 unsigned num_gprs[R600_NUM_HW_STAGES]; 1973 unsigned new_gprs[R600_NUM_HW_STAGES]; 1974 unsigned cur_gprs[R600_NUM_HW_STAGES]; 1975 unsigned def_gprs[R600_NUM_HW_STAGES]; 1976 unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs; 1977 unsigned max_gprs; 1978 unsigned tmp, tmp2; 1979 unsigned i; 1980 bool need_recalc = false, use_default = true; 1981 1982 /* hardware will reserve twice num_clause_temp_gprs */ 1983 max_gprs = def_num_clause_temp_gprs * 2; 1984 for (i = 0; i < R600_NUM_HW_STAGES; i++) { 1985 def_gprs[i] = rctx->default_gprs[i]; 1986 max_gprs += def_gprs[i]; 1987 } 1988 1989 cur_gprs[R600_HW_STAGE_PS] = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1); 1990 cur_gprs[R600_HW_STAGE_VS] = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1); 1991 cur_gprs[R600_HW_STAGE_GS] = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2); 1992 cur_gprs[R600_HW_STAGE_ES] = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2); 1993 1994 num_gprs[R600_HW_STAGE_PS] = rctx->ps_shader->current->shader.bc.ngpr; 1995 if (rctx->gs_shader) { 1996 num_gprs[R600_HW_STAGE_ES] = rctx->vs_shader->current->shader.bc.ngpr; 1997 num_gprs[R600_HW_STAGE_GS] = rctx->gs_shader->current->shader.bc.ngpr; 1998 num_gprs[R600_HW_STAGE_VS] = rctx->gs_shader->current->gs_copy_shader->shader.bc.ngpr; 1999 } else { 2000 num_gprs[R600_HW_STAGE_ES] = 0; 2001 num_gprs[R600_HW_STAGE_GS] = 0; 2002 num_gprs[R600_HW_STAGE_VS] = rctx->vs_shader->current->shader.bc.ngpr; 2003 } 2004 2005 for (i = 0; i < R600_NUM_HW_STAGES; i++) { 2006 new_gprs[i] = num_gprs[i]; 2007 if (new_gprs[i] > cur_gprs[i]) 2008 need_recalc = true; 2009 if (new_gprs[i] > def_gprs[i]) 2010 use_default = false; 2011 } 2012 2013 /* the sum of all SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS must <= to max_gprs */ 2014 if (!need_recalc) 2015 return true; 2016 2017 /* try to use switch back to default */ 2018 if (!use_default) { 2019 /* always privilege vs stage so that at worst we have the 2020 * pixel stage producing wrong output (not the vertex 2021 * stage) */ 2022 new_gprs[R600_HW_STAGE_PS] = max_gprs - def_num_clause_temp_gprs * 2; 2023 for (i = R600_HW_STAGE_VS; i < R600_NUM_HW_STAGES; i++) 2024 new_gprs[R600_HW_STAGE_PS] -= new_gprs[i]; 2025 } else { 2026 for (i = 0; i < R600_NUM_HW_STAGES; i++) 2027 new_gprs[i] = def_gprs[i]; 2028 } 2029 2030 /* SQ_PGM_RESOURCES_*.NUM_GPRS must always be program to a value <= 2031 * SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS otherwise the GPU will lockup 2032 * Also if a shader use more gpr than SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS 2033 * it will lockup. So in this case just discard the draw command 2034 * and don't change the current gprs repartitions. 
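	 * As a worked example with the CHIP_R600 defaults set up in
	 * r600_init_atom_start_cs below: max_gprs = 2*4 clause-temp + 192 PS +
	 * 56 VS + 0 GS + 0 ES = 256, so any non-default split leaves
	 * NUM_PS_GPRS + NUM_VS_GPRS + NUM_GS_GPRS + NUM_ES_GPRS a budget of 248.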
2035 */ 2036 for (i = 0; i < R600_NUM_HW_STAGES; i++) { 2037 if (num_gprs[i] > new_gprs[i]) { 2038 R600_ERR("shaders require too many register (%d + %d + %d + %d) " 2039 "for a combined maximum of %d\n", 2040 num_gprs[R600_HW_STAGE_PS], num_gprs[R600_HW_STAGE_VS], num_gprs[R600_HW_STAGE_ES], num_gprs[R600_HW_STAGE_GS], max_gprs); 2041 return false; 2042 } 2043 } 2044 2045 /* in some case we endup recomputing the current value */ 2046 tmp = S_008C04_NUM_PS_GPRS(new_gprs[R600_HW_STAGE_PS]) | 2047 S_008C04_NUM_VS_GPRS(new_gprs[R600_HW_STAGE_VS]) | 2048 S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs); 2049 2050 tmp2 = S_008C08_NUM_ES_GPRS(new_gprs[R600_HW_STAGE_ES]) | 2051 S_008C08_NUM_GS_GPRS(new_gprs[R600_HW_STAGE_GS]); 2052 if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp || rctx->config_state.sq_gpr_resource_mgmt_2 != tmp2) { 2053 rctx->config_state.sq_gpr_resource_mgmt_1 = tmp; 2054 rctx->config_state.sq_gpr_resource_mgmt_2 = tmp2; 2055 r600_mark_atom_dirty(rctx, &rctx->config_state.atom); 2056 rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE; 2057 } 2058 return true; 2059 } 2060 2061 void r600_init_atom_start_cs(struct r600_context *rctx) 2062 { 2063 int ps_prio; 2064 int vs_prio; 2065 int gs_prio; 2066 int es_prio; 2067 int num_ps_gprs; 2068 int num_vs_gprs; 2069 int num_gs_gprs; 2070 int num_es_gprs; 2071 int num_temp_gprs; 2072 int num_ps_threads; 2073 int num_vs_threads; 2074 int num_gs_threads; 2075 int num_es_threads; 2076 int num_ps_stack_entries; 2077 int num_vs_stack_entries; 2078 int num_gs_stack_entries; 2079 int num_es_stack_entries; 2080 enum radeon_family family; 2081 struct r600_command_buffer *cb = &rctx->start_cs_cmd; 2082 uint32_t tmp, i; 2083 2084 r600_init_command_buffer(cb, 256); 2085 2086 /* R6xx requires this packet at the start of each command buffer */ 2087 if (rctx->b.chip_class == R600) { 2088 r600_store_value(cb, PKT3(PKT3_START_3D_CMDBUF, 0, 0)); 2089 r600_store_value(cb, 0); 2090 } 2091 /* All asics require this one */ 2092 r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0)); 2093 r600_store_value(cb, 0x80000000); 2094 r600_store_value(cb, 0x80000000); 2095 2096 /* We're setting config registers here. */ 2097 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0)); 2098 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4)); 2099 2100 /* This enables pipeline stat & streamout queries. 2101 * They are only disabled by blits. 
2102 */ 2103 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0)); 2104 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0)); 2105 2106 family = rctx->b.family; 2107 ps_prio = 0; 2108 vs_prio = 1; 2109 gs_prio = 2; 2110 es_prio = 3; 2111 switch (family) { 2112 case CHIP_R600: 2113 num_ps_gprs = 192; 2114 num_vs_gprs = 56; 2115 num_temp_gprs = 4; 2116 num_gs_gprs = 0; 2117 num_es_gprs = 0; 2118 num_ps_threads = 136; 2119 num_vs_threads = 48; 2120 num_gs_threads = 4; 2121 num_es_threads = 4; 2122 num_ps_stack_entries = 128; 2123 num_vs_stack_entries = 128; 2124 num_gs_stack_entries = 0; 2125 num_es_stack_entries = 0; 2126 break; 2127 case CHIP_RV630: 2128 case CHIP_RV635: 2129 num_ps_gprs = 84; 2130 num_vs_gprs = 36; 2131 num_temp_gprs = 4; 2132 num_gs_gprs = 0; 2133 num_es_gprs = 0; 2134 num_ps_threads = 144; 2135 num_vs_threads = 40; 2136 num_gs_threads = 4; 2137 num_es_threads = 4; 2138 num_ps_stack_entries = 40; 2139 num_vs_stack_entries = 40; 2140 num_gs_stack_entries = 32; 2141 num_es_stack_entries = 16; 2142 break; 2143 case CHIP_RV610: 2144 case CHIP_RV620: 2145 case CHIP_RS780: 2146 case CHIP_RS880: 2147 default: 2148 num_ps_gprs = 84; 2149 num_vs_gprs = 36; 2150 num_temp_gprs = 4; 2151 num_gs_gprs = 0; 2152 num_es_gprs = 0; 2153 /* use limits 40 VS and at least 16 ES/GS */ 2154 num_ps_threads = 120; 2155 num_vs_threads = 40; 2156 num_gs_threads = 16; 2157 num_es_threads = 16; 2158 num_ps_stack_entries = 40; 2159 num_vs_stack_entries = 40; 2160 num_gs_stack_entries = 32; 2161 num_es_stack_entries = 16; 2162 break; 2163 case CHIP_RV670: 2164 num_ps_gprs = 144; 2165 num_vs_gprs = 40; 2166 num_temp_gprs = 4; 2167 num_gs_gprs = 0; 2168 num_es_gprs = 0; 2169 num_ps_threads = 136; 2170 num_vs_threads = 48; 2171 num_gs_threads = 4; 2172 num_es_threads = 4; 2173 num_ps_stack_entries = 40; 2174 num_vs_stack_entries = 40; 2175 num_gs_stack_entries = 32; 2176 num_es_stack_entries = 16; 2177 break; 2178 case CHIP_RV770: 2179 num_ps_gprs = 130; 2180 num_vs_gprs = 56; 2181 num_temp_gprs = 4; 2182 num_gs_gprs = 31; 2183 num_es_gprs = 31; 2184 num_ps_threads = 180; 2185 num_vs_threads = 60; 2186 num_gs_threads = 4; 2187 num_es_threads = 4; 2188 num_ps_stack_entries = 128; 2189 num_vs_stack_entries = 128; 2190 num_gs_stack_entries = 128; 2191 num_es_stack_entries = 128; 2192 break; 2193 case CHIP_RV730: 2194 case CHIP_RV740: 2195 num_ps_gprs = 84; 2196 num_vs_gprs = 36; 2197 num_temp_gprs = 4; 2198 num_gs_gprs = 0; 2199 num_es_gprs = 0; 2200 num_ps_threads = 180; 2201 num_vs_threads = 60; 2202 num_gs_threads = 4; 2203 num_es_threads = 4; 2204 num_ps_stack_entries = 128; 2205 num_vs_stack_entries = 128; 2206 num_gs_stack_entries = 0; 2207 num_es_stack_entries = 0; 2208 break; 2209 case CHIP_RV710: 2210 num_ps_gprs = 192; 2211 num_vs_gprs = 56; 2212 num_temp_gprs = 4; 2213 num_gs_gprs = 0; 2214 num_es_gprs = 0; 2215 num_ps_threads = 136; 2216 num_vs_threads = 48; 2217 num_gs_threads = 4; 2218 num_es_threads = 4; 2219 num_ps_stack_entries = 128; 2220 num_vs_stack_entries = 128; 2221 num_gs_stack_entries = 0; 2222 num_es_stack_entries = 0; 2223 break; 2224 } 2225 2226 rctx->default_gprs[R600_HW_STAGE_PS] = num_ps_gprs; 2227 rctx->default_gprs[R600_HW_STAGE_VS] = num_vs_gprs; 2228 rctx->default_gprs[R600_HW_STAGE_GS] = 0; 2229 rctx->default_gprs[R600_HW_STAGE_ES] = 0; 2230 2231 rctx->r6xx_num_clause_temp_gprs = num_temp_gprs; 2232 2233 /* SQ_CONFIG */ 2234 tmp = 0; 2235 switch (family) { 2236 case CHIP_RV610: 2237 case CHIP_RV620: 2238 case CHIP_RS780: 2239 case CHIP_RS880: 2240 
case CHIP_RV710: 2241 break; 2242 default: 2243 tmp |= S_008C00_VC_ENABLE(1); 2244 break; 2245 } 2246 tmp |= S_008C00_DX9_CONSTS(0); 2247 tmp |= S_008C00_ALU_INST_PREFER_VECTOR(1); 2248 tmp |= S_008C00_PS_PRIO(ps_prio); 2249 tmp |= S_008C00_VS_PRIO(vs_prio); 2250 tmp |= S_008C00_GS_PRIO(gs_prio); 2251 tmp |= S_008C00_ES_PRIO(es_prio); 2252 r600_store_config_reg(cb, R_008C00_SQ_CONFIG, tmp); 2253 2254 /* SQ_GPR_RESOURCE_MGMT_2 */ 2255 tmp = S_008C08_NUM_GS_GPRS(num_gs_gprs); 2256 tmp |= S_008C08_NUM_ES_GPRS(num_es_gprs); 2257 r600_store_config_reg_seq(cb, R_008C08_SQ_GPR_RESOURCE_MGMT_2, 4); 2258 r600_store_value(cb, tmp); 2259 2260 /* SQ_THREAD_RESOURCE_MGMT */ 2261 tmp = S_008C0C_NUM_PS_THREADS(num_ps_threads); 2262 tmp |= S_008C0C_NUM_VS_THREADS(num_vs_threads); 2263 tmp |= S_008C0C_NUM_GS_THREADS(num_gs_threads); 2264 tmp |= S_008C0C_NUM_ES_THREADS(num_es_threads); 2265 r600_store_value(cb, tmp); /* R_008C0C_SQ_THREAD_RESOURCE_MGMT */ 2266 2267 /* SQ_STACK_RESOURCE_MGMT_1 */ 2268 tmp = S_008C10_NUM_PS_STACK_ENTRIES(num_ps_stack_entries); 2269 tmp |= S_008C10_NUM_VS_STACK_ENTRIES(num_vs_stack_entries); 2270 r600_store_value(cb, tmp); /* R_008C10_SQ_STACK_RESOURCE_MGMT_1 */ 2271 2272 /* SQ_STACK_RESOURCE_MGMT_2 */ 2273 tmp = S_008C14_NUM_GS_STACK_ENTRIES(num_gs_stack_entries); 2274 tmp |= S_008C14_NUM_ES_STACK_ENTRIES(num_es_stack_entries); 2275 r600_store_value(cb, tmp); /* R_008C14_SQ_STACK_RESOURCE_MGMT_2 */ 2276 2277 r600_store_config_reg(cb, R_009714_VC_ENHANCE, 0); 2278 2279 if (rctx->b.chip_class >= R700) { 2280 r600_store_context_reg(cb, R_028A50_VGT_ENHANCE, 4); 2281 r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0x00004000); 2282 r600_store_config_reg(cb, R_009830_DB_DEBUG, 0); 2283 r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x00420204); 2284 r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0); 2285 } else { 2286 r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); 2287 r600_store_config_reg(cb, R_009830_DB_DEBUG, 0x82000000); 2288 r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x01020204); 2289 r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 1); 2290 } 2291 r600_store_context_reg_seq(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE, 9); 2292 r600_store_value(cb, 0); /* R_0288A8_SQ_ESGS_RING_ITEMSIZE */ 2293 r600_store_value(cb, 0); /* R_0288AC_SQ_GSVS_RING_ITEMSIZE */ 2294 r600_store_value(cb, 0); /* R_0288B0_SQ_ESTMP_RING_ITEMSIZE */ 2295 r600_store_value(cb, 0); /* R_0288B4_SQ_GSTMP_RING_ITEMSIZE */ 2296 r600_store_value(cb, 0); /* R_0288B8_SQ_VSTMP_RING_ITEMSIZE */ 2297 r600_store_value(cb, 0); /* R_0288BC_SQ_PSTMP_RING_ITEMSIZE */ 2298 r600_store_value(cb, 0); /* R_0288C0_SQ_FBUF_RING_ITEMSIZE */ 2299 r600_store_value(cb, 0); /* R_0288C4_SQ_REDUC_RING_ITEMSIZE */ 2300 r600_store_value(cb, 0); /* R_0288C8_SQ_GS_VERT_ITEMSIZE */ 2301 2302 /* to avoid GPU doing any preloading of constant from random address */ 2303 r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16); 2304 for (i = 0; i < 16; i++) 2305 r600_store_value(cb, 0); 2306 2307 r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16); 2308 for (i = 0; i < 16; i++) 2309 r600_store_value(cb, 0); 2310 2311 r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16); 2312 for (i = 0; i < 16; i++) 2313 r600_store_value(cb, 0); 2314 2315 r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13); 2316 r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */ 2317 r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */ 2318 
r600_store_value(cb, 0); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */ 2319 r600_store_value(cb, 0); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */ 2320 r600_store_value(cb, 0); /* R_028A20_VGT_HOS_REUSE_DEPTH */ 2321 r600_store_value(cb, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */ 2322 r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */ 2323 r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */ 2324 r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */ 2325 r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */ 2326 r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */ 2327 r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */ 2328 r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE, 0); */ 2329 2330 r600_store_context_reg(cb, R_028A84_VGT_PRIMITIVEID_EN, 0); 2331 r600_store_context_reg(cb, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 0); 2332 r600_store_context_reg(cb, R_028AA4_VGT_INSTANCE_STEP_RATE_1, 0); 2333 2334 r600_store_context_reg_seq(cb, R_028AB4_VGT_REUSE_OFF, 2); 2335 r600_store_value(cb, 1); /* R_028AB4_VGT_REUSE_OFF */ 2336 r600_store_value(cb, 0); /* R_028AB8_VGT_VTX_CNT_EN */ 2337 2338 r600_store_context_reg(cb, R_028B20_VGT_STRMOUT_BUFFER_EN, 0); 2339 2340 r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0); 2341 2342 r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0); 2343 2344 r600_store_context_reg_seq(cb, R_0286DC_SPI_FOG_CNTL, 3); 2345 r600_store_value(cb, 0); /* R_0286DC_SPI_FOG_CNTL */ 2346 r600_store_value(cb, 0); /* R_0286E0_SPI_FOG_FUNC_SCALE */ 2347 r600_store_value(cb, 0); /* R_0286E4_SPI_FOG_FUNC_BIAS */ 2348 2349 r600_store_context_reg_seq(cb, R_028D28_DB_SRESULTS_COMPARE_STATE0, 3); 2350 r600_store_value(cb, 0); /* R_028D28_DB_SRESULTS_COMPARE_STATE0 */ 2351 r600_store_value(cb, 0); /* R_028D2C_DB_SRESULTS_COMPARE_STATE1 */ 2352 r600_store_value(cb, 0); /* R_028D30_DB_PRELOAD_CONTROL */ 2353 2354 r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0); 2355 r600_store_context_reg(cb, R_028A48_PA_SC_MPASS_PS_CNTL, 0); 2356 2357 r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0); 2358 r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF); 2359 2360 if (rctx->b.chip_class >= R700) { 2361 r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA); 2362 } 2363 2364 r600_store_context_reg_seq(cb, R_028C30_CB_CLRCMP_CONTROL, 4); 2365 r600_store_value(cb, 0x1000000); /* R_028C30_CB_CLRCMP_CONTROL */ 2366 r600_store_value(cb, 0); /* R_028C34_CB_CLRCMP_SRC */ 2367 r600_store_value(cb, 0xFF); /* R_028C38_CB_CLRCMP_DST */ 2368 r600_store_value(cb, 0xFFFFFFFF); /* R_028C3C_CB_CLRCMP_MSK */ 2369 2370 r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2); 2371 r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */ 2372 r600_store_value(cb, S_028034_BR_X(8192) | S_028034_BR_Y(8192)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */ 2373 2374 r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2); 2375 r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */ 2376 r600_store_value(cb, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */ 2377 2378 r600_store_context_reg_seq(cb, R_0288CC_SQ_PGM_CF_OFFSET_PS, 5); 2379 r600_store_value(cb, 0); /* R_0288CC_SQ_PGM_CF_OFFSET_PS */ 2380 r600_store_value(cb, 0); /* R_0288D0_SQ_PGM_CF_OFFSET_VS */ 2381 r600_store_value(cb, 0); /* R_0288D4_SQ_PGM_CF_OFFSET_GS */ 2382 r600_store_value(cb, 0); /* R_0288D8_SQ_PGM_CF_OFFSET_ES */ 2383 r600_store_value(cb, 0); /* R_0288DC_SQ_PGM_CF_OFFSET_FS */ 2384 2385 
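	/* Writing ~0 below presumably clears every entry of the SQ vertex
	 * semantic table, so no stale semantic mappings survive from an earlier
	 * command stream. */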
r600_store_context_reg(cb, R_0288E0_SQ_VTX_SEMANTIC_CLEAR, ~0); 2386 2387 r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2); 2388 r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */ 2389 r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */ 2390 2391 r600_store_context_reg(cb, R_0288A4_SQ_PGM_RESOURCES_FS, 0); 2392 2393 if (rctx->b.chip_class == R700) 2394 r600_store_context_reg(cb, R_028350_SX_MISC, 0); 2395 if (rctx->b.chip_class == R700 && rctx->screen->b.has_streamout) 2396 r600_store_context_reg(cb, R_028354_SX_SURFACE_SYNC, S_028354_SURFACE_SYNC_MASK(0xf)); 2397 2398 r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0); 2399 if (rctx->screen->b.has_streamout) { 2400 r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0); 2401 } 2402 2403 r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0, 0x1000FFF); 2404 r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (32 * 4), 0x1000FFF); 2405 r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (64 * 4), 0x1000FFF); 2406 } 2407 2408 void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2409 { 2410 struct r600_context *rctx = (struct r600_context *)ctx; 2411 struct r600_command_buffer *cb = &shader->command_buffer; 2412 struct r600_shader *rshader = &shader->shader; 2413 unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control; 2414 int pos_index = -1, face_index = -1, fixed_pt_position_index = -1; 2415 unsigned tmp, sid, ufi = 0; 2416 int need_linear = 0; 2417 unsigned z_export = 0, stencil_export = 0, mask_export = 0; 2418 unsigned sprite_coord_enable = rctx->rasterizer ? rctx->rasterizer->sprite_coord_enable : 0; 2419 2420 if (!cb->buf) { 2421 r600_init_command_buffer(cb, 64); 2422 } else { 2423 cb->num_dw = 0; 2424 } 2425 2426 r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, rshader->ninput); 2427 for (i = 0; i < rshader->ninput; i++) { 2428 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION) 2429 pos_index = i; 2430 if (rshader->input[i].name == TGSI_SEMANTIC_FACE && face_index == -1) 2431 face_index = i; 2432 if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEID) 2433 fixed_pt_position_index = i; 2434 2435 sid = rshader->input[i].spi_sid; 2436 2437 tmp = S_028644_SEMANTIC(sid); 2438 2439 /* D3D 9 behaviour. 
GL is undefined */ 2440 if (rshader->input[i].name == TGSI_SEMANTIC_COLOR && rshader->input[i].sid == 0) 2441 tmp |= S_028644_DEFAULT_VAL(3); 2442 2443 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION || 2444 rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT || 2445 (rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR && 2446 rctx->rasterizer && rctx->rasterizer->flatshade)) 2447 tmp |= S_028644_FLAT_SHADE(1); 2448 2449 if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC && 2450 sprite_coord_enable & (1 << rshader->input[i].sid)) { 2451 tmp |= S_028644_PT_SPRITE_TEX(1); 2452 } 2453 2454 if (rshader->input[i].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) 2455 tmp |= S_028644_SEL_CENTROID(1); 2456 2457 if (rshader->input[i].interpolate_location == TGSI_INTERPOLATE_LOC_SAMPLE) 2458 tmp |= S_028644_SEL_SAMPLE(1); 2459 2460 if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR) { 2461 need_linear = 1; 2462 tmp |= S_028644_SEL_LINEAR(1); 2463 } 2464 2465 r600_store_value(cb, tmp); 2466 } 2467 2468 db_shader_control = 0; 2469 for (i = 0; i < rshader->noutput; i++) { 2470 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION) 2471 z_export = 1; 2472 if (rshader->output[i].name == TGSI_SEMANTIC_STENCIL) 2473 stencil_export = 1; 2474 if (rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK && 2475 rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0) 2476 mask_export = 1; 2477 } 2478 db_shader_control |= S_02880C_Z_EXPORT_ENABLE(z_export); 2479 db_shader_control |= S_02880C_STENCIL_REF_EXPORT_ENABLE(stencil_export); 2480 db_shader_control |= S_02880C_MASK_EXPORT_ENABLE(mask_export); 2481 if (rshader->uses_kill) 2482 db_shader_control |= S_02880C_KILL_ENABLE(1); 2483 2484 exports_ps = 0; 2485 for (i = 0; i < rshader->noutput; i++) { 2486 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION || 2487 rshader->output[i].name == TGSI_SEMANTIC_STENCIL || 2488 rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) { 2489 exports_ps |= 1; 2490 } 2491 } 2492 num_cout = rshader->nr_ps_color_exports; 2493 exports_ps |= S_028854_EXPORT_COLORS(num_cout); 2494 if (!exports_ps) { 2495 /* always at least export 1 component per pixel */ 2496 exports_ps = 2; 2497 } 2498 2499 shader->nr_ps_color_outputs = num_cout; 2500 2501 spi_ps_in_control_0 = S_0286CC_NUM_INTERP(rshader->ninput) | 2502 S_0286CC_PERSP_GRADIENT_ENA(1)| 2503 S_0286CC_LINEAR_GRADIENT_ENA(need_linear); 2504 spi_input_z = 0; 2505 if (pos_index != -1) { 2506 spi_ps_in_control_0 |= (S_0286CC_POSITION_ENA(1) | 2507 S_0286CC_POSITION_CENTROID(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) | 2508 S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr) | 2509 S_0286CC_BARYC_SAMPLE_CNTL(1)) | 2510 S_0286CC_POSITION_SAMPLE(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_SAMPLE); 2511 spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1); 2512 } 2513 2514 spi_ps_in_control_1 = 0; 2515 if (face_index != -1) { 2516 spi_ps_in_control_1 |= S_0286D0_FRONT_FACE_ENA(1) | 2517 S_0286D0_FRONT_FACE_ADDR(rshader->input[face_index].gpr); 2518 } 2519 if (fixed_pt_position_index != -1) { 2520 spi_ps_in_control_1 |= S_0286D0_FIXED_PT_POSITION_ENA(1) | 2521 S_0286D0_FIXED_PT_POSITION_ADDR(rshader->input[fixed_pt_position_index].gpr); 2522 } 2523 2524 /* HW bug in original R600 */ 2525 if (rctx->b.family == CHIP_R600) 2526 ufi = 1; 2527 2528 r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2); 2529 r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */ 2530 
r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */ 2531 2532 r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z); 2533 2534 r600_store_context_reg_seq(cb, R_028850_SQ_PGM_RESOURCES_PS, 2); 2535 r600_store_value(cb, /* R_028850_SQ_PGM_RESOURCES_PS*/ 2536 S_028850_NUM_GPRS(rshader->bc.ngpr) | 2537 S_028850_STACK_SIZE(rshader->bc.nstack) | 2538 S_028850_UNCACHED_FIRST_INST(ufi)); 2539 r600_store_value(cb, exports_ps); /* R_028854_SQ_PGM_EXPORTS_PS */ 2540 2541 r600_store_context_reg(cb, R_028840_SQ_PGM_START_PS, 0); 2542 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */ 2543 2544 /* only set some bits here, the other bits are set in the dsa state */ 2545 shader->db_shader_control = db_shader_control; 2546 shader->ps_depth_export = z_export | stencil_export | mask_export; 2547 2548 shader->sprite_coord_enable = sprite_coord_enable; 2549 if (rctx->rasterizer) 2550 shader->flatshade = rctx->rasterizer->flatshade; 2551 } 2552 2553 void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2554 { 2555 struct r600_command_buffer *cb = &shader->command_buffer; 2556 struct r600_shader *rshader = &shader->shader; 2557 unsigned spi_vs_out_id[10] = {}; 2558 unsigned i, tmp, nparams = 0; 2559 2560 for (i = 0; i < rshader->noutput; i++) { 2561 if (rshader->output[i].spi_sid) { 2562 tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8); 2563 spi_vs_out_id[nparams / 4] |= tmp; 2564 nparams++; 2565 } 2566 } 2567 2568 r600_init_command_buffer(cb, 32); 2569 2570 r600_store_context_reg_seq(cb, R_028614_SPI_VS_OUT_ID_0, 10); 2571 for (i = 0; i < 10; i++) { 2572 r600_store_value(cb, spi_vs_out_id[i]); 2573 } 2574 2575 /* Certain attributes (position, psize, etc.) don't count as params. 2576 * VS is required to export at least one param and r600_shader_from_tgsi() 2577 * takes care of adding a dummy export. 2578 */ 2579 if (nparams < 1) 2580 nparams = 1; 2581 2582 r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG, 2583 S_0286C4_VS_EXPORT_COUNT(nparams - 1)); 2584 r600_store_context_reg(cb, R_028868_SQ_PGM_RESOURCES_VS, 2585 S_028868_NUM_GPRS(rshader->bc.ngpr) | 2586 S_028868_STACK_SIZE(rshader->bc.nstack)); 2587 if (rshader->vs_position_window_space) { 2588 r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL, 2589 S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1)); 2590 } else { 2591 r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL, 2592 S_028818_VTX_W0_FMT(1) | 2593 S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) | 2594 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) | 2595 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1)); 2596 2597 } 2598 r600_store_context_reg(cb, R_028858_SQ_PGM_START_VS, 0); 2599 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). 
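	 * (Presumably this is done by r600_emit_shader when the per-stage shader
	 * atom registered in r600_init_state_functions is emitted.)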
*/ 2600 2601 shader->pa_cl_vs_out_cntl = 2602 S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->clip_dist_write & 0x0F) != 0) | 2603 S_02881C_VS_OUT_CCDIST1_VEC_ENA((rshader->clip_dist_write & 0xF0) != 0) | 2604 S_02881C_VS_OUT_MISC_VEC_ENA(rshader->vs_out_misc_write) | 2605 S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size) | 2606 S_02881C_USE_VTX_EDGE_FLAG(rshader->vs_out_edgeflag) | 2607 S_02881C_USE_VTX_RENDER_TARGET_INDX(rshader->vs_out_layer) | 2608 S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport); 2609 } 2610 2611 #define RV610_GSVS_ALIGN 32 2612 #define R600_GSVS_ALIGN 16 2613 2614 void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2615 { 2616 struct r600_context *rctx = (struct r600_context *)ctx; 2617 struct r600_command_buffer *cb = &shader->command_buffer; 2618 struct r600_shader *rshader = &shader->shader; 2619 struct r600_shader *cp_shader = &shader->gs_copy_shader->shader; 2620 unsigned gsvs_itemsize = 2621 (cp_shader->ring_item_sizes[0] * shader->selector->gs_max_out_vertices) >> 2; 2622 2623 /* some r600s needs gsvs itemsize aligned to cacheline size 2624 this was fixed in rs780 and above. */ 2625 switch (rctx->b.family) { 2626 case CHIP_RV610: 2627 gsvs_itemsize = align(gsvs_itemsize, RV610_GSVS_ALIGN); 2628 break; 2629 case CHIP_R600: 2630 case CHIP_RV630: 2631 case CHIP_RV670: 2632 case CHIP_RV620: 2633 case CHIP_RV635: 2634 gsvs_itemsize = align(gsvs_itemsize, R600_GSVS_ALIGN); 2635 break; 2636 default: 2637 break; 2638 } 2639 2640 r600_init_command_buffer(cb, 64); 2641 2642 /* VGT_GS_MODE is written by r600_emit_shader_stages */ 2643 r600_store_context_reg(cb, R_028AB8_VGT_VTX_CNT_EN, 1); 2644 2645 if (rctx->b.chip_class >= R700) { 2646 r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT, 2647 S_028B38_MAX_VERT_OUT(shader->selector->gs_max_out_vertices)); 2648 } 2649 r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE, 2650 r600_conv_prim_to_gs_out(shader->selector->gs_output_prim)); 2651 2652 r600_store_context_reg(cb, R_0288C8_SQ_GS_VERT_ITEMSIZE, 2653 cp_shader->ring_item_sizes[0] >> 2); 2654 2655 r600_store_context_reg(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE, 2656 (rshader->ring_item_sizes[0]) >> 2); 2657 2658 r600_store_context_reg(cb, R_0288AC_SQ_GSVS_RING_ITEMSIZE, 2659 gsvs_itemsize); 2660 2661 /* FIXME calculate these values somehow ??? */ 2662 r600_store_config_reg_seq(cb, R_0088C8_VGT_GS_PER_ES, 2); 2663 r600_store_value(cb, 0x80); /* GS_PER_ES */ 2664 r600_store_value(cb, 0x100); /* ES_PER_GS */ 2665 r600_store_config_reg_seq(cb, R_0088E8_VGT_GS_PER_VS, 1); 2666 r600_store_value(cb, 0x2); /* GS_PER_VS */ 2667 2668 r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_GS, 2669 S_02887C_NUM_GPRS(rshader->bc.ngpr) | 2670 S_02887C_STACK_SIZE(rshader->bc.nstack)); 2671 r600_store_context_reg(cb, R_02886C_SQ_PGM_START_GS, 0); 2672 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */ 2673 } 2674 2675 void r600_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2676 { 2677 struct r600_command_buffer *cb = &shader->command_buffer; 2678 struct r600_shader *rshader = &shader->shader; 2679 2680 r600_init_command_buffer(cb, 32); 2681 2682 r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES, 2683 S_028890_NUM_GPRS(rshader->bc.ngpr) | 2684 S_028890_STACK_SIZE(rshader->bc.nstack)); 2685 r600_store_context_reg(cb, R_028880_SQ_PGM_START_ES, 0); 2686 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). 
*/ 2687 } 2688 2689 2690 void *r600_create_resolve_blend(struct r600_context *rctx) 2691 { 2692 struct pipe_blend_state blend; 2693 unsigned i; 2694 2695 memset(&blend, 0, sizeof(blend)); 2696 blend.independent_blend_enable = true; 2697 for (i = 0; i < 2; i++) { 2698 blend.rt[i].colormask = 0xf; 2699 blend.rt[i].blend_enable = 1; 2700 blend.rt[i].rgb_func = PIPE_BLEND_ADD; 2701 blend.rt[i].alpha_func = PIPE_BLEND_ADD; 2702 blend.rt[i].rgb_src_factor = PIPE_BLENDFACTOR_ZERO; 2703 blend.rt[i].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO; 2704 blend.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO; 2705 blend.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO; 2706 } 2707 return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX); 2708 } 2709 2710 void *r700_create_resolve_blend(struct r600_context *rctx) 2711 { 2712 struct pipe_blend_state blend; 2713 2714 memset(&blend, 0, sizeof(blend)); 2715 blend.independent_blend_enable = true; 2716 blend.rt[0].colormask = 0xf; 2717 return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX); 2718 } 2719 2720 void *r600_create_decompress_blend(struct r600_context *rctx) 2721 { 2722 struct pipe_blend_state blend; 2723 2724 memset(&blend, 0, sizeof(blend)); 2725 blend.independent_blend_enable = true; 2726 blend.rt[0].colormask = 0xf; 2727 return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_EXPAND_SAMPLES); 2728 } 2729 2730 void *r600_create_db_flush_dsa(struct r600_context *rctx) 2731 { 2732 struct pipe_depth_stencil_alpha_state dsa; 2733 boolean quirk = false; 2734 2735 if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 || 2736 rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635) 2737 quirk = true; 2738 2739 memset(&dsa, 0, sizeof(dsa)); 2740 2741 if (quirk) { 2742 dsa.depth.enabled = 1; 2743 dsa.depth.func = PIPE_FUNC_LEQUAL; 2744 dsa.stencil[0].enabled = 1; 2745 dsa.stencil[0].func = PIPE_FUNC_ALWAYS; 2746 dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_KEEP; 2747 dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_INCR; 2748 dsa.stencil[0].writemask = 0xff; 2749 } 2750 2751 return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa); 2752 } 2753 2754 void r600_update_db_shader_control(struct r600_context * rctx) 2755 { 2756 bool dual_export; 2757 unsigned db_shader_control; 2758 uint8_t ps_conservative_z; 2759 2760 if (!rctx->ps_shader) { 2761 return; 2762 } 2763 2764 dual_export = rctx->framebuffer.export_16bpc && 2765 !rctx->ps_shader->current->ps_depth_export; 2766 2767 db_shader_control = rctx->ps_shader->current->db_shader_control | 2768 S_02880C_DUAL_EXPORT_ENABLE(dual_export); 2769 2770 ps_conservative_z = rctx->ps_shader->current->shader.ps_conservative_z; 2771 2772 /* When alpha test is enabled we can't trust the hw to make the proper 2773 * decision on the order in which ztest should be run related to fragment 2774 * shader execution. 2775 * 2776 * If alpha test is enabled perform z test after fragment. 
RE_Z (early 2777 * z test but no write to the zbuffer) seems to cause lockup on r6xx/r7xx 2778 */ 2779 if (rctx->alphatest_state.sx_alpha_test_control) { 2780 db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z); 2781 } else { 2782 db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z); 2783 } 2784 2785 if (db_shader_control != rctx->db_misc_state.db_shader_control || 2786 ps_conservative_z != rctx->db_misc_state.ps_conservative_z) { 2787 rctx->db_misc_state.db_shader_control = db_shader_control; 2788 rctx->db_misc_state.ps_conservative_z = ps_conservative_z; 2789 r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom); 2790 } 2791 } 2792 2793 static inline unsigned r600_array_mode(unsigned mode) 2794 { 2795 switch (mode) { 2796 default: 2797 case RADEON_SURF_MODE_LINEAR_ALIGNED: return V_0280A0_ARRAY_LINEAR_ALIGNED; 2798 break; 2799 case RADEON_SURF_MODE_1D: return V_0280A0_ARRAY_1D_TILED_THIN1; 2800 break; 2801 case RADEON_SURF_MODE_2D: return V_0280A0_ARRAY_2D_TILED_THIN1; 2802 } 2803 } 2804 2805 static boolean r600_dma_copy_tile(struct r600_context *rctx, 2806 struct pipe_resource *dst, 2807 unsigned dst_level, 2808 unsigned dst_x, 2809 unsigned dst_y, 2810 unsigned dst_z, 2811 struct pipe_resource *src, 2812 unsigned src_level, 2813 unsigned src_x, 2814 unsigned src_y, 2815 unsigned src_z, 2816 unsigned copy_height, 2817 unsigned pitch, 2818 unsigned bpp) 2819 { 2820 struct radeon_winsys_cs *cs = rctx->b.dma.cs; 2821 struct r600_texture *rsrc = (struct r600_texture*)src; 2822 struct r600_texture *rdst = (struct r600_texture*)dst; 2823 unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size; 2824 unsigned ncopy, height, cheight, detile, i, x, y, z, src_mode, dst_mode; 2825 uint64_t base, addr; 2826 2827 dst_mode = rdst->surface.level[dst_level].mode; 2828 src_mode = rsrc->surface.level[src_level].mode; 2829 assert(dst_mode != src_mode); 2830 2831 y = 0; 2832 lbpp = util_logbase2(bpp); 2833 pitch_tile_max = ((pitch / bpp) / 8) - 1; 2834 2835 if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED) { 2836 /* T2L */ 2837 array_mode = r600_array_mode(src_mode); 2838 slice_tile_max = (rsrc->surface.level[src_level].nblk_x * rsrc->surface.level[src_level].nblk_y) / (8*8); 2839 slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0; 2840 /* linear height must be the same as the slice tile max height, it's ok even 2841 * if the linear destination/source have smaller heigh as the size of the 2842 * dma packet will be using the copy_height which is always smaller or equal 2843 * to the linear height 2844 */ 2845 height = u_minify(rsrc->resource.b.b.height0, src_level); 2846 detile = 1; 2847 x = src_x; 2848 y = src_y; 2849 z = src_z; 2850 base = rsrc->surface.level[src_level].offset; 2851 addr = rdst->surface.level[dst_level].offset; 2852 addr += rdst->surface.level[dst_level].slice_size * dst_z; 2853 addr += dst_y * pitch + dst_x * bpp; 2854 } else { 2855 /* L2T */ 2856 array_mode = r600_array_mode(dst_mode); 2857 slice_tile_max = (rdst->surface.level[dst_level].nblk_x * rdst->surface.level[dst_level].nblk_y) / (8*8); 2858 slice_tile_max = slice_tile_max ? 
slice_tile_max - 1 : 0; 2859 /* linear height must be the same as the slice tile max height, it's ok even 2860 * if the linear destination/source have smaller heigh as the size of the 2861 * dma packet will be using the copy_height which is always smaller or equal 2862 * to the linear height 2863 */ 2864 height = u_minify(rdst->resource.b.b.height0, dst_level); 2865 detile = 0; 2866 x = dst_x; 2867 y = dst_y; 2868 z = dst_z; 2869 base = rdst->surface.level[dst_level].offset; 2870 addr = rsrc->surface.level[src_level].offset; 2871 addr += rsrc->surface.level[src_level].slice_size * src_z; 2872 addr += src_y * pitch + src_x * bpp; 2873 } 2874 /* check that we are in dw/base alignment constraint */ 2875 if (addr % 4 || base % 256) { 2876 return FALSE; 2877 } 2878 2879 /* It's a r6xx/r7xx limitation, the blit must be on 8 boundary for number 2880 * line in the blit. Compute max 8 line we can copy in the size limit 2881 */ 2882 cheight = ((R600_DMA_COPY_MAX_SIZE_DW * 4) / pitch) & 0xfffffff8; 2883 ncopy = (copy_height / cheight) + !!(copy_height % cheight); 2884 r600_need_dma_space(&rctx->b, ncopy * 7, &rdst->resource, &rsrc->resource); 2885 2886 for (i = 0; i < ncopy; i++) { 2887 cheight = cheight > copy_height ? copy_height : cheight; 2888 size = (cheight * pitch) / 4; 2889 /* emit reloc before writing cs so that cs is always in consistent state */ 2890 radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource, RADEON_USAGE_READ, 2891 RADEON_PRIO_SDMA_TEXTURE); 2892 radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource, RADEON_USAGE_WRITE, 2893 RADEON_PRIO_SDMA_TEXTURE); 2894 radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, 1, 0, size)); 2895 radeon_emit(cs, base >> 8); 2896 radeon_emit(cs, (detile << 31) | (array_mode << 27) | 2897 (lbpp << 24) | ((height - 1) << 10) | 2898 pitch_tile_max); 2899 radeon_emit(cs, (slice_tile_max << 12) | (z << 0)); 2900 radeon_emit(cs, (x << 3) | (y << 17)); 2901 radeon_emit(cs, addr & 0xfffffffc); 2902 radeon_emit(cs, (addr >> 32UL) & 0xff); 2903 copy_height -= cheight; 2904 addr += cheight * pitch; 2905 y += cheight; 2906 } 2907 return TRUE; 2908 } 2909 2910 static void r600_dma_copy(struct pipe_context *ctx, 2911 struct pipe_resource *dst, 2912 unsigned dst_level, 2913 unsigned dstx, unsigned dsty, unsigned dstz, 2914 struct pipe_resource *src, 2915 unsigned src_level, 2916 const struct pipe_box *src_box) 2917 { 2918 struct r600_context *rctx = (struct r600_context *)ctx; 2919 struct r600_texture *rsrc = (struct r600_texture*)src; 2920 struct r600_texture *rdst = (struct r600_texture*)dst; 2921 unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height; 2922 unsigned src_w, dst_w; 2923 unsigned src_x, src_y; 2924 unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz; 2925 2926 if (rctx->b.dma.cs == NULL) { 2927 goto fallback; 2928 } 2929 2930 if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) { 2931 if (dst_x % 4 || src_box->x % 4 || src_box->width % 4) 2932 goto fallback; 2933 2934 r600_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width); 2935 return; 2936 } 2937 2938 if (src_box->depth > 1 || 2939 !r600_prepare_for_dma_blit(&rctx->b, rdst, dst_level, dstx, dsty, 2940 dstz, rsrc, src_level, src_box)) 2941 goto fallback; 2942 2943 src_x = util_format_get_nblocksx(src->format, src_box->x); 2944 dst_x = util_format_get_nblocksx(src->format, dst_x); 2945 src_y = util_format_get_nblocksy(src->format, src_box->y); 2946 dst_y = util_format_get_nblocksy(src->format, dst_y); 2947 2948 bpp = rdst->surface.bpe; 2949 
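	/* Pitches below are in bytes (blocks per row times bytes per element),
	 * and copy_height is converted to block rows, so compressed formats are
	 * handled in block units throughout. */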
dst_pitch = rdst->surface.level[dst_level].nblk_x * rdst->surface.bpe; 2950 src_pitch = rsrc->surface.level[src_level].nblk_x * rsrc->surface.bpe; 2951 src_w = u_minify(rsrc->resource.b.b.width0, src_level); 2952 dst_w = u_minify(rdst->resource.b.b.width0, dst_level); 2953 copy_height = src_box->height / rsrc->surface.blk_h; 2954 2955 dst_mode = rdst->surface.level[dst_level].mode; 2956 src_mode = rsrc->surface.level[src_level].mode; 2957 2958 if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) { 2959 /* strict requirement on r6xx/r7xx */ 2960 goto fallback; 2961 } 2962 /* lot of constraint on alignment this should capture them all */ 2963 if (src_pitch % 8 || src_box->y % 8 || dst_y % 8) { 2964 goto fallback; 2965 } 2966 2967 if (src_mode == dst_mode) { 2968 uint64_t dst_offset, src_offset, size; 2969 2970 /* simple dma blit would do NOTE code here assume : 2971 * src_box.x/y == 0 2972 * dst_x/y == 0 2973 * dst_pitch == src_pitch 2974 */ 2975 src_offset= rsrc->surface.level[src_level].offset; 2976 src_offset += rsrc->surface.level[src_level].slice_size * src_box->z; 2977 src_offset += src_y * src_pitch + src_x * bpp; 2978 dst_offset = rdst->surface.level[dst_level].offset; 2979 dst_offset += rdst->surface.level[dst_level].slice_size * dst_z; 2980 dst_offset += dst_y * dst_pitch + dst_x * bpp; 2981 size = src_box->height * src_pitch; 2982 /* must be dw aligned */ 2983 if (dst_offset % 4 || src_offset % 4 || size % 4) { 2984 goto fallback; 2985 } 2986 r600_dma_copy_buffer(rctx, dst, src, dst_offset, src_offset, size); 2987 } else { 2988 if (!r600_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z, 2989 src, src_level, src_x, src_y, src_box->z, 2990 copy_height, dst_pitch, bpp)) { 2991 goto fallback; 2992 } 2993 } 2994 return; 2995 2996 fallback: 2997 r600_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz, 2998 src, src_level, src_box); 2999 } 3000 3001 void r600_init_state_functions(struct r600_context *rctx) 3002 { 3003 unsigned id = 1; 3004 unsigned i; 3005 /* !!! 3006 * To avoid GPU lockup registers must be emited in a specific order 3007 * (no kidding ...). The order below is important and have been 3008 * partialy infered from analyzing fglrx command stream. 3009 * 3010 * Don't reorder atom without carefully checking the effect (GPU lockup 3011 * or piglit regression). 3012 * !!! 
3013 */ 3014 3015 r600_init_atom(rctx, &rctx->framebuffer.atom, id++, r600_emit_framebuffer_state, 0); 3016 3017 /* shader const */ 3018 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, r600_emit_vs_constant_buffers, 0); 3019 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, r600_emit_gs_constant_buffers, 0); 3020 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, r600_emit_ps_constant_buffers, 0); 3021 3022 /* sampler must be emited before TA_CNTL_AUX otherwise DISABLE_CUBE_WRAP change 3023 * does not take effect (TA_CNTL_AUX emited by r600_emit_seamless_cube_map) 3024 */ 3025 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, r600_emit_vs_sampler_states, 0); 3026 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, r600_emit_gs_sampler_states, 0); 3027 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, r600_emit_ps_sampler_states, 0); 3028 /* resource */ 3029 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, r600_emit_vs_sampler_views, 0); 3030 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, r600_emit_gs_sampler_views, 0); 3031 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, r600_emit_ps_sampler_views, 0); 3032 r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, r600_emit_vertex_buffers, 0); 3033 3034 r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 10); 3035 3036 r600_init_atom(rctx, &rctx->seamless_cube_map.atom, id++, r600_emit_seamless_cube_map, 3); 3037 r600_init_atom(rctx, &rctx->sample_mask.atom, id++, r600_emit_sample_mask, 3); 3038 rctx->sample_mask.sample_mask = ~0; 3039 3040 r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6); 3041 r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6); 3042 r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0); 3043 r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, r600_emit_cb_misc_state, 7); 3044 r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 6); 3045 r600_init_atom(rctx, &rctx->clip_state.atom, id++, r600_emit_clip_state, 26); 3046 r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, r600_emit_db_misc_state, 7); 3047 r600_init_atom(rctx, &rctx->db_state.atom, id++, r600_emit_db_state, 11); 3048 r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0); 3049 r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, r600_emit_polygon_offset, 9); 3050 r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0); 3051 r600_add_atom(rctx, &rctx->b.scissors.atom, id++); 3052 r600_add_atom(rctx, &rctx->b.viewports.atom, id++); 3053 r600_init_atom(rctx, &rctx->config_state.atom, id++, r600_emit_config_state, 3); 3054 r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4); 3055 r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5); 3056 r600_add_atom(rctx, &rctx->b.render_cond_atom, id++); 3057 r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++); 3058 r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++); 3059 for (i = 0; i < R600_NUM_HW_STAGES; i++) 3060 r600_init_atom(rctx, &rctx->hw_shader_stages[i].atom, id++, r600_emit_shader, 0); 3061 r600_init_atom(rctx, &rctx->shader_stages.atom, id++, r600_emit_shader_stages, 0); 3062 r600_init_atom(rctx, 
&rctx->gs_rings.atom, id++, r600_emit_gs_rings, 0); 3063 3064 rctx->b.b.create_blend_state = r600_create_blend_state; 3065 rctx->b.b.create_depth_stencil_alpha_state = r600_create_dsa_state; 3066 rctx->b.b.create_rasterizer_state = r600_create_rs_state; 3067 rctx->b.b.create_sampler_state = r600_create_sampler_state; 3068 rctx->b.b.create_sampler_view = r600_create_sampler_view; 3069 rctx->b.b.set_framebuffer_state = r600_set_framebuffer_state; 3070 rctx->b.b.set_polygon_stipple = r600_set_polygon_stipple; 3071 rctx->b.b.set_min_samples = r600_set_min_samples; 3072 rctx->b.b.get_sample_position = r600_get_sample_position; 3073 rctx->b.dma_copy = r600_dma_copy; 3074 } 3075 /* this function must be last */ 3076