1 /* 2 * Copyright 2010 Jerome Glisse <glisse (at) freedesktop.org> 3 * 4 * Permission is hereby granted, free of charge, to any person obtaining a 5 * copy of this software and associated documentation files (the "Software"), 6 * to deal in the Software without restriction, including without limitation 7 * on the rights to use, copy, modify, merge, publish, distribute, sub 8 * license, and/or sell copies of the Software, and to permit persons to whom 9 * the Software is furnished to do so, subject to the following conditions: 10 * 11 * The above copyright notice and this permission notice (including the next 12 * paragraph) shall be included in all copies or substantial portions of the 13 * Software. 14 * 15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, 19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 21 * USE OR OTHER DEALINGS IN THE SOFTWARE. 22 */ 23 #include "r600_formats.h" 24 #include "r600_shader.h" 25 #include "r600d.h" 26 27 #include "pipe/p_shader_tokens.h" 28 #include "util/u_pack_color.h" 29 #include "util/u_memory.h" 30 #include "util/u_framebuffer.h" 31 #include "util/u_dual_blend.h" 32 33 static uint32_t r600_translate_blend_function(int blend_func) 34 { 35 switch (blend_func) { 36 case PIPE_BLEND_ADD: 37 return V_028804_COMB_DST_PLUS_SRC; 38 case PIPE_BLEND_SUBTRACT: 39 return V_028804_COMB_SRC_MINUS_DST; 40 case PIPE_BLEND_REVERSE_SUBTRACT: 41 return V_028804_COMB_DST_MINUS_SRC; 42 case PIPE_BLEND_MIN: 43 return V_028804_COMB_MIN_DST_SRC; 44 case PIPE_BLEND_MAX: 45 return V_028804_COMB_MAX_DST_SRC; 46 default: 47 R600_ERR("Unknown blend function %d\n", blend_func); 48 assert(0); 49 break; 50 } 51 return 0; 52 } 53 54 static uint32_t r600_translate_blend_factor(int blend_fact) 55 { 56 switch (blend_fact) { 57 case PIPE_BLENDFACTOR_ONE: 58 return V_028804_BLEND_ONE; 59 case PIPE_BLENDFACTOR_SRC_COLOR: 60 return V_028804_BLEND_SRC_COLOR; 61 case PIPE_BLENDFACTOR_SRC_ALPHA: 62 return V_028804_BLEND_SRC_ALPHA; 63 case PIPE_BLENDFACTOR_DST_ALPHA: 64 return V_028804_BLEND_DST_ALPHA; 65 case PIPE_BLENDFACTOR_DST_COLOR: 66 return V_028804_BLEND_DST_COLOR; 67 case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE: 68 return V_028804_BLEND_SRC_ALPHA_SATURATE; 69 case PIPE_BLENDFACTOR_CONST_COLOR: 70 return V_028804_BLEND_CONST_COLOR; 71 case PIPE_BLENDFACTOR_CONST_ALPHA: 72 return V_028804_BLEND_CONST_ALPHA; 73 case PIPE_BLENDFACTOR_ZERO: 74 return V_028804_BLEND_ZERO; 75 case PIPE_BLENDFACTOR_INV_SRC_COLOR: 76 return V_028804_BLEND_ONE_MINUS_SRC_COLOR; 77 case PIPE_BLENDFACTOR_INV_SRC_ALPHA: 78 return V_028804_BLEND_ONE_MINUS_SRC_ALPHA; 79 case PIPE_BLENDFACTOR_INV_DST_ALPHA: 80 return V_028804_BLEND_ONE_MINUS_DST_ALPHA; 81 case PIPE_BLENDFACTOR_INV_DST_COLOR: 82 return V_028804_BLEND_ONE_MINUS_DST_COLOR; 83 case PIPE_BLENDFACTOR_INV_CONST_COLOR: 84 return V_028804_BLEND_ONE_MINUS_CONST_COLOR; 85 case PIPE_BLENDFACTOR_INV_CONST_ALPHA: 86 return V_028804_BLEND_ONE_MINUS_CONST_ALPHA; 87 case PIPE_BLENDFACTOR_SRC1_COLOR: 88 return V_028804_BLEND_SRC1_COLOR; 89 case PIPE_BLENDFACTOR_SRC1_ALPHA: 90 return V_028804_BLEND_SRC1_ALPHA; 91 case PIPE_BLENDFACTOR_INV_SRC1_COLOR: 92 return V_028804_BLEND_INV_SRC1_COLOR; 93 case PIPE_BLENDFACTOR_INV_SRC1_ALPHA: 94 return V_028804_BLEND_INV_SRC1_ALPHA; 95 default: 96 R600_ERR("Bad blend factor %d not supported!\n", blend_fact); 97 assert(0); 98 break; 99 } 100 return 0; 101 } 102 103 static unsigned r600_tex_dim(unsigned dim, unsigned nr_samples) 104 { 105 switch (dim) { 106 default: 107 case PIPE_TEXTURE_1D: 108 return V_038000_SQ_TEX_DIM_1D; 109 case PIPE_TEXTURE_1D_ARRAY: 110 return V_038000_SQ_TEX_DIM_1D_ARRAY; 111 case PIPE_TEXTURE_2D: 112 case PIPE_TEXTURE_RECT: 113 return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_MSAA : 114 V_038000_SQ_TEX_DIM_2D; 115 case PIPE_TEXTURE_2D_ARRAY: 116 return nr_samples > 1 ? V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA : 117 V_038000_SQ_TEX_DIM_2D_ARRAY; 118 case PIPE_TEXTURE_3D: 119 return V_038000_SQ_TEX_DIM_3D; 120 case PIPE_TEXTURE_CUBE: 121 case PIPE_TEXTURE_CUBE_ARRAY: 122 return V_038000_SQ_TEX_DIM_CUBEMAP; 123 } 124 } 125 126 static uint32_t r600_translate_dbformat(enum pipe_format format) 127 { 128 switch (format) { 129 case PIPE_FORMAT_Z16_UNORM: 130 return V_028010_DEPTH_16; 131 case PIPE_FORMAT_Z24X8_UNORM: 132 return V_028010_DEPTH_X8_24; 133 case PIPE_FORMAT_Z24_UNORM_S8_UINT: 134 return V_028010_DEPTH_8_24; 135 case PIPE_FORMAT_Z32_FLOAT: 136 return V_028010_DEPTH_32_FLOAT; 137 case PIPE_FORMAT_Z32_FLOAT_S8X24_UINT: 138 return V_028010_DEPTH_X24_8_32_FLOAT; 139 default: 140 return ~0U; 141 } 142 } 143 144 static bool r600_is_sampler_format_supported(struct pipe_screen *screen, enum pipe_format format) 145 { 146 return r600_translate_texformat(screen, format, NULL, NULL, NULL, 147 FALSE) != ~0U; 148 } 149 150 static bool r600_is_colorbuffer_format_supported(enum chip_class chip, enum pipe_format format) 151 { 152 return r600_translate_colorformat(chip, format, FALSE) != ~0U && 153 r600_translate_colorswap(format, FALSE) != ~0U; 154 } 155 156 static bool r600_is_zs_format_supported(enum pipe_format format) 157 { 158 return r600_translate_dbformat(format) != ~0U; 159 } 160 161 boolean r600_is_format_supported(struct pipe_screen *screen, 162 enum pipe_format format, 163 enum pipe_texture_target target, 164 unsigned sample_count, 165 unsigned usage) 166 { 167 struct r600_screen *rscreen = (struct r600_screen*)screen; 168 unsigned retval = 0; 169 170 if (target >= PIPE_MAX_TEXTURE_TYPES) { 171 R600_ERR("r600: unsupported texture type %d\n", target); 172 return FALSE; 173 } 174 175 if (!util_format_is_supported(format, usage)) 176 return FALSE; 177 178 if (sample_count > 1) { 179 if (!rscreen->has_msaa) 180 return FALSE; 181 182 /* R11G11B10 is broken on R6xx. */ 183 if (rscreen->b.chip_class == R600 && 184 format == PIPE_FORMAT_R11G11B10_FLOAT) 185 return FALSE; 186 187 /* MSAA integer colorbuffers hang. */ 188 if (util_format_is_pure_integer(format) && 189 !util_format_is_depth_or_stencil(format)) 190 return FALSE; 191 192 switch (sample_count) { 193 case 2: 194 case 4: 195 case 8: 196 break; 197 default: 198 return FALSE; 199 } 200 } 201 202 if (usage & PIPE_BIND_SAMPLER_VIEW) { 203 if (target == PIPE_BUFFER) { 204 if (r600_is_vertex_format_supported(format)) 205 retval |= PIPE_BIND_SAMPLER_VIEW; 206 } else { 207 if (r600_is_sampler_format_supported(screen, format)) 208 retval |= PIPE_BIND_SAMPLER_VIEW; 209 } 210 } 211 212 if ((usage & (PIPE_BIND_RENDER_TARGET | 213 PIPE_BIND_DISPLAY_TARGET | 214 PIPE_BIND_SCANOUT | 215 PIPE_BIND_SHARED | 216 PIPE_BIND_BLENDABLE)) && 217 r600_is_colorbuffer_format_supported(rscreen->b.chip_class, format)) { 218 retval |= usage & 219 (PIPE_BIND_RENDER_TARGET | 220 PIPE_BIND_DISPLAY_TARGET | 221 PIPE_BIND_SCANOUT | 222 PIPE_BIND_SHARED); 223 if (!util_format_is_pure_integer(format) && 224 !util_format_is_depth_or_stencil(format)) 225 retval |= usage & PIPE_BIND_BLENDABLE; 226 } 227 228 if ((usage & PIPE_BIND_DEPTH_STENCIL) && 229 r600_is_zs_format_supported(format)) { 230 retval |= PIPE_BIND_DEPTH_STENCIL; 231 } 232 233 if ((usage & PIPE_BIND_VERTEX_BUFFER) && 234 r600_is_vertex_format_supported(format)) { 235 retval |= PIPE_BIND_VERTEX_BUFFER; 236 } 237 238 if ((usage & PIPE_BIND_LINEAR) && 239 !util_format_is_compressed(format) && 240 !(usage & PIPE_BIND_DEPTH_STENCIL)) 241 retval |= PIPE_BIND_LINEAR; 242 243 return retval == usage; 244 } 245 246 static void r600_emit_polygon_offset(struct r600_context *rctx, struct r600_atom *a) 247 { 248 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 249 struct r600_poly_offset_state *state = (struct r600_poly_offset_state*)a; 250 float offset_units = state->offset_units; 251 float offset_scale = state->offset_scale; 252 uint32_t pa_su_poly_offset_db_fmt_cntl = 0; 253 254 if (!state->offset_units_unscaled) { 255 switch (state->zs_format) { 256 case PIPE_FORMAT_Z24X8_UNORM: 257 case PIPE_FORMAT_Z24_UNORM_S8_UINT: 258 offset_units *= 2.0f; 259 pa_su_poly_offset_db_fmt_cntl = 260 S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-24); 261 break; 262 case PIPE_FORMAT_Z16_UNORM: 263 offset_units *= 4.0f; 264 pa_su_poly_offset_db_fmt_cntl = 265 S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-16); 266 break; 267 default: 268 pa_su_poly_offset_db_fmt_cntl = 269 S_028DF8_POLY_OFFSET_NEG_NUM_DB_BITS((char)-23) | 270 S_028DF8_POLY_OFFSET_DB_IS_FLOAT_FMT(1); 271 } 272 } 273 274 radeon_set_context_reg_seq(cs, R_028E00_PA_SU_POLY_OFFSET_FRONT_SCALE, 4); 275 radeon_emit(cs, fui(offset_scale)); 276 radeon_emit(cs, fui(offset_units)); 277 radeon_emit(cs, fui(offset_scale)); 278 radeon_emit(cs, fui(offset_units)); 279 280 radeon_set_context_reg(cs, R_028DF8_PA_SU_POLY_OFFSET_DB_FMT_CNTL, 281 pa_su_poly_offset_db_fmt_cntl); 282 } 283 284 static uint32_t r600_get_blend_control(const struct pipe_blend_state *state, unsigned i) 285 { 286 int j = state->independent_blend_enable ? i : 0; 287 288 unsigned eqRGB = state->rt[j].rgb_func; 289 unsigned srcRGB = state->rt[j].rgb_src_factor; 290 unsigned dstRGB = state->rt[j].rgb_dst_factor; 291 292 unsigned eqA = state->rt[j].alpha_func; 293 unsigned srcA = state->rt[j].alpha_src_factor; 294 unsigned dstA = state->rt[j].alpha_dst_factor; 295 uint32_t bc = 0; 296 297 if (!state->rt[j].blend_enable) 298 return 0; 299 300 bc |= S_028804_COLOR_COMB_FCN(r600_translate_blend_function(eqRGB)); 301 bc |= S_028804_COLOR_SRCBLEND(r600_translate_blend_factor(srcRGB)); 302 bc |= S_028804_COLOR_DESTBLEND(r600_translate_blend_factor(dstRGB)); 303 304 if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) { 305 bc |= S_028804_SEPARATE_ALPHA_BLEND(1); 306 bc |= S_028804_ALPHA_COMB_FCN(r600_translate_blend_function(eqA)); 307 bc |= S_028804_ALPHA_SRCBLEND(r600_translate_blend_factor(srcA)); 308 bc |= S_028804_ALPHA_DESTBLEND(r600_translate_blend_factor(dstA)); 309 } 310 return bc; 311 } 312 313 static void *r600_create_blend_state_mode(struct pipe_context *ctx, 314 const struct pipe_blend_state *state, 315 int mode) 316 { 317 struct r600_context *rctx = (struct r600_context *)ctx; 318 uint32_t color_control = 0, target_mask = 0; 319 struct r600_blend_state *blend = CALLOC_STRUCT(r600_blend_state); 320 321 if (!blend) { 322 return NULL; 323 } 324 325 r600_init_command_buffer(&blend->buffer, 20); 326 r600_init_command_buffer(&blend->buffer_no_blend, 20); 327 328 /* R600 does not support per-MRT blends */ 329 if (rctx->b.family > CHIP_R600) 330 color_control |= S_028808_PER_MRT_BLEND(1); 331 332 if (state->logicop_enable) { 333 color_control |= (state->logicop_func << 16) | (state->logicop_func << 20); 334 } else { 335 color_control |= (0xcc << 16); 336 } 337 /* we pretend 8 buffer are used, CB_SHADER_MASK will disable unused one */ 338 if (state->independent_blend_enable) { 339 for (int i = 0; i < 8; i++) { 340 if (state->rt[i].blend_enable) { 341 color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i); 342 } 343 target_mask |= (state->rt[i].colormask << (4 * i)); 344 } 345 } else { 346 for (int i = 0; i < 8; i++) { 347 if (state->rt[0].blend_enable) { 348 color_control |= S_028808_TARGET_BLEND_ENABLE(1 << i); 349 } 350 target_mask |= (state->rt[0].colormask << (4 * i)); 351 } 352 } 353 354 if (target_mask) 355 color_control |= S_028808_SPECIAL_OP(mode); 356 else 357 color_control |= S_028808_SPECIAL_OP(V_028808_DISABLE); 358 359 /* only MRT0 has dual src blend */ 360 blend->dual_src_blend = util_blend_state_is_dual(state, 0); 361 blend->cb_target_mask = target_mask; 362 blend->cb_color_control = color_control; 363 blend->cb_color_control_no_blend = color_control & C_028808_TARGET_BLEND_ENABLE; 364 blend->alpha_to_one = state->alpha_to_one; 365 366 r600_store_context_reg(&blend->buffer, R_028D44_DB_ALPHA_TO_MASK, 367 S_028D44_ALPHA_TO_MASK_ENABLE(state->alpha_to_coverage) | 368 S_028D44_ALPHA_TO_MASK_OFFSET0(2) | 369 S_028D44_ALPHA_TO_MASK_OFFSET1(2) | 370 S_028D44_ALPHA_TO_MASK_OFFSET2(2) | 371 S_028D44_ALPHA_TO_MASK_OFFSET3(2)); 372 373 /* Copy over the registers set so far into buffer_no_blend. */ 374 memcpy(blend->buffer_no_blend.buf, blend->buffer.buf, blend->buffer.num_dw * 4); 375 blend->buffer_no_blend.num_dw = blend->buffer.num_dw; 376 377 /* Only add blend registers if blending is enabled. */ 378 if (!G_028808_TARGET_BLEND_ENABLE(color_control)) { 379 return blend; 380 } 381 382 /* The first R600 does not support per-MRT blends */ 383 r600_store_context_reg(&blend->buffer, R_028804_CB_BLEND_CONTROL, 384 r600_get_blend_control(state, 0)); 385 386 if (rctx->b.family > CHIP_R600) { 387 r600_store_context_reg_seq(&blend->buffer, R_028780_CB_BLEND0_CONTROL, 8); 388 for (int i = 0; i < 8; i++) { 389 r600_store_value(&blend->buffer, r600_get_blend_control(state, i)); 390 } 391 } 392 return blend; 393 } 394 395 static void *r600_create_blend_state(struct pipe_context *ctx, 396 const struct pipe_blend_state *state) 397 { 398 return r600_create_blend_state_mode(ctx, state, V_028808_SPECIAL_NORMAL); 399 } 400 401 static void *r600_create_dsa_state(struct pipe_context *ctx, 402 const struct pipe_depth_stencil_alpha_state *state) 403 { 404 unsigned db_depth_control, alpha_test_control, alpha_ref; 405 struct r600_dsa_state *dsa = CALLOC_STRUCT(r600_dsa_state); 406 407 if (!dsa) { 408 return NULL; 409 } 410 411 r600_init_command_buffer(&dsa->buffer, 3); 412 413 dsa->valuemask[0] = state->stencil[0].valuemask; 414 dsa->valuemask[1] = state->stencil[1].valuemask; 415 dsa->writemask[0] = state->stencil[0].writemask; 416 dsa->writemask[1] = state->stencil[1].writemask; 417 dsa->zwritemask = state->depth.writemask; 418 419 db_depth_control = S_028800_Z_ENABLE(state->depth.enabled) | 420 S_028800_Z_WRITE_ENABLE(state->depth.writemask) | 421 S_028800_ZFUNC(state->depth.func); 422 423 /* stencil */ 424 if (state->stencil[0].enabled) { 425 db_depth_control |= S_028800_STENCIL_ENABLE(1); 426 db_depth_control |= S_028800_STENCILFUNC(state->stencil[0].func); /* translates straight */ 427 db_depth_control |= S_028800_STENCILFAIL(r600_translate_stencil_op(state->stencil[0].fail_op)); 428 db_depth_control |= S_028800_STENCILZPASS(r600_translate_stencil_op(state->stencil[0].zpass_op)); 429 db_depth_control |= S_028800_STENCILZFAIL(r600_translate_stencil_op(state->stencil[0].zfail_op)); 430 431 if (state->stencil[1].enabled) { 432 db_depth_control |= S_028800_BACKFACE_ENABLE(1); 433 db_depth_control |= S_028800_STENCILFUNC_BF(state->stencil[1].func); /* translates straight */ 434 db_depth_control |= S_028800_STENCILFAIL_BF(r600_translate_stencil_op(state->stencil[1].fail_op)); 435 db_depth_control |= S_028800_STENCILZPASS_BF(r600_translate_stencil_op(state->stencil[1].zpass_op)); 436 db_depth_control |= S_028800_STENCILZFAIL_BF(r600_translate_stencil_op(state->stencil[1].zfail_op)); 437 } 438 } 439 440 /* alpha */ 441 alpha_test_control = 0; 442 alpha_ref = 0; 443 if (state->alpha.enabled) { 444 alpha_test_control = S_028410_ALPHA_FUNC(state->alpha.func); 445 alpha_test_control |= S_028410_ALPHA_TEST_ENABLE(1); 446 alpha_ref = fui(state->alpha.ref_value); 447 } 448 dsa->sx_alpha_test_control = alpha_test_control & 0xff; 449 dsa->alpha_ref = alpha_ref; 450 451 r600_store_context_reg(&dsa->buffer, R_028800_DB_DEPTH_CONTROL, db_depth_control); 452 return dsa; 453 } 454 455 static void *r600_create_rs_state(struct pipe_context *ctx, 456 const struct pipe_rasterizer_state *state) 457 { 458 struct r600_context *rctx = (struct r600_context *)ctx; 459 unsigned tmp, sc_mode_cntl, spi_interp; 460 float psize_min, psize_max; 461 struct r600_rasterizer_state *rs = CALLOC_STRUCT(r600_rasterizer_state); 462 463 if (!rs) { 464 return NULL; 465 } 466 467 r600_init_command_buffer(&rs->buffer, 30); 468 469 rs->scissor_enable = state->scissor; 470 rs->clip_halfz = state->clip_halfz; 471 rs->flatshade = state->flatshade; 472 rs->sprite_coord_enable = state->sprite_coord_enable; 473 rs->rasterizer_discard = state->rasterizer_discard; 474 rs->two_side = state->light_twoside; 475 rs->clip_plane_enable = state->clip_plane_enable; 476 rs->pa_sc_line_stipple = state->line_stipple_enable ? 477 S_028A0C_LINE_PATTERN(state->line_stipple_pattern) | 478 S_028A0C_REPEAT_COUNT(state->line_stipple_factor) : 0; 479 rs->pa_cl_clip_cntl = 480 S_028810_DX_CLIP_SPACE_DEF(state->clip_halfz) | 481 S_028810_ZCLIP_NEAR_DISABLE(!state->depth_clip) | 482 S_028810_ZCLIP_FAR_DISABLE(!state->depth_clip) | 483 S_028810_DX_LINEAR_ATTR_CLIP_ENA(1); 484 if (rctx->b.chip_class == R700) { 485 rs->pa_cl_clip_cntl |= 486 S_028810_DX_RASTERIZATION_KILL(state->rasterizer_discard); 487 } 488 rs->multisample_enable = state->multisample; 489 490 /* offset */ 491 rs->offset_units = state->offset_units; 492 rs->offset_scale = state->offset_scale * 16.0f; 493 rs->offset_enable = state->offset_point || state->offset_line || state->offset_tri; 494 rs->offset_units_unscaled = state->offset_units_unscaled; 495 496 if (state->point_size_per_vertex) { 497 psize_min = util_get_min_point_size(state); 498 psize_max = 8192; 499 } else { 500 /* Force the point size to be as if the vertex output was disabled. */ 501 psize_min = state->point_size; 502 psize_max = state->point_size; 503 } 504 505 sc_mode_cntl = S_028A4C_MSAA_ENABLE(state->multisample) | 506 S_028A4C_LINE_STIPPLE_ENABLE(state->line_stipple_enable) | 507 S_028A4C_FORCE_EOV_CNTDWN_ENABLE(1) | 508 S_028A4C_PS_ITER_SAMPLE(state->multisample && rctx->ps_iter_samples > 1); 509 if (rctx->b.family == CHIP_RV770) { 510 /* workaround possible rendering corruption on RV770 with hyperz together with sample shading */ 511 sc_mode_cntl |= S_028A4C_TILE_COVER_DISABLE(state->multisample && rctx->ps_iter_samples > 1); 512 } 513 if (rctx->b.chip_class >= R700) { 514 sc_mode_cntl |= S_028A4C_FORCE_EOV_REZ_ENABLE(1) | 515 S_028A4C_R700_ZMM_LINE_OFFSET(1) | 516 S_028A4C_R700_VPORT_SCISSOR_ENABLE(1); 517 } else { 518 sc_mode_cntl |= S_028A4C_WALK_ALIGN8_PRIM_FITS_ST(1); 519 } 520 521 spi_interp = S_0286D4_FLAT_SHADE_ENA(1); 522 if (state->sprite_coord_enable) { 523 spi_interp |= S_0286D4_PNT_SPRITE_ENA(1) | 524 S_0286D4_PNT_SPRITE_OVRD_X(2) | 525 S_0286D4_PNT_SPRITE_OVRD_Y(3) | 526 S_0286D4_PNT_SPRITE_OVRD_Z(0) | 527 S_0286D4_PNT_SPRITE_OVRD_W(1); 528 if (state->sprite_coord_mode != PIPE_SPRITE_COORD_UPPER_LEFT) { 529 spi_interp |= S_0286D4_PNT_SPRITE_TOP_1(1); 530 } 531 } 532 533 r600_store_context_reg_seq(&rs->buffer, R_028A00_PA_SU_POINT_SIZE, 3); 534 /* point size 12.4 fixed point (divide by two, because 0.5 = 1 pixel. */ 535 tmp = r600_pack_float_12p4(state->point_size/2); 536 r600_store_value(&rs->buffer, /* R_028A00_PA_SU_POINT_SIZE */ 537 S_028A00_HEIGHT(tmp) | S_028A00_WIDTH(tmp)); 538 r600_store_value(&rs->buffer, /* R_028A04_PA_SU_POINT_MINMAX */ 539 S_028A04_MIN_SIZE(r600_pack_float_12p4(psize_min/2)) | 540 S_028A04_MAX_SIZE(r600_pack_float_12p4(psize_max/2))); 541 r600_store_value(&rs->buffer, /* R_028A08_PA_SU_LINE_CNTL */ 542 S_028A08_WIDTH(r600_pack_float_12p4(state->line_width/2))); 543 544 r600_store_context_reg(&rs->buffer, R_0286D4_SPI_INTERP_CONTROL_0, spi_interp); 545 r600_store_context_reg(&rs->buffer, R_028A4C_PA_SC_MODE_CNTL, sc_mode_cntl); 546 r600_store_context_reg(&rs->buffer, R_028C08_PA_SU_VTX_CNTL, 547 S_028C08_PIX_CENTER_HALF(state->half_pixel_center) | 548 S_028C08_QUANT_MODE(V_028C08_X_1_256TH)); 549 r600_store_context_reg(&rs->buffer, R_028DFC_PA_SU_POLY_OFFSET_CLAMP, fui(state->offset_clamp)); 550 551 rs->pa_su_sc_mode_cntl = S_028814_PROVOKING_VTX_LAST(!state->flatshade_first) | 552 S_028814_CULL_FRONT(state->cull_face & PIPE_FACE_FRONT ? 1 : 0) | 553 S_028814_CULL_BACK(state->cull_face & PIPE_FACE_BACK ? 1 : 0) | 554 S_028814_FACE(!state->front_ccw) | 555 S_028814_POLY_OFFSET_FRONT_ENABLE(util_get_offset(state, state->fill_front)) | 556 S_028814_POLY_OFFSET_BACK_ENABLE(util_get_offset(state, state->fill_back)) | 557 S_028814_POLY_OFFSET_PARA_ENABLE(state->offset_point || state->offset_line) | 558 S_028814_POLY_MODE(state->fill_front != PIPE_POLYGON_MODE_FILL || 559 state->fill_back != PIPE_POLYGON_MODE_FILL) | 560 S_028814_POLYMODE_FRONT_PTYPE(r600_translate_fill(state->fill_front)) | 561 S_028814_POLYMODE_BACK_PTYPE(r600_translate_fill(state->fill_back)); 562 if (rctx->b.chip_class == R700) { 563 r600_store_context_reg(&rs->buffer, R_028814_PA_SU_SC_MODE_CNTL, rs->pa_su_sc_mode_cntl); 564 } 565 if (rctx->b.chip_class == R600) { 566 r600_store_context_reg(&rs->buffer, R_028350_SX_MISC, 567 S_028350_MULTIPASS(state->rasterizer_discard)); 568 } 569 return rs; 570 } 571 572 static unsigned r600_tex_filter(unsigned filter, unsigned max_aniso) 573 { 574 if (filter == PIPE_TEX_FILTER_LINEAR) 575 return max_aniso > 1 ? V_03C000_SQ_TEX_XY_FILTER_ANISO_BILINEAR 576 : V_03C000_SQ_TEX_XY_FILTER_BILINEAR; 577 else 578 return max_aniso > 1 ? V_03C000_SQ_TEX_XY_FILTER_ANISO_POINT 579 : V_03C000_SQ_TEX_XY_FILTER_POINT; 580 } 581 582 static void *r600_create_sampler_state(struct pipe_context *ctx, 583 const struct pipe_sampler_state *state) 584 { 585 struct r600_common_screen *rscreen = (struct r600_common_screen*)ctx->screen; 586 struct r600_pipe_sampler_state *ss = CALLOC_STRUCT(r600_pipe_sampler_state); 587 unsigned max_aniso = rscreen->force_aniso >= 0 ? rscreen->force_aniso 588 : state->max_anisotropy; 589 unsigned max_aniso_ratio = r600_tex_aniso_filter(max_aniso); 590 591 if (!ss) { 592 return NULL; 593 } 594 595 ss->seamless_cube_map = state->seamless_cube_map; 596 ss->border_color_use = sampler_state_needs_border_color(state); 597 598 /* R_03C000_SQ_TEX_SAMPLER_WORD0_0 */ 599 ss->tex_sampler_words[0] = 600 S_03C000_CLAMP_X(r600_tex_wrap(state->wrap_s)) | 601 S_03C000_CLAMP_Y(r600_tex_wrap(state->wrap_t)) | 602 S_03C000_CLAMP_Z(r600_tex_wrap(state->wrap_r)) | 603 S_03C000_XY_MAG_FILTER(r600_tex_filter(state->mag_img_filter, max_aniso)) | 604 S_03C000_XY_MIN_FILTER(r600_tex_filter(state->min_img_filter, max_aniso)) | 605 S_03C000_MIP_FILTER(r600_tex_mipfilter(state->min_mip_filter)) | 606 S_03C000_MAX_ANISO_RATIO(max_aniso_ratio) | 607 S_03C000_DEPTH_COMPARE_FUNCTION(r600_tex_compare(state->compare_func)) | 608 S_03C000_BORDER_COLOR_TYPE(ss->border_color_use ? V_03C000_SQ_TEX_BORDER_COLOR_REGISTER : 0); 609 /* R_03C004_SQ_TEX_SAMPLER_WORD1_0 */ 610 ss->tex_sampler_words[1] = 611 S_03C004_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 6)) | 612 S_03C004_MAX_LOD(S_FIXED(CLAMP(state->max_lod, 0, 15), 6)) | 613 S_03C004_LOD_BIAS(S_FIXED(CLAMP(state->lod_bias, -16, 16), 6)); 614 /* R_03C008_SQ_TEX_SAMPLER_WORD2_0 */ 615 ss->tex_sampler_words[2] = S_03C008_TYPE(1); 616 617 if (ss->border_color_use) { 618 memcpy(&ss->border_color, &state->border_color, sizeof(state->border_color)); 619 } 620 return ss; 621 } 622 623 static struct pipe_sampler_view * 624 texture_buffer_sampler_view(struct r600_pipe_sampler_view *view, 625 unsigned width0, unsigned height0) 626 627 { 628 struct r600_texture *tmp = (struct r600_texture*)view->base.texture; 629 int stride = util_format_get_blocksize(view->base.format); 630 unsigned format, num_format, format_comp, endian; 631 uint64_t offset = view->base.u.buf.offset; 632 unsigned size = view->base.u.buf.size; 633 634 r600_vertex_data_type(view->base.format, 635 &format, &num_format, &format_comp, 636 &endian); 637 638 view->tex_resource = &tmp->resource; 639 view->skip_mip_address_reloc = true; 640 641 view->tex_resource_words[0] = offset; 642 view->tex_resource_words[1] = size - 1; 643 view->tex_resource_words[2] = S_038008_BASE_ADDRESS_HI(offset >> 32UL) | 644 S_038008_STRIDE(stride) | 645 S_038008_DATA_FORMAT(format) | 646 S_038008_NUM_FORMAT_ALL(num_format) | 647 S_038008_FORMAT_COMP_ALL(format_comp) | 648 S_038008_ENDIAN_SWAP(endian); 649 view->tex_resource_words[3] = 0; 650 /* 651 * in theory dword 4 is for number of elements, for use with resinfo, 652 * but it seems to utterly fail to work, the amd gpu shader analyser 653 * uses a const buffer to store the element sizes for buffer txq 654 */ 655 view->tex_resource_words[4] = 0; 656 view->tex_resource_words[5] = 0; 657 view->tex_resource_words[6] = S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_BUFFER); 658 return &view->base; 659 } 660 661 struct pipe_sampler_view * 662 r600_create_sampler_view_custom(struct pipe_context *ctx, 663 struct pipe_resource *texture, 664 const struct pipe_sampler_view *state, 665 unsigned width_first_level, unsigned height_first_level) 666 { 667 struct r600_pipe_sampler_view *view = CALLOC_STRUCT(r600_pipe_sampler_view); 668 struct r600_texture *tmp = (struct r600_texture*)texture; 669 unsigned format, endian; 670 uint32_t word4 = 0, yuv_format = 0, pitch = 0; 671 unsigned char swizzle[4], array_mode = 0; 672 unsigned width, height, depth, offset_level, last_level; 673 bool do_endian_swap = FALSE; 674 675 if (!view) 676 return NULL; 677 678 /* initialize base object */ 679 view->base = *state; 680 view->base.texture = NULL; 681 pipe_reference(NULL, &texture->reference); 682 view->base.texture = texture; 683 view->base.reference.count = 1; 684 view->base.context = ctx; 685 686 if (texture->target == PIPE_BUFFER) 687 return texture_buffer_sampler_view(view, texture->width0, 1); 688 689 swizzle[0] = state->swizzle_r; 690 swizzle[1] = state->swizzle_g; 691 swizzle[2] = state->swizzle_b; 692 swizzle[3] = state->swizzle_a; 693 694 if (R600_BIG_ENDIAN) 695 do_endian_swap = !tmp->db_compatible; 696 697 format = r600_translate_texformat(ctx->screen, state->format, 698 swizzle, 699 &word4, &yuv_format, do_endian_swap); 700 assert(format != ~0); 701 if (format == ~0) { 702 FREE(view); 703 return NULL; 704 } 705 706 if (state->format == PIPE_FORMAT_X24S8_UINT || 707 state->format == PIPE_FORMAT_S8X24_UINT || 708 state->format == PIPE_FORMAT_X32_S8X24_UINT || 709 state->format == PIPE_FORMAT_S8_UINT) 710 view->is_stencil_sampler = true; 711 712 if (tmp->is_depth && !r600_can_sample_zs(tmp, view->is_stencil_sampler)) { 713 if (!r600_init_flushed_depth_texture(ctx, texture, NULL)) { 714 FREE(view); 715 return NULL; 716 } 717 tmp = tmp->flushed_depth_texture; 718 } 719 720 endian = r600_colorformat_endian_swap(format, do_endian_swap); 721 722 offset_level = state->u.tex.first_level; 723 last_level = state->u.tex.last_level - offset_level; 724 width = width_first_level; 725 height = height_first_level; 726 depth = u_minify(texture->depth0, offset_level); 727 pitch = tmp->surface.u.legacy.level[offset_level].nblk_x * util_format_get_blockwidth(state->format); 728 729 if (texture->target == PIPE_TEXTURE_1D_ARRAY) { 730 height = 1; 731 depth = texture->array_size; 732 } else if (texture->target == PIPE_TEXTURE_2D_ARRAY) { 733 depth = texture->array_size; 734 } else if (texture->target == PIPE_TEXTURE_CUBE_ARRAY) 735 depth = texture->array_size / 6; 736 737 switch (tmp->surface.u.legacy.level[offset_level].mode) { 738 default: 739 case RADEON_SURF_MODE_LINEAR_ALIGNED: 740 array_mode = V_038000_ARRAY_LINEAR_ALIGNED; 741 break; 742 case RADEON_SURF_MODE_1D: 743 array_mode = V_038000_ARRAY_1D_TILED_THIN1; 744 break; 745 case RADEON_SURF_MODE_2D: 746 array_mode = V_038000_ARRAY_2D_TILED_THIN1; 747 break; 748 } 749 750 view->tex_resource = &tmp->resource; 751 view->tex_resource_words[0] = (S_038000_DIM(r600_tex_dim(texture->target, texture->nr_samples)) | 752 S_038000_TILE_MODE(array_mode) | 753 S_038000_TILE_TYPE(tmp->non_disp_tiling) | 754 S_038000_PITCH((pitch / 8) - 1) | 755 S_038000_TEX_WIDTH(width - 1)); 756 view->tex_resource_words[1] = (S_038004_TEX_HEIGHT(height - 1) | 757 S_038004_TEX_DEPTH(depth - 1) | 758 S_038004_DATA_FORMAT(format)); 759 view->tex_resource_words[2] = tmp->surface.u.legacy.level[offset_level].offset >> 8; 760 if (offset_level >= tmp->resource.b.b.last_level) { 761 view->tex_resource_words[3] = tmp->surface.u.legacy.level[offset_level].offset >> 8; 762 } else { 763 view->tex_resource_words[3] = tmp->surface.u.legacy.level[offset_level + 1].offset >> 8; 764 } 765 view->tex_resource_words[4] = (word4 | 766 S_038010_REQUEST_SIZE(1) | 767 S_038010_ENDIAN_SWAP(endian) | 768 S_038010_BASE_LEVEL(0)); 769 view->tex_resource_words[5] = (S_038014_BASE_ARRAY(state->u.tex.first_layer) | 770 S_038014_LAST_ARRAY(state->u.tex.last_layer)); 771 if (texture->nr_samples > 1) { 772 /* LAST_LEVEL holds log2(nr_samples) for multisample textures */ 773 view->tex_resource_words[5] |= S_038014_LAST_LEVEL(util_logbase2(texture->nr_samples)); 774 } else { 775 view->tex_resource_words[5] |= S_038014_LAST_LEVEL(last_level); 776 } 777 view->tex_resource_words[6] = (S_038018_TYPE(V_038010_SQ_TEX_VTX_VALID_TEXTURE) | 778 S_038018_MAX_ANISO(4 /* max 16 samples */)); 779 return &view->base; 780 } 781 782 static struct pipe_sampler_view * 783 r600_create_sampler_view(struct pipe_context *ctx, 784 struct pipe_resource *tex, 785 const struct pipe_sampler_view *state) 786 { 787 return r600_create_sampler_view_custom(ctx, tex, state, 788 u_minify(tex->width0, state->u.tex.first_level), 789 u_minify(tex->height0, state->u.tex.first_level)); 790 } 791 792 static void r600_emit_clip_state(struct r600_context *rctx, struct r600_atom *atom) 793 { 794 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 795 struct pipe_clip_state *state = &rctx->clip_state.state; 796 797 radeon_set_context_reg_seq(cs, R_028E20_PA_CL_UCP0_X, 6*4); 798 radeon_emit_array(cs, (unsigned*)state, 6*4); 799 } 800 801 static void r600_set_polygon_stipple(struct pipe_context *ctx, 802 const struct pipe_poly_stipple *state) 803 { 804 } 805 806 static void r600_init_color_surface(struct r600_context *rctx, 807 struct r600_surface *surf, 808 bool force_cmask_fmask) 809 { 810 struct r600_screen *rscreen = rctx->screen; 811 struct r600_texture *rtex = (struct r600_texture*)surf->base.texture; 812 unsigned level = surf->base.u.tex.level; 813 unsigned pitch, slice; 814 unsigned color_info; 815 unsigned color_view; 816 unsigned format, swap, ntype, endian; 817 unsigned offset; 818 const struct util_format_description *desc; 819 int i; 820 bool blend_bypass = 0, blend_clamp = 0, do_endian_swap = FALSE; 821 822 if (rtex->db_compatible && !r600_can_sample_zs(rtex, false)) { 823 r600_init_flushed_depth_texture(&rctx->b.b, surf->base.texture, NULL); 824 rtex = rtex->flushed_depth_texture; 825 assert(rtex); 826 } 827 828 offset = rtex->surface.u.legacy.level[level].offset; 829 color_view = S_028080_SLICE_START(surf->base.u.tex.first_layer) | 830 S_028080_SLICE_MAX(surf->base.u.tex.last_layer); 831 832 pitch = rtex->surface.u.legacy.level[level].nblk_x / 8 - 1; 833 slice = (rtex->surface.u.legacy.level[level].nblk_x * rtex->surface.u.legacy.level[level].nblk_y) / 64; 834 if (slice) { 835 slice = slice - 1; 836 } 837 color_info = 0; 838 switch (rtex->surface.u.legacy.level[level].mode) { 839 default: 840 case RADEON_SURF_MODE_LINEAR_ALIGNED: 841 color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_LINEAR_ALIGNED); 842 break; 843 case RADEON_SURF_MODE_1D: 844 color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_1D_TILED_THIN1); 845 break; 846 case RADEON_SURF_MODE_2D: 847 color_info = S_0280A0_ARRAY_MODE(V_038000_ARRAY_2D_TILED_THIN1); 848 break; 849 } 850 851 desc = util_format_description(surf->base.format); 852 853 for (i = 0; i < 4; i++) { 854 if (desc->channel[i].type != UTIL_FORMAT_TYPE_VOID) { 855 break; 856 } 857 } 858 859 ntype = V_0280A0_NUMBER_UNORM; 860 if (desc->colorspace == UTIL_FORMAT_COLORSPACE_SRGB) 861 ntype = V_0280A0_NUMBER_SRGB; 862 else if (desc->channel[i].type == UTIL_FORMAT_TYPE_SIGNED) { 863 if (desc->channel[i].normalized) 864 ntype = V_0280A0_NUMBER_SNORM; 865 else if (desc->channel[i].pure_integer) 866 ntype = V_0280A0_NUMBER_SINT; 867 } else if (desc->channel[i].type == UTIL_FORMAT_TYPE_UNSIGNED) { 868 if (desc->channel[i].normalized) 869 ntype = V_0280A0_NUMBER_UNORM; 870 else if (desc->channel[i].pure_integer) 871 ntype = V_0280A0_NUMBER_UINT; 872 } else if (desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT) { 873 ntype = V_0280A0_NUMBER_FLOAT; 874 } 875 876 if (R600_BIG_ENDIAN) 877 do_endian_swap = !rtex->db_compatible; 878 879 format = r600_translate_colorformat(rctx->b.chip_class, surf->base.format, 880 do_endian_swap); 881 assert(format != ~0); 882 883 swap = r600_translate_colorswap(surf->base.format, do_endian_swap); 884 assert(swap != ~0); 885 886 endian = r600_colorformat_endian_swap(format, do_endian_swap); 887 888 /* blend clamp should be set for all NORM/SRGB types */ 889 if (ntype == V_0280A0_NUMBER_UNORM || ntype == V_0280A0_NUMBER_SNORM || 890 ntype == V_0280A0_NUMBER_SRGB) 891 blend_clamp = 1; 892 893 /* set blend bypass according to docs if SINT/UINT or 894 8/24 COLOR variants */ 895 if (ntype == V_0280A0_NUMBER_UINT || ntype == V_0280A0_NUMBER_SINT || 896 format == V_0280A0_COLOR_8_24 || format == V_0280A0_COLOR_24_8 || 897 format == V_0280A0_COLOR_X24_8_32_FLOAT) { 898 blend_clamp = 0; 899 blend_bypass = 1; 900 } 901 902 surf->alphatest_bypass = ntype == V_0280A0_NUMBER_UINT || ntype == V_0280A0_NUMBER_SINT; 903 904 color_info |= S_0280A0_FORMAT(format) | 905 S_0280A0_COMP_SWAP(swap) | 906 S_0280A0_BLEND_BYPASS(blend_bypass) | 907 S_0280A0_BLEND_CLAMP(blend_clamp) | 908 S_0280A0_SIMPLE_FLOAT(1) | 909 S_0280A0_NUMBER_TYPE(ntype) | 910 S_0280A0_ENDIAN(endian); 911 912 /* EXPORT_NORM is an optimzation that can be enabled for better 913 * performance in certain cases 914 */ 915 if (rctx->b.chip_class == R600) { 916 /* EXPORT_NORM can be enabled if: 917 * - 11-bit or smaller UNORM/SNORM/SRGB 918 * - BLEND_CLAMP is enabled 919 * - BLEND_FLOAT32 is disabled 920 */ 921 if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS && 922 (desc->channel[i].size < 12 && 923 desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT && 924 ntype != V_0280A0_NUMBER_UINT && 925 ntype != V_0280A0_NUMBER_SINT) && 926 G_0280A0_BLEND_CLAMP(color_info) && 927 /* XXX this condition is always true since BLEND_FLOAT32 is never set (bug?). */ 928 !G_0280A0_BLEND_FLOAT32(color_info)) { 929 color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM); 930 surf->export_16bpc = true; 931 } 932 } else { 933 /* EXPORT_NORM can be enabled if: 934 * - 11-bit or smaller UNORM/SNORM/SRGB 935 * - 16-bit or smaller FLOAT 936 */ 937 if (desc->colorspace != UTIL_FORMAT_COLORSPACE_ZS && 938 ((desc->channel[i].size < 12 && 939 desc->channel[i].type != UTIL_FORMAT_TYPE_FLOAT && 940 ntype != V_0280A0_NUMBER_UINT && ntype != V_0280A0_NUMBER_SINT) || 941 (desc->channel[i].size < 17 && 942 desc->channel[i].type == UTIL_FORMAT_TYPE_FLOAT))) { 943 color_info |= S_0280A0_SOURCE_FORMAT(V_0280A0_EXPORT_NORM); 944 surf->export_16bpc = true; 945 } 946 } 947 948 /* These might not always be initialized to zero. */ 949 surf->cb_color_base = offset >> 8; 950 surf->cb_color_size = S_028060_PITCH_TILE_MAX(pitch) | 951 S_028060_SLICE_TILE_MAX(slice); 952 surf->cb_color_fmask = surf->cb_color_base; 953 surf->cb_color_cmask = surf->cb_color_base; 954 surf->cb_color_mask = 0; 955 956 r600_resource_reference(&surf->cb_buffer_cmask, &rtex->resource); 957 r600_resource_reference(&surf->cb_buffer_fmask, &rtex->resource); 958 959 if (rtex->cmask.size) { 960 surf->cb_color_cmask = rtex->cmask.offset >> 8; 961 surf->cb_color_mask |= S_028100_CMASK_BLOCK_MAX(rtex->cmask.slice_tile_max); 962 963 if (rtex->fmask.size) { 964 color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE); 965 surf->cb_color_fmask = rtex->fmask.offset >> 8; 966 surf->cb_color_mask |= S_028100_FMASK_TILE_MAX(rtex->fmask.slice_tile_max); 967 } else { /* cmask only */ 968 color_info |= S_0280A0_TILE_MODE(V_0280A0_CLEAR_ENABLE); 969 } 970 } else if (force_cmask_fmask) { 971 /* Allocate dummy FMASK and CMASK if they aren't allocated already. 972 * 973 * R6xx needs FMASK and CMASK for the destination buffer of color resolve, 974 * otherwise it hangs. We don't have FMASK and CMASK pre-allocated, 975 * because it's not an MSAA buffer. 976 */ 977 struct r600_cmask_info cmask; 978 struct r600_fmask_info fmask; 979 980 r600_texture_get_cmask_info(&rscreen->b, rtex, &cmask); 981 r600_texture_get_fmask_info(&rscreen->b, rtex, 8, &fmask); 982 983 /* CMASK. */ 984 if (!rctx->dummy_cmask || 985 rctx->dummy_cmask->b.b.width0 < cmask.size || 986 rctx->dummy_cmask->buf->alignment % cmask.alignment != 0) { 987 struct pipe_transfer *transfer; 988 void *ptr; 989 990 r600_resource_reference(&rctx->dummy_cmask, NULL); 991 rctx->dummy_cmask = (struct r600_resource*) 992 r600_aligned_buffer_create(&rscreen->b.b, 0, 993 PIPE_USAGE_DEFAULT, 994 cmask.size, cmask.alignment); 995 996 if (unlikely(!rctx->dummy_cmask)) { 997 surf->color_initialized = false; 998 return; 999 } 1000 1001 /* Set the contents to 0xCC. */ 1002 ptr = pipe_buffer_map(&rctx->b.b, &rctx->dummy_cmask->b.b, PIPE_TRANSFER_WRITE, &transfer); 1003 memset(ptr, 0xCC, cmask.size); 1004 pipe_buffer_unmap(&rctx->b.b, transfer); 1005 } 1006 r600_resource_reference(&surf->cb_buffer_cmask, rctx->dummy_cmask); 1007 1008 /* FMASK. */ 1009 if (!rctx->dummy_fmask || 1010 rctx->dummy_fmask->b.b.width0 < fmask.size || 1011 rctx->dummy_fmask->buf->alignment % fmask.alignment != 0) { 1012 r600_resource_reference(&rctx->dummy_fmask, NULL); 1013 rctx->dummy_fmask = (struct r600_resource*) 1014 r600_aligned_buffer_create(&rscreen->b.b, 0, 1015 PIPE_USAGE_DEFAULT, 1016 fmask.size, fmask.alignment); 1017 1018 if (unlikely(!rctx->dummy_fmask)) { 1019 surf->color_initialized = false; 1020 return; 1021 } 1022 } 1023 r600_resource_reference(&surf->cb_buffer_fmask, rctx->dummy_fmask); 1024 1025 /* Init the registers. */ 1026 color_info |= S_0280A0_TILE_MODE(V_0280A0_FRAG_ENABLE); 1027 surf->cb_color_cmask = 0; 1028 surf->cb_color_fmask = 0; 1029 surf->cb_color_mask = S_028100_CMASK_BLOCK_MAX(cmask.slice_tile_max) | 1030 S_028100_FMASK_TILE_MAX(fmask.slice_tile_max); 1031 } 1032 1033 surf->cb_color_info = color_info; 1034 surf->cb_color_view = color_view; 1035 surf->color_initialized = true; 1036 } 1037 1038 static void r600_init_depth_surface(struct r600_context *rctx, 1039 struct r600_surface *surf) 1040 { 1041 struct r600_texture *rtex = (struct r600_texture*)surf->base.texture; 1042 unsigned level, pitch, slice, format, offset, array_mode; 1043 1044 level = surf->base.u.tex.level; 1045 offset = rtex->surface.u.legacy.level[level].offset; 1046 pitch = rtex->surface.u.legacy.level[level].nblk_x / 8 - 1; 1047 slice = (rtex->surface.u.legacy.level[level].nblk_x * rtex->surface.u.legacy.level[level].nblk_y) / 64; 1048 if (slice) { 1049 slice = slice - 1; 1050 } 1051 switch (rtex->surface.u.legacy.level[level].mode) { 1052 case RADEON_SURF_MODE_2D: 1053 array_mode = V_0280A0_ARRAY_2D_TILED_THIN1; 1054 break; 1055 case RADEON_SURF_MODE_1D: 1056 case RADEON_SURF_MODE_LINEAR_ALIGNED: 1057 default: 1058 array_mode = V_0280A0_ARRAY_1D_TILED_THIN1; 1059 break; 1060 } 1061 1062 format = r600_translate_dbformat(surf->base.format); 1063 assert(format != ~0); 1064 1065 surf->db_depth_info = S_028010_ARRAY_MODE(array_mode) | S_028010_FORMAT(format); 1066 surf->db_depth_base = offset >> 8; 1067 surf->db_depth_view = S_028004_SLICE_START(surf->base.u.tex.first_layer) | 1068 S_028004_SLICE_MAX(surf->base.u.tex.last_layer); 1069 surf->db_depth_size = S_028000_PITCH_TILE_MAX(pitch) | S_028000_SLICE_TILE_MAX(slice); 1070 surf->db_prefetch_limit = (rtex->surface.u.legacy.level[level].nblk_y / 8) - 1; 1071 1072 if (r600_htile_enabled(rtex, level)) { 1073 surf->db_htile_data_base = rtex->htile_offset >> 8; 1074 surf->db_htile_surface = S_028D24_HTILE_WIDTH(1) | 1075 S_028D24_HTILE_HEIGHT(1) | 1076 S_028D24_FULL_CACHE(1); 1077 /* preload is not working properly on r6xx/r7xx */ 1078 surf->db_depth_info |= S_028010_TILE_SURFACE_ENABLE(1); 1079 } 1080 1081 surf->depth_initialized = true; 1082 } 1083 1084 static void r600_set_framebuffer_state(struct pipe_context *ctx, 1085 const struct pipe_framebuffer_state *state) 1086 { 1087 struct r600_context *rctx = (struct r600_context *)ctx; 1088 struct r600_surface *surf; 1089 struct r600_texture *rtex; 1090 unsigned i; 1091 1092 /* Flush TC when changing the framebuffer state, because the only 1093 * client not using TC that can change textures is the framebuffer. 1094 * Other places don't typically have to flush TC. 1095 */ 1096 rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE | 1097 R600_CONTEXT_FLUSH_AND_INV | 1098 R600_CONTEXT_FLUSH_AND_INV_CB | 1099 R600_CONTEXT_FLUSH_AND_INV_CB_META | 1100 R600_CONTEXT_FLUSH_AND_INV_DB | 1101 R600_CONTEXT_FLUSH_AND_INV_DB_META | 1102 R600_CONTEXT_INV_TEX_CACHE; 1103 1104 /* Set the new state. */ 1105 util_copy_framebuffer_state(&rctx->framebuffer.state, state); 1106 1107 rctx->framebuffer.export_16bpc = state->nr_cbufs != 0; 1108 rctx->framebuffer.cb0_is_integer = state->nr_cbufs && state->cbufs[0] && 1109 util_format_is_pure_integer(state->cbufs[0]->format); 1110 rctx->framebuffer.compressed_cb_mask = 0; 1111 rctx->framebuffer.is_msaa_resolve = state->nr_cbufs == 2 && 1112 state->cbufs[0] && state->cbufs[1] && 1113 state->cbufs[0]->texture->nr_samples > 1 && 1114 state->cbufs[1]->texture->nr_samples <= 1; 1115 rctx->framebuffer.nr_samples = util_framebuffer_get_num_samples(state); 1116 1117 /* Colorbuffers. */ 1118 for (i = 0; i < state->nr_cbufs; i++) { 1119 /* The resolve buffer must have CMASK and FMASK to prevent hardlocks on R6xx. */ 1120 bool force_cmask_fmask = rctx->b.chip_class == R600 && 1121 rctx->framebuffer.is_msaa_resolve && 1122 i == 1; 1123 1124 surf = (struct r600_surface*)state->cbufs[i]; 1125 if (!surf) 1126 continue; 1127 1128 rtex = (struct r600_texture*)surf->base.texture; 1129 r600_context_add_resource_size(ctx, state->cbufs[i]->texture); 1130 1131 if (!surf->color_initialized || force_cmask_fmask) { 1132 r600_init_color_surface(rctx, surf, force_cmask_fmask); 1133 if (force_cmask_fmask) { 1134 /* re-initialize later without compression */ 1135 surf->color_initialized = false; 1136 } 1137 } 1138 1139 if (!surf->export_16bpc) { 1140 rctx->framebuffer.export_16bpc = false; 1141 } 1142 1143 if (rtex->fmask.size) { 1144 rctx->framebuffer.compressed_cb_mask |= 1 << i; 1145 } 1146 } 1147 1148 /* Update alpha-test state dependencies. 1149 * Alpha-test is done on the first colorbuffer only. */ 1150 if (state->nr_cbufs) { 1151 bool alphatest_bypass = false; 1152 1153 surf = (struct r600_surface*)state->cbufs[0]; 1154 if (surf) { 1155 alphatest_bypass = surf->alphatest_bypass; 1156 } 1157 1158 if (rctx->alphatest_state.bypass != alphatest_bypass) { 1159 rctx->alphatest_state.bypass = alphatest_bypass; 1160 r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom); 1161 } 1162 } 1163 1164 /* ZS buffer. */ 1165 if (state->zsbuf) { 1166 surf = (struct r600_surface*)state->zsbuf; 1167 1168 r600_context_add_resource_size(ctx, state->zsbuf->texture); 1169 1170 if (!surf->depth_initialized) { 1171 r600_init_depth_surface(rctx, surf); 1172 } 1173 1174 if (state->zsbuf->format != rctx->poly_offset_state.zs_format) { 1175 rctx->poly_offset_state.zs_format = state->zsbuf->format; 1176 r600_mark_atom_dirty(rctx, &rctx->poly_offset_state.atom); 1177 } 1178 1179 if (rctx->db_state.rsurf != surf) { 1180 rctx->db_state.rsurf = surf; 1181 r600_mark_atom_dirty(rctx, &rctx->db_state.atom); 1182 r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom); 1183 } 1184 } else if (rctx->db_state.rsurf) { 1185 rctx->db_state.rsurf = NULL; 1186 r600_mark_atom_dirty(rctx, &rctx->db_state.atom); 1187 r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom); 1188 } 1189 1190 if (rctx->cb_misc_state.nr_cbufs != state->nr_cbufs) { 1191 rctx->cb_misc_state.nr_cbufs = state->nr_cbufs; 1192 r600_mark_atom_dirty(rctx, &rctx->cb_misc_state.atom); 1193 } 1194 1195 if (state->nr_cbufs == 0 && rctx->alphatest_state.bypass) { 1196 rctx->alphatest_state.bypass = false; 1197 r600_mark_atom_dirty(rctx, &rctx->alphatest_state.atom); 1198 } 1199 1200 /* Calculate the CS size. */ 1201 rctx->framebuffer.atom.num_dw = 1202 10 /*COLOR_INFO*/ + 4 /*SCISSOR*/ + 3 /*SHADER_CONTROL*/ + 8 /*MSAA*/; 1203 1204 if (rctx->framebuffer.state.nr_cbufs) { 1205 rctx->framebuffer.atom.num_dw += 15 * rctx->framebuffer.state.nr_cbufs; 1206 rctx->framebuffer.atom.num_dw += 3 * (2 + rctx->framebuffer.state.nr_cbufs); 1207 } 1208 if (rctx->framebuffer.state.zsbuf) { 1209 rctx->framebuffer.atom.num_dw += 16; 1210 } else if (rctx->screen->b.info.drm_minor >= 18) { 1211 rctx->framebuffer.atom.num_dw += 3; 1212 } 1213 if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770) { 1214 rctx->framebuffer.atom.num_dw += 2; 1215 } 1216 1217 r600_mark_atom_dirty(rctx, &rctx->framebuffer.atom); 1218 1219 r600_set_sample_locations_constant_buffer(rctx); 1220 rctx->framebuffer.do_update_surf_dirtiness = true; 1221 } 1222 1223 static uint32_t sample_locs_2x[] = { 1224 FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4), 1225 FILL_SREG(-4, 4, 4, -4, -4, 4, 4, -4), 1226 }; 1227 static unsigned max_dist_2x = 4; 1228 1229 static uint32_t sample_locs_4x[] = { 1230 FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6), 1231 FILL_SREG(-2, -2, 2, 2, -6, 6, 6, -6), 1232 }; 1233 static unsigned max_dist_4x = 6; 1234 static uint32_t sample_locs_8x[] = { 1235 FILL_SREG(-1, 1, 1, 5, 3, -5, 5, 3), 1236 FILL_SREG(-7, -1, -3, -7, 7, -3, -5, 7), 1237 }; 1238 static unsigned max_dist_8x = 7; 1239 1240 static void r600_get_sample_position(struct pipe_context *ctx, 1241 unsigned sample_count, 1242 unsigned sample_index, 1243 float *out_value) 1244 { 1245 int offset, index; 1246 struct { 1247 int idx:4; 1248 } val; 1249 switch (sample_count) { 1250 case 1: 1251 default: 1252 out_value[0] = out_value[1] = 0.5; 1253 break; 1254 case 2: 1255 offset = 4 * (sample_index * 2); 1256 val.idx = (sample_locs_2x[0] >> offset) & 0xf; 1257 out_value[0] = (float)(val.idx + 8) / 16.0f; 1258 val.idx = (sample_locs_2x[0] >> (offset + 4)) & 0xf; 1259 out_value[1] = (float)(val.idx + 8) / 16.0f; 1260 break; 1261 case 4: 1262 offset = 4 * (sample_index * 2); 1263 val.idx = (sample_locs_4x[0] >> offset) & 0xf; 1264 out_value[0] = (float)(val.idx + 8) / 16.0f; 1265 val.idx = (sample_locs_4x[0] >> (offset + 4)) & 0xf; 1266 out_value[1] = (float)(val.idx + 8) / 16.0f; 1267 break; 1268 case 8: 1269 offset = 4 * (sample_index % 4 * 2); 1270 index = (sample_index / 4); 1271 val.idx = (sample_locs_8x[index] >> offset) & 0xf; 1272 out_value[0] = (float)(val.idx + 8) / 16.0f; 1273 val.idx = (sample_locs_8x[index] >> (offset + 4)) & 0xf; 1274 out_value[1] = (float)(val.idx + 8) / 16.0f; 1275 break; 1276 } 1277 } 1278 1279 static void r600_emit_msaa_state(struct r600_context *rctx, int nr_samples) 1280 { 1281 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1282 unsigned max_dist = 0; 1283 1284 if (rctx->b.family == CHIP_R600) { 1285 switch (nr_samples) { 1286 default: 1287 nr_samples = 0; 1288 break; 1289 case 2: 1290 radeon_set_config_reg(cs, R_008B40_PA_SC_AA_SAMPLE_LOCS_2S, sample_locs_2x[0]); 1291 max_dist = max_dist_2x; 1292 break; 1293 case 4: 1294 radeon_set_config_reg(cs, R_008B44_PA_SC_AA_SAMPLE_LOCS_4S, sample_locs_4x[0]); 1295 max_dist = max_dist_4x; 1296 break; 1297 case 8: 1298 radeon_set_config_reg_seq(cs, R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0, 2); 1299 radeon_emit(cs, sample_locs_8x[0]); /* R_008B48_PA_SC_AA_SAMPLE_LOCS_8S_WD0 */ 1300 radeon_emit(cs, sample_locs_8x[1]); /* R_008B4C_PA_SC_AA_SAMPLE_LOCS_8S_WD1 */ 1301 max_dist = max_dist_8x; 1302 break; 1303 } 1304 } else { 1305 switch (nr_samples) { 1306 default: 1307 radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); 1308 radeon_emit(cs, 0); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ 1309 radeon_emit(cs, 0); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ 1310 nr_samples = 0; 1311 break; 1312 case 2: 1313 radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); 1314 radeon_emit(cs, sample_locs_2x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ 1315 radeon_emit(cs, sample_locs_2x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ 1316 max_dist = max_dist_2x; 1317 break; 1318 case 4: 1319 radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); 1320 radeon_emit(cs, sample_locs_4x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ 1321 radeon_emit(cs, sample_locs_4x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ 1322 max_dist = max_dist_4x; 1323 break; 1324 case 8: 1325 radeon_set_context_reg_seq(cs, R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX, 2); 1326 radeon_emit(cs, sample_locs_8x[0]); /* R_028C1C_PA_SC_AA_SAMPLE_LOCS_MCTX */ 1327 radeon_emit(cs, sample_locs_8x[1]); /* R_028C20_PA_SC_AA_SAMPLE_LOCS_8D_WD1_MCTX */ 1328 max_dist = max_dist_8x; 1329 break; 1330 } 1331 } 1332 1333 if (nr_samples > 1) { 1334 radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); 1335 radeon_emit(cs, S_028C00_LAST_PIXEL(1) | 1336 S_028C00_EXPAND_LINE_WIDTH(1)); /* R_028C00_PA_SC_LINE_CNTL */ 1337 radeon_emit(cs, S_028C04_MSAA_NUM_SAMPLES(util_logbase2(nr_samples)) | 1338 S_028C04_MAX_SAMPLE_DIST(max_dist)); /* R_028C04_PA_SC_AA_CONFIG */ 1339 } else { 1340 radeon_set_context_reg_seq(cs, R_028C00_PA_SC_LINE_CNTL, 2); 1341 radeon_emit(cs, S_028C00_LAST_PIXEL(1)); /* R_028C00_PA_SC_LINE_CNTL */ 1342 radeon_emit(cs, 0); /* R_028C04_PA_SC_AA_CONFIG */ 1343 } 1344 } 1345 1346 static void r600_emit_framebuffer_state(struct r600_context *rctx, struct r600_atom *atom) 1347 { 1348 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1349 struct pipe_framebuffer_state *state = &rctx->framebuffer.state; 1350 unsigned nr_cbufs = state->nr_cbufs; 1351 struct r600_surface **cb = (struct r600_surface**)&state->cbufs[0]; 1352 unsigned i, sbu = 0; 1353 1354 /* Colorbuffers. */ 1355 radeon_set_context_reg_seq(cs, R_0280A0_CB_COLOR0_INFO, 8); 1356 for (i = 0; i < nr_cbufs; i++) { 1357 radeon_emit(cs, cb[i] ? cb[i]->cb_color_info : 0); 1358 } 1359 /* set CB_COLOR1_INFO for possible dual-src blending */ 1360 if (rctx->framebuffer.dual_src_blend && i == 1 && cb[0]) { 1361 radeon_emit(cs, cb[0]->cb_color_info); 1362 i++; 1363 } 1364 for (; i < 8; i++) { 1365 radeon_emit(cs, 0); 1366 } 1367 1368 if (nr_cbufs) { 1369 for (i = 0; i < nr_cbufs; i++) { 1370 unsigned reloc; 1371 1372 if (!cb[i]) 1373 continue; 1374 1375 /* COLOR_BASE */ 1376 radeon_set_context_reg(cs, R_028040_CB_COLOR0_BASE + i*4, cb[i]->cb_color_base); 1377 1378 reloc = radeon_add_to_buffer_list(&rctx->b, 1379 &rctx->b.gfx, 1380 (struct r600_resource*)cb[i]->base.texture, 1381 RADEON_USAGE_READWRITE, 1382 cb[i]->base.texture->nr_samples > 1 ? 1383 RADEON_PRIO_COLOR_BUFFER_MSAA : 1384 RADEON_PRIO_COLOR_BUFFER); 1385 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1386 radeon_emit(cs, reloc); 1387 1388 /* FMASK */ 1389 radeon_set_context_reg(cs, R_0280E0_CB_COLOR0_FRAG + i*4, cb[i]->cb_color_fmask); 1390 1391 reloc = radeon_add_to_buffer_list(&rctx->b, 1392 &rctx->b.gfx, 1393 cb[i]->cb_buffer_fmask, 1394 RADEON_USAGE_READWRITE, 1395 cb[i]->base.texture->nr_samples > 1 ? 1396 RADEON_PRIO_COLOR_BUFFER_MSAA : 1397 RADEON_PRIO_COLOR_BUFFER); 1398 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1399 radeon_emit(cs, reloc); 1400 1401 /* CMASK */ 1402 radeon_set_context_reg(cs, R_0280C0_CB_COLOR0_TILE + i*4, cb[i]->cb_color_cmask); 1403 1404 reloc = radeon_add_to_buffer_list(&rctx->b, 1405 &rctx->b.gfx, 1406 cb[i]->cb_buffer_cmask, 1407 RADEON_USAGE_READWRITE, 1408 cb[i]->base.texture->nr_samples > 1 ? 1409 RADEON_PRIO_COLOR_BUFFER_MSAA : 1410 RADEON_PRIO_COLOR_BUFFER); 1411 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1412 radeon_emit(cs, reloc); 1413 } 1414 1415 radeon_set_context_reg_seq(cs, R_028060_CB_COLOR0_SIZE, nr_cbufs); 1416 for (i = 0; i < nr_cbufs; i++) { 1417 radeon_emit(cs, cb[i] ? cb[i]->cb_color_size : 0); 1418 } 1419 1420 radeon_set_context_reg_seq(cs, R_028080_CB_COLOR0_VIEW, nr_cbufs); 1421 for (i = 0; i < nr_cbufs; i++) { 1422 radeon_emit(cs, cb[i] ? cb[i]->cb_color_view : 0); 1423 } 1424 1425 radeon_set_context_reg_seq(cs, R_028100_CB_COLOR0_MASK, nr_cbufs); 1426 for (i = 0; i < nr_cbufs; i++) { 1427 radeon_emit(cs, cb[i] ? cb[i]->cb_color_mask : 0); 1428 } 1429 1430 sbu |= SURFACE_BASE_UPDATE_COLOR_NUM(nr_cbufs); 1431 } 1432 1433 /* SURFACE_BASE_UPDATE */ 1434 if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) { 1435 radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0)); 1436 radeon_emit(cs, sbu); 1437 sbu = 0; 1438 } 1439 1440 /* Zbuffer. */ 1441 if (state->zsbuf) { 1442 struct r600_surface *surf = (struct r600_surface*)state->zsbuf; 1443 unsigned reloc = radeon_add_to_buffer_list(&rctx->b, 1444 &rctx->b.gfx, 1445 (struct r600_resource*)state->zsbuf->texture, 1446 RADEON_USAGE_READWRITE, 1447 surf->base.texture->nr_samples > 1 ? 1448 RADEON_PRIO_DEPTH_BUFFER_MSAA : 1449 RADEON_PRIO_DEPTH_BUFFER); 1450 1451 radeon_set_context_reg_seq(cs, R_028000_DB_DEPTH_SIZE, 2); 1452 radeon_emit(cs, surf->db_depth_size); /* R_028000_DB_DEPTH_SIZE */ 1453 radeon_emit(cs, surf->db_depth_view); /* R_028004_DB_DEPTH_VIEW */ 1454 radeon_set_context_reg_seq(cs, R_02800C_DB_DEPTH_BASE, 2); 1455 radeon_emit(cs, surf->db_depth_base); /* R_02800C_DB_DEPTH_BASE */ 1456 radeon_emit(cs, surf->db_depth_info); /* R_028010_DB_DEPTH_INFO */ 1457 1458 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1459 radeon_emit(cs, reloc); 1460 1461 radeon_set_context_reg(cs, R_028D34_DB_PREFETCH_LIMIT, surf->db_prefetch_limit); 1462 1463 sbu |= SURFACE_BASE_UPDATE_DEPTH; 1464 } else if (rctx->screen->b.info.drm_minor >= 18) { 1465 /* DRM 2.6.18 allows the INVALID format to disable depth/stencil. 1466 * Older kernels are out of luck. */ 1467 radeon_set_context_reg(cs, R_028010_DB_DEPTH_INFO, S_028010_FORMAT(V_028010_DEPTH_INVALID)); 1468 } 1469 1470 /* SURFACE_BASE_UPDATE */ 1471 if (rctx->b.family > CHIP_R600 && rctx->b.family < CHIP_RV770 && sbu) { 1472 radeon_emit(cs, PKT3(PKT3_SURFACE_BASE_UPDATE, 0, 0)); 1473 radeon_emit(cs, sbu); 1474 sbu = 0; 1475 } 1476 1477 /* Framebuffer dimensions. */ 1478 radeon_set_context_reg_seq(cs, R_028204_PA_SC_WINDOW_SCISSOR_TL, 2); 1479 radeon_emit(cs, S_028240_TL_X(0) | S_028240_TL_Y(0) | 1480 S_028240_WINDOW_OFFSET_DISABLE(1)); /* R_028204_PA_SC_WINDOW_SCISSOR_TL */ 1481 radeon_emit(cs, S_028244_BR_X(state->width) | 1482 S_028244_BR_Y(state->height)); /* R_028208_PA_SC_WINDOW_SCISSOR_BR */ 1483 1484 if (rctx->framebuffer.is_msaa_resolve) { 1485 radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1); 1486 } else { 1487 /* Always enable the first colorbuffer in CB_SHADER_CONTROL. This 1488 * will assure that the alpha-test will work even if there is 1489 * no colorbuffer bound. */ 1490 radeon_set_context_reg(cs, R_0287A0_CB_SHADER_CONTROL, 1491 (1ull << MAX2(nr_cbufs, 1)) - 1); 1492 } 1493 1494 r600_emit_msaa_state(rctx, rctx->framebuffer.nr_samples); 1495 } 1496 1497 static void r600_set_min_samples(struct pipe_context *ctx, unsigned min_samples) 1498 { 1499 struct r600_context *rctx = (struct r600_context *)ctx; 1500 1501 if (rctx->ps_iter_samples == min_samples) 1502 return; 1503 1504 rctx->ps_iter_samples = min_samples; 1505 if (rctx->framebuffer.nr_samples > 1) { 1506 r600_mark_atom_dirty(rctx, &rctx->rasterizer_state.atom); 1507 if (rctx->b.chip_class == R600) 1508 r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom); 1509 } 1510 } 1511 1512 static void r600_emit_cb_misc_state(struct r600_context *rctx, struct r600_atom *atom) 1513 { 1514 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1515 struct r600_cb_misc_state *a = (struct r600_cb_misc_state*)atom; 1516 1517 if (G_028808_SPECIAL_OP(a->cb_color_control) == V_028808_SPECIAL_RESOLVE_BOX) { 1518 radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); 1519 if (rctx->b.chip_class == R600) { 1520 radeon_emit(cs, 0xff); /* R_028238_CB_TARGET_MASK */ 1521 radeon_emit(cs, 0xff); /* R_02823C_CB_SHADER_MASK */ 1522 } else { 1523 radeon_emit(cs, 0xf); /* R_028238_CB_TARGET_MASK */ 1524 radeon_emit(cs, 0xf); /* R_02823C_CB_SHADER_MASK */ 1525 } 1526 radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL, a->cb_color_control); 1527 } else { 1528 unsigned fb_colormask = (1ULL << ((unsigned)a->nr_cbufs * 4)) - 1; 1529 unsigned ps_colormask = (1ULL << ((unsigned)a->nr_ps_color_outputs * 4)) - 1; 1530 unsigned multiwrite = a->multiwrite && a->nr_cbufs > 1; 1531 1532 radeon_set_context_reg_seq(cs, R_028238_CB_TARGET_MASK, 2); 1533 radeon_emit(cs, a->blend_colormask & fb_colormask); /* R_028238_CB_TARGET_MASK */ 1534 /* Always enable the first color output to make sure alpha-test works even without one. */ 1535 radeon_emit(cs, 0xf | (multiwrite ? fb_colormask : ps_colormask)); /* R_02823C_CB_SHADER_MASK */ 1536 radeon_set_context_reg(cs, R_028808_CB_COLOR_CONTROL, 1537 a->cb_color_control | 1538 S_028808_MULTIWRITE_ENABLE(multiwrite)); 1539 } 1540 } 1541 1542 static void r600_emit_db_state(struct r600_context *rctx, struct r600_atom *atom) 1543 { 1544 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1545 struct r600_db_state *a = (struct r600_db_state*)atom; 1546 1547 if (a->rsurf && a->rsurf->db_htile_surface) { 1548 struct r600_texture *rtex = (struct r600_texture *)a->rsurf->base.texture; 1549 unsigned reloc_idx; 1550 1551 radeon_set_context_reg(cs, R_02802C_DB_DEPTH_CLEAR, fui(rtex->depth_clear_value)); 1552 radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, a->rsurf->db_htile_surface); 1553 radeon_set_context_reg(cs, R_028014_DB_HTILE_DATA_BASE, a->rsurf->db_htile_data_base); 1554 reloc_idx = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, &rtex->resource, 1555 RADEON_USAGE_READWRITE, RADEON_PRIO_HTILE); 1556 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1557 radeon_emit(cs, reloc_idx); 1558 } else { 1559 radeon_set_context_reg(cs, R_028D24_DB_HTILE_SURFACE, 0); 1560 } 1561 } 1562 1563 static void r600_emit_db_misc_state(struct r600_context *rctx, struct r600_atom *atom) 1564 { 1565 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1566 struct r600_db_misc_state *a = (struct r600_db_misc_state*)atom; 1567 unsigned db_render_control = 0; 1568 unsigned db_render_override = 1569 S_028D10_FORCE_HIS_ENABLE0(V_028D10_FORCE_DISABLE) | 1570 S_028D10_FORCE_HIS_ENABLE1(V_028D10_FORCE_DISABLE); 1571 1572 if (rctx->b.chip_class >= R700) { 1573 switch (a->ps_conservative_z) { 1574 default: /* fall through */ 1575 case TGSI_FS_DEPTH_LAYOUT_ANY: 1576 db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_ANY_Z); 1577 break; 1578 case TGSI_FS_DEPTH_LAYOUT_GREATER: 1579 db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_GREATER_THAN_Z); 1580 break; 1581 case TGSI_FS_DEPTH_LAYOUT_LESS: 1582 db_render_control |= S_028D0C_CONSERVATIVE_Z_EXPORT(V_028D0C_EXPORT_LESS_THAN_Z); 1583 break; 1584 } 1585 } 1586 1587 if (rctx->b.num_occlusion_queries > 0 && 1588 !a->occlusion_queries_disabled) { 1589 if (rctx->b.chip_class >= R700) { 1590 db_render_control |= S_028D0C_R700_PERFECT_ZPASS_COUNTS(1); 1591 } 1592 db_render_override |= S_028D10_NOOP_CULL_DISABLE(1); 1593 } else { 1594 db_render_control |= S_028D0C_ZPASS_INCREMENT_DISABLE(1); 1595 } 1596 1597 if (rctx->db_state.rsurf && rctx->db_state.rsurf->db_htile_surface) { 1598 /* FORCE_OFF means HiZ/HiS are determined by DB_SHADER_CONTROL */ 1599 db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_OFF); 1600 /* This is to fix a lockup when hyperz and alpha test are enabled at 1601 * the same time somehow GPU get confuse on which order to pick for 1602 * z test 1603 */ 1604 if (rctx->alphatest_state.sx_alpha_test_control) { 1605 db_render_override |= S_028D10_FORCE_SHADER_Z_ORDER(1); 1606 } 1607 } else { 1608 db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE); 1609 } 1610 if (rctx->b.chip_class == R600 && rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0) { 1611 /* sample shading and hyperz causes lockups on R6xx chips */ 1612 db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE); 1613 } 1614 if (a->flush_depthstencil_through_cb) { 1615 assert(a->copy_depth || a->copy_stencil); 1616 1617 db_render_control |= S_028D0C_DEPTH_COPY_ENABLE(a->copy_depth) | 1618 S_028D0C_STENCIL_COPY_ENABLE(a->copy_stencil) | 1619 S_028D0C_COPY_CENTROID(1) | 1620 S_028D0C_COPY_SAMPLE(a->copy_sample); 1621 1622 if (rctx->b.chip_class == R600) 1623 db_render_override |= S_028D10_NOOP_CULL_DISABLE(1); 1624 1625 if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 || 1626 rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635) 1627 db_render_override |= S_028D10_FORCE_HIZ_ENABLE(V_028D10_FORCE_DISABLE); 1628 } else if (a->flush_depth_inplace || a->flush_stencil_inplace) { 1629 db_render_control |= S_028D0C_DEPTH_COMPRESS_DISABLE(a->flush_depth_inplace) | 1630 S_028D0C_STENCIL_COMPRESS_DISABLE(a->flush_stencil_inplace); 1631 db_render_override |= S_028D10_NOOP_CULL_DISABLE(1); 1632 } 1633 if (a->htile_clear) { 1634 db_render_control |= S_028D0C_DEPTH_CLEAR_ENABLE(1); 1635 } 1636 1637 /* RV770 workaround for a hang with 8x MSAA. */ 1638 if (rctx->b.family == CHIP_RV770 && a->log_samples == 3) { 1639 db_render_override |= S_028D10_MAX_TILES_IN_DTT(6); 1640 } 1641 1642 radeon_set_context_reg_seq(cs, R_028D0C_DB_RENDER_CONTROL, 2); 1643 radeon_emit(cs, db_render_control); /* R_028D0C_DB_RENDER_CONTROL */ 1644 radeon_emit(cs, db_render_override); /* R_028D10_DB_RENDER_OVERRIDE */ 1645 radeon_set_context_reg(cs, R_02880C_DB_SHADER_CONTROL, a->db_shader_control); 1646 } 1647 1648 static void r600_emit_config_state(struct r600_context *rctx, struct r600_atom *atom) 1649 { 1650 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1651 struct r600_config_state *a = (struct r600_config_state*)atom; 1652 1653 radeon_set_config_reg(cs, R_008C04_SQ_GPR_RESOURCE_MGMT_1, a->sq_gpr_resource_mgmt_1); 1654 radeon_set_config_reg(cs, R_008C08_SQ_GPR_RESOURCE_MGMT_2, a->sq_gpr_resource_mgmt_2); 1655 } 1656 1657 static void r600_emit_vertex_buffers(struct r600_context *rctx, struct r600_atom *atom) 1658 { 1659 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1660 uint32_t dirty_mask = rctx->vertex_buffer_state.dirty_mask; 1661 1662 while (dirty_mask) { 1663 struct pipe_vertex_buffer *vb; 1664 struct r600_resource *rbuffer; 1665 unsigned offset; 1666 unsigned buffer_index = u_bit_scan(&dirty_mask); 1667 1668 vb = &rctx->vertex_buffer_state.vb[buffer_index]; 1669 rbuffer = (struct r600_resource*)vb->buffer.resource; 1670 assert(rbuffer); 1671 1672 offset = vb->buffer_offset; 1673 1674 /* fetch resources start at index 320 (OFFSET_FS) */ 1675 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); 1676 radeon_emit(cs, (R600_FETCH_CONSTANTS_OFFSET_FS + buffer_index) * 7); 1677 radeon_emit(cs, offset); /* RESOURCEi_WORD0 */ 1678 radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */ 1679 radeon_emit(cs, /* RESOURCEi_WORD2 */ 1680 S_038008_ENDIAN_SWAP(r600_endian_swap(32)) | 1681 S_038008_STRIDE(vb->stride)); 1682 radeon_emit(cs, 0); /* RESOURCEi_WORD3 */ 1683 radeon_emit(cs, 0); /* RESOURCEi_WORD4 */ 1684 radeon_emit(cs, 0); /* RESOURCEi_WORD5 */ 1685 radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */ 1686 1687 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1688 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer, 1689 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER)); 1690 } 1691 } 1692 1693 static void r600_emit_constant_buffers(struct r600_context *rctx, 1694 struct r600_constbuf_state *state, 1695 unsigned buffer_id_base, 1696 unsigned reg_alu_constbuf_size, 1697 unsigned reg_alu_const_cache) 1698 { 1699 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1700 uint32_t dirty_mask = state->dirty_mask; 1701 1702 while (dirty_mask) { 1703 struct pipe_constant_buffer *cb; 1704 struct r600_resource *rbuffer; 1705 unsigned offset; 1706 unsigned buffer_index = ffs(dirty_mask) - 1; 1707 unsigned gs_ring_buffer = (buffer_index == R600_GS_RING_CONST_BUFFER); 1708 cb = &state->cb[buffer_index]; 1709 rbuffer = (struct r600_resource*)cb->buffer; 1710 assert(rbuffer); 1711 1712 offset = cb->buffer_offset; 1713 1714 if (!gs_ring_buffer) { 1715 assert(buffer_index < R600_MAX_HW_CONST_BUFFERS); 1716 radeon_set_context_reg(cs, reg_alu_constbuf_size + buffer_index * 4, 1717 DIV_ROUND_UP(cb->buffer_size, 256)); 1718 radeon_set_context_reg(cs, reg_alu_const_cache + buffer_index * 4, offset >> 8); 1719 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1720 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer, 1721 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER)); 1722 } 1723 1724 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); 1725 radeon_emit(cs, (buffer_id_base + buffer_index) * 7); 1726 radeon_emit(cs, offset); /* RESOURCEi_WORD0 */ 1727 radeon_emit(cs, rbuffer->b.b.width0 - offset - 1); /* RESOURCEi_WORD1 */ 1728 radeon_emit(cs, /* RESOURCEi_WORD2 */ 1729 S_038008_ENDIAN_SWAP(gs_ring_buffer ? ENDIAN_NONE : r600_endian_swap(32)) | 1730 S_038008_STRIDE(gs_ring_buffer ? 4 : 16)); 1731 radeon_emit(cs, 0); /* RESOURCEi_WORD3 */ 1732 radeon_emit(cs, 0); /* RESOURCEi_WORD4 */ 1733 radeon_emit(cs, 0); /* RESOURCEi_WORD5 */ 1734 radeon_emit(cs, 0xc0000000); /* RESOURCEi_WORD6 */ 1735 1736 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1737 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer, 1738 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER)); 1739 1740 dirty_mask &= ~(1 << buffer_index); 1741 } 1742 state->dirty_mask = 0; 1743 } 1744 1745 static void r600_emit_vs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom) 1746 { 1747 r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX], 1748 R600_FETCH_CONSTANTS_OFFSET_VS, 1749 R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 1750 R_028980_ALU_CONST_CACHE_VS_0); 1751 } 1752 1753 static void r600_emit_gs_constant_buffers(struct r600_context *rctx, struct r600_atom *atom) 1754 { 1755 r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY], 1756 R600_FETCH_CONSTANTS_OFFSET_GS, 1757 R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 1758 R_0289C0_ALU_CONST_CACHE_GS_0); 1759 } 1760 1761 static void r600_emit_ps_constant_buffers(struct r600_context *rctx, struct r600_atom *atom) 1762 { 1763 r600_emit_constant_buffers(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT], 1764 R600_FETCH_CONSTANTS_OFFSET_PS, 1765 R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 1766 R_028940_ALU_CONST_CACHE_PS_0); 1767 } 1768 1769 static void r600_emit_sampler_views(struct r600_context *rctx, 1770 struct r600_samplerview_state *state, 1771 unsigned resource_id_base) 1772 { 1773 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1774 uint32_t dirty_mask = state->dirty_mask; 1775 1776 while (dirty_mask) { 1777 struct r600_pipe_sampler_view *rview; 1778 unsigned resource_index = u_bit_scan(&dirty_mask); 1779 unsigned reloc; 1780 1781 rview = state->views[resource_index]; 1782 assert(rview); 1783 1784 radeon_emit(cs, PKT3(PKT3_SET_RESOURCE, 7, 0)); 1785 radeon_emit(cs, (resource_id_base + resource_index) * 7); 1786 radeon_emit_array(cs, rview->tex_resource_words, 7); 1787 1788 reloc = radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rview->tex_resource, 1789 RADEON_USAGE_READ, 1790 r600_get_sampler_view_priority(rview->tex_resource)); 1791 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1792 radeon_emit(cs, reloc); 1793 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1794 radeon_emit(cs, reloc); 1795 } 1796 state->dirty_mask = 0; 1797 } 1798 1799 1800 static void r600_emit_vs_sampler_views(struct r600_context *rctx, struct r600_atom *atom) 1801 { 1802 r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views, R600_FETCH_CONSTANTS_OFFSET_VS + R600_MAX_CONST_BUFFERS); 1803 } 1804 1805 static void r600_emit_gs_sampler_views(struct r600_context *rctx, struct r600_atom *atom) 1806 { 1807 r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views, R600_FETCH_CONSTANTS_OFFSET_GS + R600_MAX_CONST_BUFFERS); 1808 } 1809 1810 static void r600_emit_ps_sampler_views(struct r600_context *rctx, struct r600_atom *atom) 1811 { 1812 r600_emit_sampler_views(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views, R600_FETCH_CONSTANTS_OFFSET_PS + R600_MAX_CONST_BUFFERS); 1813 } 1814 1815 static void r600_emit_sampler_states(struct r600_context *rctx, 1816 struct r600_textures_info *texinfo, 1817 unsigned resource_id_base, 1818 unsigned border_color_reg) 1819 { 1820 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1821 uint32_t dirty_mask = texinfo->states.dirty_mask; 1822 1823 while (dirty_mask) { 1824 struct r600_pipe_sampler_state *rstate; 1825 struct r600_pipe_sampler_view *rview; 1826 unsigned i = u_bit_scan(&dirty_mask); 1827 1828 rstate = texinfo->states.states[i]; 1829 assert(rstate); 1830 rview = texinfo->views.views[i]; 1831 1832 /* TEX_ARRAY_OVERRIDE must be set for array textures to disable 1833 * filtering between layers. 1834 * Don't update TEX_ARRAY_OVERRIDE if we don't have the sampler view. 1835 */ 1836 if (rview) { 1837 enum pipe_texture_target target = rview->base.texture->target; 1838 if (target == PIPE_TEXTURE_1D_ARRAY || 1839 target == PIPE_TEXTURE_2D_ARRAY) { 1840 rstate->tex_sampler_words[0] |= S_03C000_TEX_ARRAY_OVERRIDE(1); 1841 texinfo->is_array_sampler[i] = true; 1842 } else { 1843 rstate->tex_sampler_words[0] &= C_03C000_TEX_ARRAY_OVERRIDE; 1844 texinfo->is_array_sampler[i] = false; 1845 } 1846 } 1847 1848 radeon_emit(cs, PKT3(PKT3_SET_SAMPLER, 3, 0)); 1849 radeon_emit(cs, (resource_id_base + i) * 3); 1850 radeon_emit_array(cs, rstate->tex_sampler_words, 3); 1851 1852 if (rstate->border_color_use) { 1853 unsigned offset; 1854 1855 offset = border_color_reg; 1856 offset += i * 16; 1857 radeon_set_config_reg_seq(cs, offset, 4); 1858 radeon_emit_array(cs, rstate->border_color.ui, 4); 1859 } 1860 } 1861 texinfo->states.dirty_mask = 0; 1862 } 1863 1864 static void r600_emit_vs_sampler_states(struct r600_context *rctx, struct r600_atom *atom) 1865 { 1866 r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_VERTEX], 18, R_00A600_TD_VS_SAMPLER0_BORDER_RED); 1867 } 1868 1869 static void r600_emit_gs_sampler_states(struct r600_context *rctx, struct r600_atom *atom) 1870 { 1871 r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY], 36, R_00A800_TD_GS_SAMPLER0_BORDER_RED); 1872 } 1873 1874 static void r600_emit_ps_sampler_states(struct r600_context *rctx, struct r600_atom *atom) 1875 { 1876 r600_emit_sampler_states(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT], 0, R_00A400_TD_PS_SAMPLER0_BORDER_RED); 1877 } 1878 1879 static void r600_emit_seamless_cube_map(struct r600_context *rctx, struct r600_atom *atom) 1880 { 1881 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1882 unsigned tmp; 1883 1884 tmp = S_009508_DISABLE_CUBE_ANISO(1) | 1885 S_009508_SYNC_GRADIENT(1) | 1886 S_009508_SYNC_WALKER(1) | 1887 S_009508_SYNC_ALIGNER(1); 1888 if (!rctx->seamless_cube_map.enabled) { 1889 tmp |= S_009508_DISABLE_CUBE_WRAP(1); 1890 } 1891 radeon_set_config_reg(cs, R_009508_TA_CNTL_AUX, tmp); 1892 } 1893 1894 static void r600_emit_sample_mask(struct r600_context *rctx, struct r600_atom *a) 1895 { 1896 struct r600_sample_mask *s = (struct r600_sample_mask*)a; 1897 uint8_t mask = s->sample_mask; 1898 1899 radeon_set_context_reg(rctx->b.gfx.cs, R_028C48_PA_SC_AA_MASK, 1900 mask | (mask << 8) | (mask << 16) | (mask << 24)); 1901 } 1902 1903 static void r600_emit_vertex_fetch_shader(struct r600_context *rctx, struct r600_atom *a) 1904 { 1905 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1906 struct r600_cso_state *state = (struct r600_cso_state*)a; 1907 struct r600_fetch_shader *shader = (struct r600_fetch_shader*)state->cso; 1908 1909 if (!shader) 1910 return; 1911 1912 radeon_set_context_reg(cs, R_028894_SQ_PGM_START_FS, shader->offset >> 8); 1913 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1914 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, shader->buffer, 1915 RADEON_USAGE_READ, 1916 RADEON_PRIO_SHADER_BINARY)); 1917 } 1918 1919 static void r600_emit_shader_stages(struct r600_context *rctx, struct r600_atom *a) 1920 { 1921 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1922 struct r600_shader_stages_state *state = (struct r600_shader_stages_state*)a; 1923 1924 uint32_t v2 = 0, primid = 0; 1925 1926 if (rctx->vs_shader->current->shader.vs_as_gs_a) { 1927 v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_A); 1928 primid = 1; 1929 } 1930 1931 if (state->geom_enable) { 1932 uint32_t cut_val; 1933 1934 if (rctx->gs_shader->gs_max_out_vertices <= 128) 1935 cut_val = V_028A40_GS_CUT_128; 1936 else if (rctx->gs_shader->gs_max_out_vertices <= 256) 1937 cut_val = V_028A40_GS_CUT_256; 1938 else if (rctx->gs_shader->gs_max_out_vertices <= 512) 1939 cut_val = V_028A40_GS_CUT_512; 1940 else 1941 cut_val = V_028A40_GS_CUT_1024; 1942 1943 v2 = S_028A40_MODE(V_028A40_GS_SCENARIO_G) | 1944 S_028A40_CUT_MODE(cut_val); 1945 1946 if (rctx->gs_shader->current->shader.gs_prim_id_input) 1947 primid = 1; 1948 } 1949 1950 radeon_set_context_reg(cs, R_028A40_VGT_GS_MODE, v2); 1951 radeon_set_context_reg(cs, R_028A84_VGT_PRIMITIVEID_EN, primid); 1952 } 1953 1954 static void r600_emit_gs_rings(struct r600_context *rctx, struct r600_atom *a) 1955 { 1956 struct radeon_winsys_cs *cs = rctx->b.gfx.cs; 1957 struct r600_gs_rings_state *state = (struct r600_gs_rings_state*)a; 1958 struct r600_resource *rbuffer; 1959 1960 radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); 1961 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 1962 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); 1963 1964 if (state->enable) { 1965 rbuffer =(struct r600_resource*)state->esgs_ring.buffer; 1966 radeon_set_config_reg(cs, R_008C40_SQ_ESGS_RING_BASE, 0); 1967 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1968 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer, 1969 RADEON_USAGE_READWRITE, 1970 RADEON_PRIO_SHADER_RINGS)); 1971 radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 1972 state->esgs_ring.buffer_size >> 8); 1973 1974 rbuffer =(struct r600_resource*)state->gsvs_ring.buffer; 1975 radeon_set_config_reg(cs, R_008C48_SQ_GSVS_RING_BASE, 0); 1976 radeon_emit(cs, PKT3(PKT3_NOP, 0, 0)); 1977 radeon_emit(cs, radeon_add_to_buffer_list(&rctx->b, &rctx->b.gfx, rbuffer, 1978 RADEON_USAGE_READWRITE, 1979 RADEON_PRIO_SHADER_RINGS)); 1980 radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 1981 state->gsvs_ring.buffer_size >> 8); 1982 } else { 1983 radeon_set_config_reg(cs, R_008C44_SQ_ESGS_RING_SIZE, 0); 1984 radeon_set_config_reg(cs, R_008C4C_SQ_GSVS_RING_SIZE, 0); 1985 } 1986 1987 radeon_set_config_reg(cs, R_008040_WAIT_UNTIL, S_008040_WAIT_3D_IDLE(1)); 1988 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0)); 1989 radeon_emit(cs, EVENT_TYPE(EVENT_TYPE_VGT_FLUSH)); 1990 } 1991 1992 /* Adjust GPR allocation on R6xx/R7xx */ 1993 bool r600_adjust_gprs(struct r600_context *rctx) 1994 { 1995 unsigned num_gprs[R600_NUM_HW_STAGES]; 1996 unsigned new_gprs[R600_NUM_HW_STAGES]; 1997 unsigned cur_gprs[R600_NUM_HW_STAGES]; 1998 unsigned def_gprs[R600_NUM_HW_STAGES]; 1999 unsigned def_num_clause_temp_gprs = rctx->r6xx_num_clause_temp_gprs; 2000 unsigned max_gprs; 2001 unsigned tmp, tmp2; 2002 unsigned i; 2003 bool need_recalc = false, use_default = true; 2004 2005 /* hardware will reserve twice num_clause_temp_gprs */ 2006 max_gprs = def_num_clause_temp_gprs * 2; 2007 for (i = 0; i < R600_NUM_HW_STAGES; i++) { 2008 def_gprs[i] = rctx->default_gprs[i]; 2009 max_gprs += def_gprs[i]; 2010 } 2011 2012 cur_gprs[R600_HW_STAGE_PS] = G_008C04_NUM_PS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1); 2013 cur_gprs[R600_HW_STAGE_VS] = G_008C04_NUM_VS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_1); 2014 cur_gprs[R600_HW_STAGE_GS] = G_008C08_NUM_GS_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2); 2015 cur_gprs[R600_HW_STAGE_ES] = G_008C08_NUM_ES_GPRS(rctx->config_state.sq_gpr_resource_mgmt_2); 2016 2017 num_gprs[R600_HW_STAGE_PS] = rctx->ps_shader->current->shader.bc.ngpr; 2018 if (rctx->gs_shader) { 2019 num_gprs[R600_HW_STAGE_ES] = rctx->vs_shader->current->shader.bc.ngpr; 2020 num_gprs[R600_HW_STAGE_GS] = rctx->gs_shader->current->shader.bc.ngpr; 2021 num_gprs[R600_HW_STAGE_VS] = rctx->gs_shader->current->gs_copy_shader->shader.bc.ngpr; 2022 } else { 2023 num_gprs[R600_HW_STAGE_ES] = 0; 2024 num_gprs[R600_HW_STAGE_GS] = 0; 2025 num_gprs[R600_HW_STAGE_VS] = rctx->vs_shader->current->shader.bc.ngpr; 2026 } 2027 2028 for (i = 0; i < R600_NUM_HW_STAGES; i++) { 2029 new_gprs[i] = num_gprs[i]; 2030 if (new_gprs[i] > cur_gprs[i]) 2031 need_recalc = true; 2032 if (new_gprs[i] > def_gprs[i]) 2033 use_default = false; 2034 } 2035 2036 /* the sum of all SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS must <= to max_gprs */ 2037 if (!need_recalc) 2038 return true; 2039 2040 /* try to use switch back to default */ 2041 if (!use_default) { 2042 /* always privilege vs stage so that at worst we have the 2043 * pixel stage producing wrong output (not the vertex 2044 * stage) */ 2045 new_gprs[R600_HW_STAGE_PS] = max_gprs - def_num_clause_temp_gprs * 2; 2046 for (i = R600_HW_STAGE_VS; i < R600_NUM_HW_STAGES; i++) 2047 new_gprs[R600_HW_STAGE_PS] -= new_gprs[i]; 2048 } else { 2049 for (i = 0; i < R600_NUM_HW_STAGES; i++) 2050 new_gprs[i] = def_gprs[i]; 2051 } 2052 2053 /* SQ_PGM_RESOURCES_*.NUM_GPRS must always be program to a value <= 2054 * SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS otherwise the GPU will lockup 2055 * Also if a shader use more gpr than SQ_GPR_RESOURCE_MGMT*.NUM_*_GPRS 2056 * it will lockup. So in this case just discard the draw command 2057 * and don't change the current gprs repartitions. 2058 */ 2059 for (i = 0; i < R600_NUM_HW_STAGES; i++) { 2060 if (num_gprs[i] > new_gprs[i]) { 2061 R600_ERR("shaders require too many register (%d + %d + %d + %d) " 2062 "for a combined maximum of %d\n", 2063 num_gprs[R600_HW_STAGE_PS], num_gprs[R600_HW_STAGE_VS], num_gprs[R600_HW_STAGE_ES], num_gprs[R600_HW_STAGE_GS], max_gprs); 2064 return false; 2065 } 2066 } 2067 2068 /* in some case we endup recomputing the current value */ 2069 tmp = S_008C04_NUM_PS_GPRS(new_gprs[R600_HW_STAGE_PS]) | 2070 S_008C04_NUM_VS_GPRS(new_gprs[R600_HW_STAGE_VS]) | 2071 S_008C04_NUM_CLAUSE_TEMP_GPRS(def_num_clause_temp_gprs); 2072 2073 tmp2 = S_008C08_NUM_ES_GPRS(new_gprs[R600_HW_STAGE_ES]) | 2074 S_008C08_NUM_GS_GPRS(new_gprs[R600_HW_STAGE_GS]); 2075 if (rctx->config_state.sq_gpr_resource_mgmt_1 != tmp || rctx->config_state.sq_gpr_resource_mgmt_2 != tmp2) { 2076 rctx->config_state.sq_gpr_resource_mgmt_1 = tmp; 2077 rctx->config_state.sq_gpr_resource_mgmt_2 = tmp2; 2078 r600_mark_atom_dirty(rctx, &rctx->config_state.atom); 2079 rctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE; 2080 } 2081 return true; 2082 } 2083 2084 void r600_init_atom_start_cs(struct r600_context *rctx) 2085 { 2086 int ps_prio; 2087 int vs_prio; 2088 int gs_prio; 2089 int es_prio; 2090 int num_ps_gprs; 2091 int num_vs_gprs; 2092 int num_gs_gprs; 2093 int num_es_gprs; 2094 int num_temp_gprs; 2095 int num_ps_threads; 2096 int num_vs_threads; 2097 int num_gs_threads; 2098 int num_es_threads; 2099 int num_ps_stack_entries; 2100 int num_vs_stack_entries; 2101 int num_gs_stack_entries; 2102 int num_es_stack_entries; 2103 enum radeon_family family; 2104 struct r600_command_buffer *cb = &rctx->start_cs_cmd; 2105 uint32_t tmp, i; 2106 2107 r600_init_command_buffer(cb, 256); 2108 2109 /* R6xx requires this packet at the start of each command buffer */ 2110 if (rctx->b.chip_class == R600) { 2111 r600_store_value(cb, PKT3(PKT3_START_3D_CMDBUF, 0, 0)); 2112 r600_store_value(cb, 0); 2113 } 2114 /* All asics require this one */ 2115 r600_store_value(cb, PKT3(PKT3_CONTEXT_CONTROL, 1, 0)); 2116 r600_store_value(cb, 0x80000000); 2117 r600_store_value(cb, 0x80000000); 2118 2119 /* We're setting config registers here. */ 2120 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0)); 2121 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PS_PARTIAL_FLUSH) | EVENT_INDEX(4)); 2122 2123 /* This enables pipeline stat & streamout queries. 2124 * They are only disabled by blits. 2125 */ 2126 r600_store_value(cb, PKT3(PKT3_EVENT_WRITE, 0, 0)); 2127 r600_store_value(cb, EVENT_TYPE(EVENT_TYPE_PIPELINESTAT_START) | EVENT_INDEX(0)); 2128 2129 family = rctx->b.family; 2130 ps_prio = 0; 2131 vs_prio = 1; 2132 gs_prio = 2; 2133 es_prio = 3; 2134 switch (family) { 2135 case CHIP_R600: 2136 num_ps_gprs = 192; 2137 num_vs_gprs = 56; 2138 num_temp_gprs = 4; 2139 num_gs_gprs = 0; 2140 num_es_gprs = 0; 2141 num_ps_threads = 136; 2142 num_vs_threads = 48; 2143 num_gs_threads = 4; 2144 num_es_threads = 4; 2145 num_ps_stack_entries = 128; 2146 num_vs_stack_entries = 128; 2147 num_gs_stack_entries = 0; 2148 num_es_stack_entries = 0; 2149 break; 2150 case CHIP_RV630: 2151 case CHIP_RV635: 2152 num_ps_gprs = 84; 2153 num_vs_gprs = 36; 2154 num_temp_gprs = 4; 2155 num_gs_gprs = 0; 2156 num_es_gprs = 0; 2157 num_ps_threads = 144; 2158 num_vs_threads = 40; 2159 num_gs_threads = 4; 2160 num_es_threads = 4; 2161 num_ps_stack_entries = 40; 2162 num_vs_stack_entries = 40; 2163 num_gs_stack_entries = 32; 2164 num_es_stack_entries = 16; 2165 break; 2166 case CHIP_RV610: 2167 case CHIP_RV620: 2168 case CHIP_RS780: 2169 case CHIP_RS880: 2170 default: 2171 num_ps_gprs = 84; 2172 num_vs_gprs = 36; 2173 num_temp_gprs = 4; 2174 num_gs_gprs = 0; 2175 num_es_gprs = 0; 2176 /* use limits 40 VS and at least 16 ES/GS */ 2177 num_ps_threads = 120; 2178 num_vs_threads = 40; 2179 num_gs_threads = 16; 2180 num_es_threads = 16; 2181 num_ps_stack_entries = 40; 2182 num_vs_stack_entries = 40; 2183 num_gs_stack_entries = 32; 2184 num_es_stack_entries = 16; 2185 break; 2186 case CHIP_RV670: 2187 num_ps_gprs = 144; 2188 num_vs_gprs = 40; 2189 num_temp_gprs = 4; 2190 num_gs_gprs = 0; 2191 num_es_gprs = 0; 2192 num_ps_threads = 136; 2193 num_vs_threads = 48; 2194 num_gs_threads = 4; 2195 num_es_threads = 4; 2196 num_ps_stack_entries = 40; 2197 num_vs_stack_entries = 40; 2198 num_gs_stack_entries = 32; 2199 num_es_stack_entries = 16; 2200 break; 2201 case CHIP_RV770: 2202 num_ps_gprs = 130; 2203 num_vs_gprs = 56; 2204 num_temp_gprs = 4; 2205 num_gs_gprs = 31; 2206 num_es_gprs = 31; 2207 num_ps_threads = 180; 2208 num_vs_threads = 60; 2209 num_gs_threads = 4; 2210 num_es_threads = 4; 2211 num_ps_stack_entries = 128; 2212 num_vs_stack_entries = 128; 2213 num_gs_stack_entries = 128; 2214 num_es_stack_entries = 128; 2215 break; 2216 case CHIP_RV730: 2217 case CHIP_RV740: 2218 num_ps_gprs = 84; 2219 num_vs_gprs = 36; 2220 num_temp_gprs = 4; 2221 num_gs_gprs = 0; 2222 num_es_gprs = 0; 2223 num_ps_threads = 180; 2224 num_vs_threads = 60; 2225 num_gs_threads = 4; 2226 num_es_threads = 4; 2227 num_ps_stack_entries = 128; 2228 num_vs_stack_entries = 128; 2229 num_gs_stack_entries = 0; 2230 num_es_stack_entries = 0; 2231 break; 2232 case CHIP_RV710: 2233 num_ps_gprs = 192; 2234 num_vs_gprs = 56; 2235 num_temp_gprs = 4; 2236 num_gs_gprs = 0; 2237 num_es_gprs = 0; 2238 num_ps_threads = 136; 2239 num_vs_threads = 48; 2240 num_gs_threads = 4; 2241 num_es_threads = 4; 2242 num_ps_stack_entries = 128; 2243 num_vs_stack_entries = 128; 2244 num_gs_stack_entries = 0; 2245 num_es_stack_entries = 0; 2246 break; 2247 } 2248 2249 rctx->default_gprs[R600_HW_STAGE_PS] = num_ps_gprs; 2250 rctx->default_gprs[R600_HW_STAGE_VS] = num_vs_gprs; 2251 rctx->default_gprs[R600_HW_STAGE_GS] = 0; 2252 rctx->default_gprs[R600_HW_STAGE_ES] = 0; 2253 2254 rctx->r6xx_num_clause_temp_gprs = num_temp_gprs; 2255 2256 /* SQ_CONFIG */ 2257 tmp = 0; 2258 switch (family) { 2259 case CHIP_RV610: 2260 case CHIP_RV620: 2261 case CHIP_RS780: 2262 case CHIP_RS880: 2263 case CHIP_RV710: 2264 break; 2265 default: 2266 tmp |= S_008C00_VC_ENABLE(1); 2267 break; 2268 } 2269 tmp |= S_008C00_DX9_CONSTS(0); 2270 tmp |= S_008C00_ALU_INST_PREFER_VECTOR(1); 2271 tmp |= S_008C00_PS_PRIO(ps_prio); 2272 tmp |= S_008C00_VS_PRIO(vs_prio); 2273 tmp |= S_008C00_GS_PRIO(gs_prio); 2274 tmp |= S_008C00_ES_PRIO(es_prio); 2275 r600_store_config_reg(cb, R_008C00_SQ_CONFIG, tmp); 2276 2277 /* SQ_GPR_RESOURCE_MGMT_2 */ 2278 tmp = S_008C08_NUM_GS_GPRS(num_gs_gprs); 2279 tmp |= S_008C08_NUM_ES_GPRS(num_es_gprs); 2280 r600_store_config_reg_seq(cb, R_008C08_SQ_GPR_RESOURCE_MGMT_2, 4); 2281 r600_store_value(cb, tmp); 2282 2283 /* SQ_THREAD_RESOURCE_MGMT */ 2284 tmp = S_008C0C_NUM_PS_THREADS(num_ps_threads); 2285 tmp |= S_008C0C_NUM_VS_THREADS(num_vs_threads); 2286 tmp |= S_008C0C_NUM_GS_THREADS(num_gs_threads); 2287 tmp |= S_008C0C_NUM_ES_THREADS(num_es_threads); 2288 r600_store_value(cb, tmp); /* R_008C0C_SQ_THREAD_RESOURCE_MGMT */ 2289 2290 /* SQ_STACK_RESOURCE_MGMT_1 */ 2291 tmp = S_008C10_NUM_PS_STACK_ENTRIES(num_ps_stack_entries); 2292 tmp |= S_008C10_NUM_VS_STACK_ENTRIES(num_vs_stack_entries); 2293 r600_store_value(cb, tmp); /* R_008C10_SQ_STACK_RESOURCE_MGMT_1 */ 2294 2295 /* SQ_STACK_RESOURCE_MGMT_2 */ 2296 tmp = S_008C14_NUM_GS_STACK_ENTRIES(num_gs_stack_entries); 2297 tmp |= S_008C14_NUM_ES_STACK_ENTRIES(num_es_stack_entries); 2298 r600_store_value(cb, tmp); /* R_008C14_SQ_STACK_RESOURCE_MGMT_2 */ 2299 2300 r600_store_config_reg(cb, R_009714_VC_ENHANCE, 0); 2301 2302 if (rctx->b.chip_class >= R700) { 2303 r600_store_context_reg(cb, R_028A50_VGT_ENHANCE, 4); 2304 r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0x00004000); 2305 r600_store_config_reg(cb, R_009830_DB_DEBUG, 0); 2306 r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x00420204); 2307 r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 0); 2308 } else { 2309 r600_store_config_reg(cb, R_008D8C_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ, 0); 2310 r600_store_config_reg(cb, R_009830_DB_DEBUG, 0x82000000); 2311 r600_store_config_reg(cb, R_009838_DB_WATERMARKS, 0x01020204); 2312 r600_store_context_reg(cb, R_0286C8_SPI_THREAD_GROUPING, 1); 2313 } 2314 r600_store_context_reg_seq(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE, 9); 2315 r600_store_value(cb, 0); /* R_0288A8_SQ_ESGS_RING_ITEMSIZE */ 2316 r600_store_value(cb, 0); /* R_0288AC_SQ_GSVS_RING_ITEMSIZE */ 2317 r600_store_value(cb, 0); /* R_0288B0_SQ_ESTMP_RING_ITEMSIZE */ 2318 r600_store_value(cb, 0); /* R_0288B4_SQ_GSTMP_RING_ITEMSIZE */ 2319 r600_store_value(cb, 0); /* R_0288B8_SQ_VSTMP_RING_ITEMSIZE */ 2320 r600_store_value(cb, 0); /* R_0288BC_SQ_PSTMP_RING_ITEMSIZE */ 2321 r600_store_value(cb, 0); /* R_0288C0_SQ_FBUF_RING_ITEMSIZE */ 2322 r600_store_value(cb, 0); /* R_0288C4_SQ_REDUC_RING_ITEMSIZE */ 2323 r600_store_value(cb, 0); /* R_0288C8_SQ_GS_VERT_ITEMSIZE */ 2324 2325 /* to avoid GPU doing any preloading of constant from random address */ 2326 r600_store_context_reg_seq(cb, R_028140_ALU_CONST_BUFFER_SIZE_PS_0, 16); 2327 for (i = 0; i < 16; i++) 2328 r600_store_value(cb, 0); 2329 2330 r600_store_context_reg_seq(cb, R_028180_ALU_CONST_BUFFER_SIZE_VS_0, 16); 2331 for (i = 0; i < 16; i++) 2332 r600_store_value(cb, 0); 2333 2334 r600_store_context_reg_seq(cb, R_0281C0_ALU_CONST_BUFFER_SIZE_GS_0, 16); 2335 for (i = 0; i < 16; i++) 2336 r600_store_value(cb, 0); 2337 2338 r600_store_context_reg_seq(cb, R_028A10_VGT_OUTPUT_PATH_CNTL, 13); 2339 r600_store_value(cb, 0); /* R_028A10_VGT_OUTPUT_PATH_CNTL */ 2340 r600_store_value(cb, 0); /* R_028A14_VGT_HOS_CNTL */ 2341 r600_store_value(cb, 0); /* R_028A18_VGT_HOS_MAX_TESS_LEVEL */ 2342 r600_store_value(cb, 0); /* R_028A1C_VGT_HOS_MIN_TESS_LEVEL */ 2343 r600_store_value(cb, 0); /* R_028A20_VGT_HOS_REUSE_DEPTH */ 2344 r600_store_value(cb, 0); /* R_028A24_VGT_GROUP_PRIM_TYPE */ 2345 r600_store_value(cb, 0); /* R_028A28_VGT_GROUP_FIRST_DECR */ 2346 r600_store_value(cb, 0); /* R_028A2C_VGT_GROUP_DECR */ 2347 r600_store_value(cb, 0); /* R_028A30_VGT_GROUP_VECT_0_CNTL */ 2348 r600_store_value(cb, 0); /* R_028A34_VGT_GROUP_VECT_1_CNTL */ 2349 r600_store_value(cb, 0); /* R_028A38_VGT_GROUP_VECT_0_FMT_CNTL */ 2350 r600_store_value(cb, 0); /* R_028A3C_VGT_GROUP_VECT_1_FMT_CNTL */ 2351 r600_store_value(cb, 0); /* R_028A40_VGT_GS_MODE, 0); */ 2352 2353 r600_store_context_reg(cb, R_028A84_VGT_PRIMITIVEID_EN, 0); 2354 r600_store_context_reg(cb, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 0); 2355 r600_store_context_reg(cb, R_028AA4_VGT_INSTANCE_STEP_RATE_1, 0); 2356 2357 r600_store_context_reg_seq(cb, R_028AB4_VGT_REUSE_OFF, 2); 2358 r600_store_value(cb, 1); /* R_028AB4_VGT_REUSE_OFF */ 2359 r600_store_value(cb, 0); /* R_028AB8_VGT_VTX_CNT_EN */ 2360 2361 r600_store_context_reg(cb, R_028B20_VGT_STRMOUT_BUFFER_EN, 0); 2362 2363 r600_store_ctl_const(cb, R_03CFF0_SQ_VTX_BASE_VTX_LOC, 0); 2364 2365 r600_store_context_reg(cb, R_028028_DB_STENCIL_CLEAR, 0); 2366 2367 r600_store_context_reg_seq(cb, R_0286DC_SPI_FOG_CNTL, 3); 2368 r600_store_value(cb, 0); /* R_0286DC_SPI_FOG_CNTL */ 2369 r600_store_value(cb, 0); /* R_0286E0_SPI_FOG_FUNC_SCALE */ 2370 r600_store_value(cb, 0); /* R_0286E4_SPI_FOG_FUNC_BIAS */ 2371 2372 r600_store_context_reg_seq(cb, R_028D28_DB_SRESULTS_COMPARE_STATE0, 3); 2373 r600_store_value(cb, 0); /* R_028D28_DB_SRESULTS_COMPARE_STATE0 */ 2374 r600_store_value(cb, 0); /* R_028D2C_DB_SRESULTS_COMPARE_STATE1 */ 2375 r600_store_value(cb, 0); /* R_028D30_DB_PRELOAD_CONTROL */ 2376 2377 r600_store_context_reg(cb, R_028820_PA_CL_NANINF_CNTL, 0); 2378 r600_store_context_reg(cb, R_028A48_PA_SC_MPASS_PS_CNTL, 0); 2379 2380 r600_store_context_reg(cb, R_028200_PA_SC_WINDOW_OFFSET, 0); 2381 r600_store_context_reg(cb, R_02820C_PA_SC_CLIPRECT_RULE, 0xFFFF); 2382 2383 if (rctx->b.chip_class >= R700) { 2384 r600_store_context_reg(cb, R_028230_PA_SC_EDGERULE, 0xAAAAAAAA); 2385 } 2386 2387 r600_store_context_reg_seq(cb, R_028C30_CB_CLRCMP_CONTROL, 4); 2388 r600_store_value(cb, 0x1000000); /* R_028C30_CB_CLRCMP_CONTROL */ 2389 r600_store_value(cb, 0); /* R_028C34_CB_CLRCMP_SRC */ 2390 r600_store_value(cb, 0xFF); /* R_028C38_CB_CLRCMP_DST */ 2391 r600_store_value(cb, 0xFFFFFFFF); /* R_028C3C_CB_CLRCMP_MSK */ 2392 2393 r600_store_context_reg_seq(cb, R_028030_PA_SC_SCREEN_SCISSOR_TL, 2); 2394 r600_store_value(cb, 0); /* R_028030_PA_SC_SCREEN_SCISSOR_TL */ 2395 r600_store_value(cb, S_028034_BR_X(8192) | S_028034_BR_Y(8192)); /* R_028034_PA_SC_SCREEN_SCISSOR_BR */ 2396 2397 r600_store_context_reg_seq(cb, R_028240_PA_SC_GENERIC_SCISSOR_TL, 2); 2398 r600_store_value(cb, 0); /* R_028240_PA_SC_GENERIC_SCISSOR_TL */ 2399 r600_store_value(cb, S_028244_BR_X(8192) | S_028244_BR_Y(8192)); /* R_028244_PA_SC_GENERIC_SCISSOR_BR */ 2400 2401 r600_store_context_reg_seq(cb, R_0288CC_SQ_PGM_CF_OFFSET_PS, 5); 2402 r600_store_value(cb, 0); /* R_0288CC_SQ_PGM_CF_OFFSET_PS */ 2403 r600_store_value(cb, 0); /* R_0288D0_SQ_PGM_CF_OFFSET_VS */ 2404 r600_store_value(cb, 0); /* R_0288D4_SQ_PGM_CF_OFFSET_GS */ 2405 r600_store_value(cb, 0); /* R_0288D8_SQ_PGM_CF_OFFSET_ES */ 2406 r600_store_value(cb, 0); /* R_0288DC_SQ_PGM_CF_OFFSET_FS */ 2407 2408 r600_store_context_reg(cb, R_0288E0_SQ_VTX_SEMANTIC_CLEAR, ~0); 2409 2410 r600_store_context_reg_seq(cb, R_028400_VGT_MAX_VTX_INDX, 2); 2411 r600_store_value(cb, ~0); /* R_028400_VGT_MAX_VTX_INDX */ 2412 r600_store_value(cb, 0); /* R_028404_VGT_MIN_VTX_INDX */ 2413 2414 r600_store_context_reg(cb, R_0288A4_SQ_PGM_RESOURCES_FS, 0); 2415 2416 if (rctx->b.chip_class == R700) 2417 r600_store_context_reg(cb, R_028350_SX_MISC, 0); 2418 if (rctx->b.chip_class == R700 && rctx->screen->b.has_streamout) 2419 r600_store_context_reg(cb, R_028354_SX_SURFACE_SYNC, S_028354_SURFACE_SYNC_MASK(0xf)); 2420 2421 r600_store_context_reg(cb, R_028800_DB_DEPTH_CONTROL, 0); 2422 if (rctx->screen->b.has_streamout) { 2423 r600_store_context_reg(cb, R_028B28_VGT_STRMOUT_DRAW_OPAQUE_OFFSET, 0); 2424 } 2425 2426 r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0, 0x1000FFF); 2427 r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (32 * 4), 0x1000FFF); 2428 r600_store_loop_const(cb, R_03E200_SQ_LOOP_CONST_0 + (64 * 4), 0x1000FFF); 2429 } 2430 2431 void r600_update_ps_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2432 { 2433 struct r600_context *rctx = (struct r600_context *)ctx; 2434 struct r600_command_buffer *cb = &shader->command_buffer; 2435 struct r600_shader *rshader = &shader->shader; 2436 unsigned i, exports_ps, num_cout, spi_ps_in_control_0, spi_input_z, spi_ps_in_control_1, db_shader_control; 2437 int pos_index = -1, face_index = -1, fixed_pt_position_index = -1; 2438 unsigned tmp, sid, ufi = 0; 2439 int need_linear = 0; 2440 unsigned z_export = 0, stencil_export = 0, mask_export = 0; 2441 unsigned sprite_coord_enable = rctx->rasterizer ? rctx->rasterizer->sprite_coord_enable : 0; 2442 2443 if (!cb->buf) { 2444 r600_init_command_buffer(cb, 64); 2445 } else { 2446 cb->num_dw = 0; 2447 } 2448 2449 r600_store_context_reg_seq(cb, R_028644_SPI_PS_INPUT_CNTL_0, rshader->ninput); 2450 for (i = 0; i < rshader->ninput; i++) { 2451 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION) 2452 pos_index = i; 2453 if (rshader->input[i].name == TGSI_SEMANTIC_FACE && face_index == -1) 2454 face_index = i; 2455 if (rshader->input[i].name == TGSI_SEMANTIC_SAMPLEID) 2456 fixed_pt_position_index = i; 2457 2458 sid = rshader->input[i].spi_sid; 2459 2460 tmp = S_028644_SEMANTIC(sid); 2461 2462 /* D3D 9 behaviour. GL is undefined */ 2463 if (rshader->input[i].name == TGSI_SEMANTIC_COLOR && rshader->input[i].sid == 0) 2464 tmp |= S_028644_DEFAULT_VAL(3); 2465 2466 if (rshader->input[i].name == TGSI_SEMANTIC_POSITION || 2467 rshader->input[i].interpolate == TGSI_INTERPOLATE_CONSTANT || 2468 (rshader->input[i].interpolate == TGSI_INTERPOLATE_COLOR && 2469 rctx->rasterizer && rctx->rasterizer->flatshade)) 2470 tmp |= S_028644_FLAT_SHADE(1); 2471 2472 if (rshader->input[i].name == TGSI_SEMANTIC_GENERIC && 2473 sprite_coord_enable & (1 << rshader->input[i].sid)) { 2474 tmp |= S_028644_PT_SPRITE_TEX(1); 2475 } 2476 2477 if (rshader->input[i].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) 2478 tmp |= S_028644_SEL_CENTROID(1); 2479 2480 if (rshader->input[i].interpolate_location == TGSI_INTERPOLATE_LOC_SAMPLE) 2481 tmp |= S_028644_SEL_SAMPLE(1); 2482 2483 if (rshader->input[i].interpolate == TGSI_INTERPOLATE_LINEAR) { 2484 need_linear = 1; 2485 tmp |= S_028644_SEL_LINEAR(1); 2486 } 2487 2488 r600_store_value(cb, tmp); 2489 } 2490 2491 db_shader_control = 0; 2492 for (i = 0; i < rshader->noutput; i++) { 2493 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION) 2494 z_export = 1; 2495 if (rshader->output[i].name == TGSI_SEMANTIC_STENCIL) 2496 stencil_export = 1; 2497 if (rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK && 2498 rctx->framebuffer.nr_samples > 1 && rctx->ps_iter_samples > 0) 2499 mask_export = 1; 2500 } 2501 db_shader_control |= S_02880C_Z_EXPORT_ENABLE(z_export); 2502 db_shader_control |= S_02880C_STENCIL_REF_EXPORT_ENABLE(stencil_export); 2503 db_shader_control |= S_02880C_MASK_EXPORT_ENABLE(mask_export); 2504 if (rshader->uses_kill) 2505 db_shader_control |= S_02880C_KILL_ENABLE(1); 2506 2507 exports_ps = 0; 2508 for (i = 0; i < rshader->noutput; i++) { 2509 if (rshader->output[i].name == TGSI_SEMANTIC_POSITION || 2510 rshader->output[i].name == TGSI_SEMANTIC_STENCIL || 2511 rshader->output[i].name == TGSI_SEMANTIC_SAMPLEMASK) { 2512 exports_ps |= 1; 2513 } 2514 } 2515 num_cout = rshader->nr_ps_color_exports; 2516 exports_ps |= S_028854_EXPORT_COLORS(num_cout); 2517 if (!exports_ps) { 2518 /* always at least export 1 component per pixel */ 2519 exports_ps = 2; 2520 } 2521 2522 shader->nr_ps_color_outputs = num_cout; 2523 2524 spi_ps_in_control_0 = S_0286CC_NUM_INTERP(rshader->ninput) | 2525 S_0286CC_PERSP_GRADIENT_ENA(1)| 2526 S_0286CC_LINEAR_GRADIENT_ENA(need_linear); 2527 spi_input_z = 0; 2528 if (pos_index != -1) { 2529 spi_ps_in_control_0 |= (S_0286CC_POSITION_ENA(1) | 2530 S_0286CC_POSITION_CENTROID(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_CENTROID) | 2531 S_0286CC_POSITION_ADDR(rshader->input[pos_index].gpr) | 2532 S_0286CC_BARYC_SAMPLE_CNTL(1)) | 2533 S_0286CC_POSITION_SAMPLE(rshader->input[pos_index].interpolate_location == TGSI_INTERPOLATE_LOC_SAMPLE); 2534 spi_input_z |= S_0286D8_PROVIDE_Z_TO_SPI(1); 2535 } 2536 2537 spi_ps_in_control_1 = 0; 2538 if (face_index != -1) { 2539 spi_ps_in_control_1 |= S_0286D0_FRONT_FACE_ENA(1) | 2540 S_0286D0_FRONT_FACE_ADDR(rshader->input[face_index].gpr); 2541 } 2542 if (fixed_pt_position_index != -1) { 2543 spi_ps_in_control_1 |= S_0286D0_FIXED_PT_POSITION_ENA(1) | 2544 S_0286D0_FIXED_PT_POSITION_ADDR(rshader->input[fixed_pt_position_index].gpr); 2545 } 2546 2547 /* HW bug in original R600 */ 2548 if (rctx->b.family == CHIP_R600) 2549 ufi = 1; 2550 2551 r600_store_context_reg_seq(cb, R_0286CC_SPI_PS_IN_CONTROL_0, 2); 2552 r600_store_value(cb, spi_ps_in_control_0); /* R_0286CC_SPI_PS_IN_CONTROL_0 */ 2553 r600_store_value(cb, spi_ps_in_control_1); /* R_0286D0_SPI_PS_IN_CONTROL_1 */ 2554 2555 r600_store_context_reg(cb, R_0286D8_SPI_INPUT_Z, spi_input_z); 2556 2557 r600_store_context_reg_seq(cb, R_028850_SQ_PGM_RESOURCES_PS, 2); 2558 r600_store_value(cb, /* R_028850_SQ_PGM_RESOURCES_PS*/ 2559 S_028850_NUM_GPRS(rshader->bc.ngpr) | 2560 /* 2561 * docs are misleading about the dx10_clamp bit. This only affects 2562 * instructions using CLAMP dst modifier, in which case they will 2563 * return 0 with this set for a NaN (otherwise NaN). 2564 */ 2565 S_028850_DX10_CLAMP(1) | 2566 S_028850_STACK_SIZE(rshader->bc.nstack) | 2567 S_028850_UNCACHED_FIRST_INST(ufi)); 2568 r600_store_value(cb, exports_ps); /* R_028854_SQ_PGM_EXPORTS_PS */ 2569 2570 r600_store_context_reg(cb, R_028840_SQ_PGM_START_PS, 0); 2571 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */ 2572 2573 /* only set some bits here, the other bits are set in the dsa state */ 2574 shader->db_shader_control = db_shader_control; 2575 shader->ps_depth_export = z_export | stencil_export | mask_export; 2576 2577 shader->sprite_coord_enable = sprite_coord_enable; 2578 if (rctx->rasterizer) 2579 shader->flatshade = rctx->rasterizer->flatshade; 2580 } 2581 2582 void r600_update_vs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2583 { 2584 struct r600_command_buffer *cb = &shader->command_buffer; 2585 struct r600_shader *rshader = &shader->shader; 2586 unsigned spi_vs_out_id[10] = {}; 2587 unsigned i, tmp, nparams = 0; 2588 2589 for (i = 0; i < rshader->noutput; i++) { 2590 if (rshader->output[i].spi_sid) { 2591 tmp = rshader->output[i].spi_sid << ((nparams & 3) * 8); 2592 spi_vs_out_id[nparams / 4] |= tmp; 2593 nparams++; 2594 } 2595 } 2596 2597 r600_init_command_buffer(cb, 32); 2598 2599 r600_store_context_reg_seq(cb, R_028614_SPI_VS_OUT_ID_0, 10); 2600 for (i = 0; i < 10; i++) { 2601 r600_store_value(cb, spi_vs_out_id[i]); 2602 } 2603 2604 /* Certain attributes (position, psize, etc.) don't count as params. 2605 * VS is required to export at least one param and r600_shader_from_tgsi() 2606 * takes care of adding a dummy export. 2607 */ 2608 if (nparams < 1) 2609 nparams = 1; 2610 2611 r600_store_context_reg(cb, R_0286C4_SPI_VS_OUT_CONFIG, 2612 S_0286C4_VS_EXPORT_COUNT(nparams - 1)); 2613 r600_store_context_reg(cb, R_028868_SQ_PGM_RESOURCES_VS, 2614 S_028868_NUM_GPRS(rshader->bc.ngpr) | 2615 S_028868_DX10_CLAMP(1) | 2616 S_028868_STACK_SIZE(rshader->bc.nstack)); 2617 if (rshader->vs_position_window_space) { 2618 r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL, 2619 S_028818_VTX_XY_FMT(1) | S_028818_VTX_Z_FMT(1)); 2620 } else { 2621 r600_store_context_reg(cb, R_028818_PA_CL_VTE_CNTL, 2622 S_028818_VTX_W0_FMT(1) | 2623 S_028818_VPORT_X_SCALE_ENA(1) | S_028818_VPORT_X_OFFSET_ENA(1) | 2624 S_028818_VPORT_Y_SCALE_ENA(1) | S_028818_VPORT_Y_OFFSET_ENA(1) | 2625 S_028818_VPORT_Z_SCALE_ENA(1) | S_028818_VPORT_Z_OFFSET_ENA(1)); 2626 2627 } 2628 r600_store_context_reg(cb, R_028858_SQ_PGM_START_VS, 0); 2629 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */ 2630 2631 shader->pa_cl_vs_out_cntl = 2632 S_02881C_VS_OUT_CCDIST0_VEC_ENA((rshader->clip_dist_write & 0x0F) != 0) | 2633 S_02881C_VS_OUT_CCDIST1_VEC_ENA((rshader->clip_dist_write & 0xF0) != 0) | 2634 S_02881C_VS_OUT_MISC_VEC_ENA(rshader->vs_out_misc_write) | 2635 S_02881C_USE_VTX_POINT_SIZE(rshader->vs_out_point_size) | 2636 S_02881C_USE_VTX_EDGE_FLAG(rshader->vs_out_edgeflag) | 2637 S_02881C_USE_VTX_RENDER_TARGET_INDX(rshader->vs_out_layer) | 2638 S_02881C_USE_VTX_VIEWPORT_INDX(rshader->vs_out_viewport); 2639 } 2640 2641 #define RV610_GSVS_ALIGN 32 2642 #define R600_GSVS_ALIGN 16 2643 2644 void r600_update_gs_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2645 { 2646 struct r600_context *rctx = (struct r600_context *)ctx; 2647 struct r600_command_buffer *cb = &shader->command_buffer; 2648 struct r600_shader *rshader = &shader->shader; 2649 struct r600_shader *cp_shader = &shader->gs_copy_shader->shader; 2650 unsigned gsvs_itemsize = 2651 (cp_shader->ring_item_sizes[0] * shader->selector->gs_max_out_vertices) >> 2; 2652 2653 /* some r600s needs gsvs itemsize aligned to cacheline size 2654 this was fixed in rs780 and above. */ 2655 switch (rctx->b.family) { 2656 case CHIP_RV610: 2657 gsvs_itemsize = align(gsvs_itemsize, RV610_GSVS_ALIGN); 2658 break; 2659 case CHIP_R600: 2660 case CHIP_RV630: 2661 case CHIP_RV670: 2662 case CHIP_RV620: 2663 case CHIP_RV635: 2664 gsvs_itemsize = align(gsvs_itemsize, R600_GSVS_ALIGN); 2665 break; 2666 default: 2667 break; 2668 } 2669 2670 r600_init_command_buffer(cb, 64); 2671 2672 /* VGT_GS_MODE is written by r600_emit_shader_stages */ 2673 r600_store_context_reg(cb, R_028AB8_VGT_VTX_CNT_EN, 1); 2674 2675 if (rctx->b.chip_class >= R700) { 2676 r600_store_context_reg(cb, R_028B38_VGT_GS_MAX_VERT_OUT, 2677 S_028B38_MAX_VERT_OUT(shader->selector->gs_max_out_vertices)); 2678 } 2679 r600_store_context_reg(cb, R_028A6C_VGT_GS_OUT_PRIM_TYPE, 2680 r600_conv_prim_to_gs_out(shader->selector->gs_output_prim)); 2681 2682 r600_store_context_reg(cb, R_0288C8_SQ_GS_VERT_ITEMSIZE, 2683 cp_shader->ring_item_sizes[0] >> 2); 2684 2685 r600_store_context_reg(cb, R_0288A8_SQ_ESGS_RING_ITEMSIZE, 2686 (rshader->ring_item_sizes[0]) >> 2); 2687 2688 r600_store_context_reg(cb, R_0288AC_SQ_GSVS_RING_ITEMSIZE, 2689 gsvs_itemsize); 2690 2691 /* FIXME calculate these values somehow ??? */ 2692 r600_store_config_reg_seq(cb, R_0088C8_VGT_GS_PER_ES, 2); 2693 r600_store_value(cb, 0x80); /* GS_PER_ES */ 2694 r600_store_value(cb, 0x100); /* ES_PER_GS */ 2695 r600_store_config_reg_seq(cb, R_0088E8_VGT_GS_PER_VS, 1); 2696 r600_store_value(cb, 0x2); /* GS_PER_VS */ 2697 2698 r600_store_context_reg(cb, R_02887C_SQ_PGM_RESOURCES_GS, 2699 S_02887C_NUM_GPRS(rshader->bc.ngpr) | 2700 S_02887C_DX10_CLAMP(1) | 2701 S_02887C_STACK_SIZE(rshader->bc.nstack)); 2702 r600_store_context_reg(cb, R_02886C_SQ_PGM_START_GS, 0); 2703 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */ 2704 } 2705 2706 void r600_update_es_state(struct pipe_context *ctx, struct r600_pipe_shader *shader) 2707 { 2708 struct r600_command_buffer *cb = &shader->command_buffer; 2709 struct r600_shader *rshader = &shader->shader; 2710 2711 r600_init_command_buffer(cb, 32); 2712 2713 r600_store_context_reg(cb, R_028890_SQ_PGM_RESOURCES_ES, 2714 S_028890_NUM_GPRS(rshader->bc.ngpr) | 2715 S_028890_DX10_CLAMP(1) | 2716 S_028890_STACK_SIZE(rshader->bc.nstack)); 2717 r600_store_context_reg(cb, R_028880_SQ_PGM_START_ES, 0); 2718 /* After that, the NOP relocation packet must be emitted (shader->bo, RADEON_USAGE_READ). */ 2719 } 2720 2721 2722 void *r600_create_resolve_blend(struct r600_context *rctx) 2723 { 2724 struct pipe_blend_state blend; 2725 unsigned i; 2726 2727 memset(&blend, 0, sizeof(blend)); 2728 blend.independent_blend_enable = true; 2729 for (i = 0; i < 2; i++) { 2730 blend.rt[i].colormask = 0xf; 2731 blend.rt[i].blend_enable = 1; 2732 blend.rt[i].rgb_func = PIPE_BLEND_ADD; 2733 blend.rt[i].alpha_func = PIPE_BLEND_ADD; 2734 blend.rt[i].rgb_src_factor = PIPE_BLENDFACTOR_ZERO; 2735 blend.rt[i].rgb_dst_factor = PIPE_BLENDFACTOR_ZERO; 2736 blend.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO; 2737 blend.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO; 2738 } 2739 return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX); 2740 } 2741 2742 void *r700_create_resolve_blend(struct r600_context *rctx) 2743 { 2744 struct pipe_blend_state blend; 2745 2746 memset(&blend, 0, sizeof(blend)); 2747 blend.independent_blend_enable = true; 2748 blend.rt[0].colormask = 0xf; 2749 return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_RESOLVE_BOX); 2750 } 2751 2752 void *r600_create_decompress_blend(struct r600_context *rctx) 2753 { 2754 struct pipe_blend_state blend; 2755 2756 memset(&blend, 0, sizeof(blend)); 2757 blend.independent_blend_enable = true; 2758 blend.rt[0].colormask = 0xf; 2759 return r600_create_blend_state_mode(&rctx->b.b, &blend, V_028808_SPECIAL_EXPAND_SAMPLES); 2760 } 2761 2762 void *r600_create_db_flush_dsa(struct r600_context *rctx) 2763 { 2764 struct pipe_depth_stencil_alpha_state dsa; 2765 boolean quirk = false; 2766 2767 if (rctx->b.family == CHIP_RV610 || rctx->b.family == CHIP_RV630 || 2768 rctx->b.family == CHIP_RV620 || rctx->b.family == CHIP_RV635) 2769 quirk = true; 2770 2771 memset(&dsa, 0, sizeof(dsa)); 2772 2773 if (quirk) { 2774 dsa.depth.enabled = 1; 2775 dsa.depth.func = PIPE_FUNC_LEQUAL; 2776 dsa.stencil[0].enabled = 1; 2777 dsa.stencil[0].func = PIPE_FUNC_ALWAYS; 2778 dsa.stencil[0].zpass_op = PIPE_STENCIL_OP_KEEP; 2779 dsa.stencil[0].zfail_op = PIPE_STENCIL_OP_INCR; 2780 dsa.stencil[0].writemask = 0xff; 2781 } 2782 2783 return rctx->b.b.create_depth_stencil_alpha_state(&rctx->b.b, &dsa); 2784 } 2785 2786 void r600_update_db_shader_control(struct r600_context * rctx) 2787 { 2788 bool dual_export; 2789 unsigned db_shader_control; 2790 uint8_t ps_conservative_z; 2791 2792 if (!rctx->ps_shader) { 2793 return; 2794 } 2795 2796 dual_export = rctx->framebuffer.export_16bpc && 2797 !rctx->ps_shader->current->ps_depth_export; 2798 2799 db_shader_control = rctx->ps_shader->current->db_shader_control | 2800 S_02880C_DUAL_EXPORT_ENABLE(dual_export); 2801 2802 ps_conservative_z = rctx->ps_shader->current->shader.ps_conservative_z; 2803 2804 /* When alpha test is enabled we can't trust the hw to make the proper 2805 * decision on the order in which ztest should be run related to fragment 2806 * shader execution. 2807 * 2808 * If alpha test is enabled perform z test after fragment. RE_Z (early 2809 * z test but no write to the zbuffer) seems to cause lockup on r6xx/r7xx 2810 */ 2811 if (rctx->alphatest_state.sx_alpha_test_control) { 2812 db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z); 2813 } else { 2814 db_shader_control |= S_02880C_Z_ORDER(V_02880C_EARLY_Z_THEN_LATE_Z); 2815 } 2816 2817 if (db_shader_control != rctx->db_misc_state.db_shader_control || 2818 ps_conservative_z != rctx->db_misc_state.ps_conservative_z) { 2819 rctx->db_misc_state.db_shader_control = db_shader_control; 2820 rctx->db_misc_state.ps_conservative_z = ps_conservative_z; 2821 r600_mark_atom_dirty(rctx, &rctx->db_misc_state.atom); 2822 } 2823 } 2824 2825 static inline unsigned r600_array_mode(unsigned mode) 2826 { 2827 switch (mode) { 2828 default: 2829 case RADEON_SURF_MODE_LINEAR_ALIGNED: return V_0280A0_ARRAY_LINEAR_ALIGNED; 2830 break; 2831 case RADEON_SURF_MODE_1D: return V_0280A0_ARRAY_1D_TILED_THIN1; 2832 break; 2833 case RADEON_SURF_MODE_2D: return V_0280A0_ARRAY_2D_TILED_THIN1; 2834 } 2835 } 2836 2837 static boolean r600_dma_copy_tile(struct r600_context *rctx, 2838 struct pipe_resource *dst, 2839 unsigned dst_level, 2840 unsigned dst_x, 2841 unsigned dst_y, 2842 unsigned dst_z, 2843 struct pipe_resource *src, 2844 unsigned src_level, 2845 unsigned src_x, 2846 unsigned src_y, 2847 unsigned src_z, 2848 unsigned copy_height, 2849 unsigned pitch, 2850 unsigned bpp) 2851 { 2852 struct radeon_winsys_cs *cs = rctx->b.dma.cs; 2853 struct r600_texture *rsrc = (struct r600_texture*)src; 2854 struct r600_texture *rdst = (struct r600_texture*)dst; 2855 unsigned array_mode, lbpp, pitch_tile_max, slice_tile_max, size; 2856 unsigned ncopy, height, cheight, detile, i, x, y, z, src_mode, dst_mode; 2857 uint64_t base, addr; 2858 2859 dst_mode = rdst->surface.u.legacy.level[dst_level].mode; 2860 src_mode = rsrc->surface.u.legacy.level[src_level].mode; 2861 assert(dst_mode != src_mode); 2862 2863 y = 0; 2864 lbpp = util_logbase2(bpp); 2865 pitch_tile_max = ((pitch / bpp) / 8) - 1; 2866 2867 if (dst_mode == RADEON_SURF_MODE_LINEAR_ALIGNED) { 2868 /* T2L */ 2869 array_mode = r600_array_mode(src_mode); 2870 slice_tile_max = (rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.u.legacy.level[src_level].nblk_y) / (8*8); 2871 slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0; 2872 /* linear height must be the same as the slice tile max height, it's ok even 2873 * if the linear destination/source have smaller heigh as the size of the 2874 * dma packet will be using the copy_height which is always smaller or equal 2875 * to the linear height 2876 */ 2877 height = u_minify(rsrc->resource.b.b.height0, src_level); 2878 detile = 1; 2879 x = src_x; 2880 y = src_y; 2881 z = src_z; 2882 base = rsrc->surface.u.legacy.level[src_level].offset; 2883 addr = rdst->surface.u.legacy.level[dst_level].offset; 2884 addr += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z; 2885 addr += dst_y * pitch + dst_x * bpp; 2886 } else { 2887 /* L2T */ 2888 array_mode = r600_array_mode(dst_mode); 2889 slice_tile_max = (rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.u.legacy.level[dst_level].nblk_y) / (8*8); 2890 slice_tile_max = slice_tile_max ? slice_tile_max - 1 : 0; 2891 /* linear height must be the same as the slice tile max height, it's ok even 2892 * if the linear destination/source have smaller heigh as the size of the 2893 * dma packet will be using the copy_height which is always smaller or equal 2894 * to the linear height 2895 */ 2896 height = u_minify(rdst->resource.b.b.height0, dst_level); 2897 detile = 0; 2898 x = dst_x; 2899 y = dst_y; 2900 z = dst_z; 2901 base = rdst->surface.u.legacy.level[dst_level].offset; 2902 addr = rsrc->surface.u.legacy.level[src_level].offset; 2903 addr += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_z; 2904 addr += src_y * pitch + src_x * bpp; 2905 } 2906 /* check that we are in dw/base alignment constraint */ 2907 if (addr % 4 || base % 256) { 2908 return FALSE; 2909 } 2910 2911 /* It's a r6xx/r7xx limitation, the blit must be on 8 boundary for number 2912 * line in the blit. Compute max 8 line we can copy in the size limit 2913 */ 2914 cheight = ((R600_DMA_COPY_MAX_SIZE_DW * 4) / pitch) & 0xfffffff8; 2915 ncopy = (copy_height / cheight) + !!(copy_height % cheight); 2916 r600_need_dma_space(&rctx->b, ncopy * 7, &rdst->resource, &rsrc->resource); 2917 2918 for (i = 0; i < ncopy; i++) { 2919 cheight = cheight > copy_height ? copy_height : cheight; 2920 size = (cheight * pitch) / 4; 2921 /* emit reloc before writing cs so that cs is always in consistent state */ 2922 radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rsrc->resource, RADEON_USAGE_READ, 2923 RADEON_PRIO_SDMA_TEXTURE); 2924 radeon_add_to_buffer_list(&rctx->b, &rctx->b.dma, &rdst->resource, RADEON_USAGE_WRITE, 2925 RADEON_PRIO_SDMA_TEXTURE); 2926 radeon_emit(cs, DMA_PACKET(DMA_PACKET_COPY, 1, 0, size)); 2927 radeon_emit(cs, base >> 8); 2928 radeon_emit(cs, (detile << 31) | (array_mode << 27) | 2929 (lbpp << 24) | ((height - 1) << 10) | 2930 pitch_tile_max); 2931 radeon_emit(cs, (slice_tile_max << 12) | (z << 0)); 2932 radeon_emit(cs, (x << 3) | (y << 17)); 2933 radeon_emit(cs, addr & 0xfffffffc); 2934 radeon_emit(cs, (addr >> 32UL) & 0xff); 2935 copy_height -= cheight; 2936 addr += cheight * pitch; 2937 y += cheight; 2938 } 2939 return TRUE; 2940 } 2941 2942 static void r600_dma_copy(struct pipe_context *ctx, 2943 struct pipe_resource *dst, 2944 unsigned dst_level, 2945 unsigned dstx, unsigned dsty, unsigned dstz, 2946 struct pipe_resource *src, 2947 unsigned src_level, 2948 const struct pipe_box *src_box) 2949 { 2950 struct r600_context *rctx = (struct r600_context *)ctx; 2951 struct r600_texture *rsrc = (struct r600_texture*)src; 2952 struct r600_texture *rdst = (struct r600_texture*)dst; 2953 unsigned dst_pitch, src_pitch, bpp, dst_mode, src_mode, copy_height; 2954 unsigned src_w, dst_w; 2955 unsigned src_x, src_y; 2956 unsigned dst_x = dstx, dst_y = dsty, dst_z = dstz; 2957 2958 if (rctx->b.dma.cs == NULL) { 2959 goto fallback; 2960 } 2961 2962 if (dst->target == PIPE_BUFFER && src->target == PIPE_BUFFER) { 2963 if (dst_x % 4 || src_box->x % 4 || src_box->width % 4) 2964 goto fallback; 2965 2966 r600_dma_copy_buffer(rctx, dst, src, dst_x, src_box->x, src_box->width); 2967 return; 2968 } 2969 2970 if (src_box->depth > 1 || 2971 !r600_prepare_for_dma_blit(&rctx->b, rdst, dst_level, dstx, dsty, 2972 dstz, rsrc, src_level, src_box)) 2973 goto fallback; 2974 2975 src_x = util_format_get_nblocksx(src->format, src_box->x); 2976 dst_x = util_format_get_nblocksx(src->format, dst_x); 2977 src_y = util_format_get_nblocksy(src->format, src_box->y); 2978 dst_y = util_format_get_nblocksy(src->format, dst_y); 2979 2980 bpp = rdst->surface.bpe; 2981 dst_pitch = rdst->surface.u.legacy.level[dst_level].nblk_x * rdst->surface.bpe; 2982 src_pitch = rsrc->surface.u.legacy.level[src_level].nblk_x * rsrc->surface.bpe; 2983 src_w = u_minify(rsrc->resource.b.b.width0, src_level); 2984 dst_w = u_minify(rdst->resource.b.b.width0, dst_level); 2985 copy_height = src_box->height / rsrc->surface.blk_h; 2986 2987 dst_mode = rdst->surface.u.legacy.level[dst_level].mode; 2988 src_mode = rsrc->surface.u.legacy.level[src_level].mode; 2989 2990 if (src_pitch != dst_pitch || src_box->x || dst_x || src_w != dst_w) { 2991 /* strict requirement on r6xx/r7xx */ 2992 goto fallback; 2993 } 2994 /* lot of constraint on alignment this should capture them all */ 2995 if (src_pitch % 8 || src_box->y % 8 || dst_y % 8) { 2996 goto fallback; 2997 } 2998 2999 if (src_mode == dst_mode) { 3000 uint64_t dst_offset, src_offset, size; 3001 3002 /* simple dma blit would do NOTE code here assume : 3003 * src_box.x/y == 0 3004 * dst_x/y == 0 3005 * dst_pitch == src_pitch 3006 */ 3007 src_offset= rsrc->surface.u.legacy.level[src_level].offset; 3008 src_offset += (uint64_t)rsrc->surface.u.legacy.level[src_level].slice_size_dw * 4 * src_box->z; 3009 src_offset += src_y * src_pitch + src_x * bpp; 3010 dst_offset = rdst->surface.u.legacy.level[dst_level].offset; 3011 dst_offset += (uint64_t)rdst->surface.u.legacy.level[dst_level].slice_size_dw * 4 * dst_z; 3012 dst_offset += dst_y * dst_pitch + dst_x * bpp; 3013 size = src_box->height * src_pitch; 3014 /* must be dw aligned */ 3015 if (dst_offset % 4 || src_offset % 4 || size % 4) { 3016 goto fallback; 3017 } 3018 r600_dma_copy_buffer(rctx, dst, src, dst_offset, src_offset, size); 3019 } else { 3020 if (!r600_dma_copy_tile(rctx, dst, dst_level, dst_x, dst_y, dst_z, 3021 src, src_level, src_x, src_y, src_box->z, 3022 copy_height, dst_pitch, bpp)) { 3023 goto fallback; 3024 } 3025 } 3026 return; 3027 3028 fallback: 3029 r600_resource_copy_region(ctx, dst, dst_level, dstx, dsty, dstz, 3030 src, src_level, src_box); 3031 } 3032 3033 void r600_init_state_functions(struct r600_context *rctx) 3034 { 3035 unsigned id = 1; 3036 unsigned i; 3037 /* !!! 3038 * To avoid GPU lockup registers must be emited in a specific order 3039 * (no kidding ...). The order below is important and have been 3040 * partialy infered from analyzing fglrx command stream. 3041 * 3042 * Don't reorder atom without carefully checking the effect (GPU lockup 3043 * or piglit regression). 3044 * !!! 3045 */ 3046 3047 r600_init_atom(rctx, &rctx->framebuffer.atom, id++, r600_emit_framebuffer_state, 0); 3048 3049 /* shader const */ 3050 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_VERTEX].atom, id++, r600_emit_vs_constant_buffers, 0); 3051 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_GEOMETRY].atom, id++, r600_emit_gs_constant_buffers, 0); 3052 r600_init_atom(rctx, &rctx->constbuf_state[PIPE_SHADER_FRAGMENT].atom, id++, r600_emit_ps_constant_buffers, 0); 3053 3054 /* sampler must be emited before TA_CNTL_AUX otherwise DISABLE_CUBE_WRAP change 3055 * does not take effect (TA_CNTL_AUX emited by r600_emit_seamless_cube_map) 3056 */ 3057 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].states.atom, id++, r600_emit_vs_sampler_states, 0); 3058 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].states.atom, id++, r600_emit_gs_sampler_states, 0); 3059 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].states.atom, id++, r600_emit_ps_sampler_states, 0); 3060 /* resource */ 3061 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_VERTEX].views.atom, id++, r600_emit_vs_sampler_views, 0); 3062 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_GEOMETRY].views.atom, id++, r600_emit_gs_sampler_views, 0); 3063 r600_init_atom(rctx, &rctx->samplers[PIPE_SHADER_FRAGMENT].views.atom, id++, r600_emit_ps_sampler_views, 0); 3064 r600_init_atom(rctx, &rctx->vertex_buffer_state.atom, id++, r600_emit_vertex_buffers, 0); 3065 3066 r600_init_atom(rctx, &rctx->vgt_state.atom, id++, r600_emit_vgt_state, 10); 3067 3068 r600_init_atom(rctx, &rctx->seamless_cube_map.atom, id++, r600_emit_seamless_cube_map, 3); 3069 r600_init_atom(rctx, &rctx->sample_mask.atom, id++, r600_emit_sample_mask, 3); 3070 rctx->sample_mask.sample_mask = ~0; 3071 3072 r600_init_atom(rctx, &rctx->alphatest_state.atom, id++, r600_emit_alphatest_state, 6); 3073 r600_init_atom(rctx, &rctx->blend_color.atom, id++, r600_emit_blend_color, 6); 3074 r600_init_atom(rctx, &rctx->blend_state.atom, id++, r600_emit_cso_state, 0); 3075 r600_init_atom(rctx, &rctx->cb_misc_state.atom, id++, r600_emit_cb_misc_state, 7); 3076 r600_init_atom(rctx, &rctx->clip_misc_state.atom, id++, r600_emit_clip_misc_state, 6); 3077 r600_init_atom(rctx, &rctx->clip_state.atom, id++, r600_emit_clip_state, 26); 3078 r600_init_atom(rctx, &rctx->db_misc_state.atom, id++, r600_emit_db_misc_state, 7); 3079 r600_init_atom(rctx, &rctx->db_state.atom, id++, r600_emit_db_state, 11); 3080 r600_init_atom(rctx, &rctx->dsa_state.atom, id++, r600_emit_cso_state, 0); 3081 r600_init_atom(rctx, &rctx->poly_offset_state.atom, id++, r600_emit_polygon_offset, 9); 3082 r600_init_atom(rctx, &rctx->rasterizer_state.atom, id++, r600_emit_cso_state, 0); 3083 r600_add_atom(rctx, &rctx->b.scissors.atom, id++); 3084 r600_add_atom(rctx, &rctx->b.viewports.atom, id++); 3085 r600_init_atom(rctx, &rctx->config_state.atom, id++, r600_emit_config_state, 3); 3086 r600_init_atom(rctx, &rctx->stencil_ref.atom, id++, r600_emit_stencil_ref, 4); 3087 r600_init_atom(rctx, &rctx->vertex_fetch_shader.atom, id++, r600_emit_vertex_fetch_shader, 5); 3088 r600_add_atom(rctx, &rctx->b.render_cond_atom, id++); 3089 r600_add_atom(rctx, &rctx->b.streamout.begin_atom, id++); 3090 r600_add_atom(rctx, &rctx->b.streamout.enable_atom, id++); 3091 for (i = 0; i < R600_NUM_HW_STAGES; i++) 3092 r600_init_atom(rctx, &rctx->hw_shader_stages[i].atom, id++, r600_emit_shader, 0); 3093 r600_init_atom(rctx, &rctx->shader_stages.atom, id++, r600_emit_shader_stages, 0); 3094 r600_init_atom(rctx, &rctx->gs_rings.atom, id++, r600_emit_gs_rings, 0); 3095 3096 rctx->b.b.create_blend_state = r600_create_blend_state; 3097 rctx->b.b.create_depth_stencil_alpha_state = r600_create_dsa_state; 3098 rctx->b.b.create_rasterizer_state = r600_create_rs_state; 3099 rctx->b.b.create_sampler_state = r600_create_sampler_state; 3100 rctx->b.b.create_sampler_view = r600_create_sampler_view; 3101 rctx->b.b.set_framebuffer_state = r600_set_framebuffer_state; 3102 rctx->b.b.set_polygon_stipple = r600_set_polygon_stipple; 3103 rctx->b.b.set_min_samples = r600_set_min_samples; 3104 rctx->b.b.get_sample_position = r600_get_sample_position; 3105 rctx->b.dma_copy = r600_dma_copy; 3106 } 3107 /* this function must be last */ 3108